//===-- SparcInstrSelection.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
//
// BURS instruction selection for SPARC V9 architecture.
//
//===----------------------------------------------------------------------===//
-#include "SparcInternals.h"
#include "SparcInstrSelectionSupport.h"
+#include "SparcInternals.h"
#include "SparcRegClassInfo.h"
-#include "llvm/CodeGen/InstrSelectionSupport.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineInstrAnnot.h"
+#include "llvm/Constants.h"
+#include "llvm/ConstantHandling.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/InstrForest.h"
#include "llvm/CodeGen/InstrSelection.h"
+#include "llvm/CodeGen/InstrSelectionSupport.h"
+#include "llvm/CodeGen/MachineCodeForInstruction.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineCodeForInstruction.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/iTerminators.h"
-#include "llvm/iMemory.h"
-#include "llvm/iOther.h"
-#include "llvm/Function.h"
-#include "llvm/Constants.h"
-#include "llvm/ConstantHandling.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineInstrAnnot.h"
#include "Support/MathExtras.h"
-#include <math.h>
-using std::vector;
+#include <algorithm>
+#include <cmath>
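+
+// Add3OperandInstr -- Append a single three-operand machine instruction
+// computed from a binary-operator tree node. For example, passing V9::ADDr
+// appends `add leftVal, rightVal -> resultVal' to mvec.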
static inline void Add3OperandInstr(unsigned Opcode, InstructionNode* Node,
- vector<MachineInstr*>& mvec) {
+ std::vector<MachineInstr*>& mvec) {
mvec.push_back(BuildMI(Opcode, 3).addReg(Node->leftChild()->getValue())
.addReg(Node->rightChild()->getValue())
.addRegDef(Node->getValue()));
}
-
-//---------------------------------------------------------------------------
-// Function: GetMemInstArgs
-//
-// Purpose:
-// Get the pointer value and the index vector for a memory operation
-// (GetElementPtr, Load, or Store). If all indices of the given memory
-// operation are constant, fold in constant indices in a chain of
-// preceding GetElementPtr instructions (if any), and return the
-// pointer value of the first instruction in the chain.
-// All folded instructions are marked so no code is generated for them.
-//
-// Return values:
-// Returns the pointer Value to use.
-// Returns the resulting IndexVector in idxVec.
-// Returns true/false in allConstantIndices if all indices are/aren't const.
-//---------------------------------------------------------------------------
-
-
//---------------------------------------------------------------------------
// Function: FoldGetElemChain
//
}
static Value*
-FoldGetElemChain(InstrTreeNode* ptrNode, vector<Value*>& chainIdxVec,
+FoldGetElemChain(InstrTreeNode* ptrNode, std::vector<Value*>& chainIdxVec,
bool lastInstHasLeadingNonZero)
{
InstructionNode* gepNode = dyn_cast<InstructionNode>(ptrNode);
InstructionNode* ptrChild = gepNode;
while (ptrChild && (ptrChild->getOpLabel() == Instruction::GetElementPtr ||
ptrChild->getOpLabel() == GetElemPtrIdx))
- {
- // Child is a GetElemPtr instruction
- gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
- User::op_iterator OI, firstIdx = gepInst->idx_begin();
- User::op_iterator lastIdx = gepInst->idx_end();
- bool allConstantOffsets = true;
-
- // The first index of every GEP must be an array index.
- assert((*firstIdx)->getType() == Type::LongTy &&
- "INTERNAL ERROR: Structure index for a pointer type!");
-
- // If the last instruction had a leading non-zero index, check if the
- // current one references a sequential (i.e., indexable) type.
- // If not, the code is not type-safe and we would create an illegal GEP
- // by folding them, so don't fold any more instructions.
- //
- if (lastInstHasLeadingNonZero)
- if (! isa<SequentialType>(gepInst->getType()->getElementType()))
- break; // cannot fold in any preceding getElementPtr instrs.
-
- // Check that all offsets are constant for this instruction
- for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
- allConstantOffsets = isa<ConstantInt>(*OI);
-
- if (allConstantOffsets)
- { // Get pointer value out of ptrChild.
- ptrVal = gepInst->getPointerOperand();
-
- // Remember if it has leading zero index: it will be discarded later.
- lastInstHasLeadingNonZero = ! IsZero(*firstIdx);
-
- // Insert its index vector at the start, skipping any leading [0]
- chainIdxVec.insert(chainIdxVec.begin(),
- firstIdx + !lastInstHasLeadingNonZero, lastIdx);
-
- // Mark the folded node so no code is generated for it.
- ((InstructionNode*) ptrChild)->markFoldedIntoParent();
-
- // Get the previous GEP instruction and continue trying to fold
- ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
- }
- else // cannot fold this getElementPtr instr. or any preceding ones
- break;
- }
+ {
+ // Child is a GetElemPtr instruction
+ gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
+ User::op_iterator OI, firstIdx = gepInst->idx_begin();
+ User::op_iterator lastIdx = gepInst->idx_end();
+ bool allConstantOffsets = true;
+
+ // The first index of every GEP must be an array index.
+ assert((*firstIdx)->getType() == Type::LongTy &&
+ "INTERNAL ERROR: Structure index for a pointer type!");
+
+ // If the last instruction had a leading non-zero index, check if the
+ // current one references a sequential (i.e., indexable) type.
+ // If not, the code is not type-safe and we would create an illegal GEP
+ // by folding them, so don't fold any more instructions.
+ //
+ if (lastInstHasLeadingNonZero)
+ if (! isa<SequentialType>(gepInst->getType()->getElementType()))
+ break; // cannot fold in any preceding getElementPtr instrs.
+
+ // Check that all offsets are constant for this instruction
+ for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
+ allConstantOffsets = isa<ConstantInt>(*OI);
+
+ if (allConstantOffsets) {
+ // Get pointer value out of ptrChild.
+ ptrVal = gepInst->getPointerOperand();
+
+ // Insert its index vector at the start, skipping any leading [0]
+ // Remember the old size to check if anything was inserted.
+ unsigned oldSize = chainIdxVec.size();
+ int firstIsZero = IsZero(*firstIdx);
+ chainIdxVec.insert(chainIdxVec.begin(), firstIdx + firstIsZero, lastIdx);
+
+      // If any indices were inserted, remember whether the leading index
+      // was non-zero (a leading zero index is skipped above).
+ if (oldSize < chainIdxVec.size())
+ lastInstHasLeadingNonZero = !firstIsZero;
+
+ // Mark the folded node so no code is generated for it.
+ ((InstructionNode*) ptrChild)->markFoldedIntoParent();
+
+ // Get the previous GEP instruction and continue trying to fold
+ ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
+ } else // cannot fold this getElementPtr instr. or any preceding ones
+ break;
+ }
// If the first getElementPtr instruction had a leading [0], add it back.
- // Note that this instruction is the *last* one successfully folded above.
+ // Note that this instruction is the *last* one that was successfully
+  // folded *and* that contributed any indices in the loop above.
+ //
if (ptrVal && ! lastInstHasLeadingNonZero)
chainIdxVec.insert(chainIdxVec.begin(), ConstantSInt::get(Type::LongTy,0));
static Value *
GetGEPInstArgs(InstructionNode* gepNode,
- vector<Value*>& idxVec,
+ std::vector<Value*>& idxVec,
bool& allConstantIndices)
{
allConstantIndices = true;
Value* ptrVal = gepI->getPointerOperand();
InstrTreeNode* ptrChild = gepNode->leftChild();
- // Extract the index vector of the GEP instructin.
+ // Extract the index vector of the GEP instruction.
// If all indices are constant and first index is zero, try to fold
// in preceding GEPs with all constant indices.
for (User::op_iterator OI=gepI->idx_begin(), OE=gepI->idx_end();
bool foldedGEPs = false;
bool leadingNonZeroIdx = gepI && ! IsZero(*gepI->idx_begin());
if (allConstantIndices)
- if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx))
- {
- ptrVal = newPtr;
- foldedGEPs = true;
- }
+ if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx)) {
+ ptrVal = newPtr;
+ foldedGEPs = true;
+ }
// Append the index vector of the current instruction.
// Skip the leading [0] index if preceding GEPs were folded into this.
static Value*
GetMemInstArgs(InstructionNode* memInstrNode,
- vector<Value*>& idxVec,
+ std::vector<Value*>& idxVec,
bool& allConstantIndices)
{
allConstantIndices = false;
InstructionNode* gepNode = NULL;
if (isa<GetElementPtrInst>(memInst))
gepNode = memInstrNode;
- else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal))
- { // Child of load/store is a GEP and memInst is its only use.
- // Use its indices and mark it as folded.
- gepNode = cast<InstructionNode>(ptrChild);
- gepNode->markFoldedIntoParent();
- }
+ else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal)) {
+ // Child of load/store is a GEP and memInst is its only use.
+ // Use its indices and mark it as folded.
+ gepNode = cast<InstructionNode>(ptrChild);
+ gepNode->markFoldedIntoParent();
+ }
// If there are no indices, return the current pointer.
// Else extract the pointer from the GEP and fold the indices.
((InstructionNode*) instrNode->leftChild())->getInstruction();
switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BRZ; break;
- case Instruction::SetNE: opCode = V9::BRNZ; break;
- case Instruction::SetLE: opCode = V9::BRLEZ; break;
- case Instruction::SetGE: opCode = V9::BRGEZ; break;
- case Instruction::SetLT: opCode = V9::BRLZ; break;
- case Instruction::SetGT: opCode = V9::BRGZ; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- opCode = V9::INVALID_OPCODE;
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::BRZ; break;
+ case Instruction::SetNE: opCode = V9::BRNZ; break;
+ case Instruction::SetLE: opCode = V9::BRLEZ; break;
+ case Instruction::SetGE: opCode = V9::BRGEZ; break;
+ case Instruction::SetLT: opCode = V9::BRLZ; break;
+ case Instruction::SetGT: opCode = V9::BRGZ; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ opCode = V9::INVALID_OPCODE;
+ break;
+ }
return opCode;
}
bool isSigned = setCCInstr->getOperand(0)->getType()->isSigned();
- if (isSigned)
+ if (isSigned) {
+ switch(setCCInstr->getOpcode())
{
- switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BE; break;
- case Instruction::SetNE: opCode = V9::BNE; break;
- case Instruction::SetLE: opCode = V9::BLE; break;
- case Instruction::SetGE: opCode = V9::BGE; break;
- case Instruction::SetLT: opCode = V9::BL; break;
- case Instruction::SetGT: opCode = V9::BG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ case Instruction::SetEQ: opCode = V9::BE; break;
+ case Instruction::SetNE: opCode = V9::BNE; break;
+ case Instruction::SetLE: opCode = V9::BLE; break;
+ case Instruction::SetGE: opCode = V9::BGE; break;
+ case Instruction::SetLT: opCode = V9::BL; break;
+ case Instruction::SetGT: opCode = V9::BG; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
}
- else
+ } else {
+ switch(setCCInstr->getOpcode())
{
- switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BE; break;
- case Instruction::SetNE: opCode = V9::BNE; break;
- case Instruction::SetLE: opCode = V9::BLEU; break;
- case Instruction::SetGE: opCode = V9::BCC; break;
- case Instruction::SetLT: opCode = V9::BCS; break;
- case Instruction::SetGT: opCode = V9::BGU; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ case Instruction::SetEQ: opCode = V9::BE; break;
+ case Instruction::SetNE: opCode = V9::BNE; break;
+ case Instruction::SetLE: opCode = V9::BLEU; break;
+ case Instruction::SetGE: opCode = V9::BCC; break;
+ case Instruction::SetLT: opCode = V9::BCS; break;
+ case Instruction::SetGT: opCode = V9::BGU; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
}
+ }
return opCode;
}
MachineOpCode opCode = V9::INVALID_OPCODE;
switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::FBE; break;
- case Instruction::SetNE: opCode = V9::FBNE; break;
- case Instruction::SetLE: opCode = V9::FBLE; break;
- case Instruction::SetGE: opCode = V9::FBGE; break;
- case Instruction::SetLT: opCode = V9::FBL; break;
- case Instruction::SetGT: opCode = V9::FBG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::FBE; break;
+ case Instruction::SetNE: opCode = V9::FBNE; break;
+ case Instruction::SetLE: opCode = V9::FBLE; break;
+ case Instruction::SetGE: opCode = V9::FBGE; break;
+ case Instruction::SetLT: opCode = V9::FBL; break;
+ case Instruction::SetGT: opCode = V9::FBG; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
+ }
return opCode;
}
// TmpInstructions will be freed along with the rest of the Function anyway.
//
static TmpInstruction*
-GetTmpForCC(Value* boolVal, const Function *F, const Type* ccType)
+GetTmpForCC(Value* boolVal, const Function *F, const Type* ccType,
+ MachineCodeForInstruction& mcfi)
{
typedef hash_map<const Value*, TmpInstruction*> BoolTmpCache;
static BoolTmpCache boolToTmpCache; // Map boolVal -> TmpInstruction*
assert(boolVal->getType() == Type::BoolTy && "Weird but ok! Delete assert");
- if (lastFunction != F)
- {
- lastFunction = F;
- boolToTmpCache.clear();
- }
+ if (lastFunction != F) {
+ lastFunction = F;
+ boolToTmpCache.clear();
+ }
// Look for tmpI and create a new one otherwise. The new value is
// directly written to map using the ref returned by operator[].
TmpInstruction*& tmpI = boolToTmpCache[boolVal];
if (tmpI == NULL)
- tmpI = new TmpInstruction(ccType, boolVal);
+ tmpI = new TmpInstruction(mcfi, ccType, boolVal);
return tmpI;
}
static inline MachineOpCode
ChooseBccInstruction(const InstructionNode* instrNode,
- bool& isFPBranch)
+ const Type*& setCCType)
{
InstructionNode* setCCNode = (InstructionNode*) instrNode->leftChild();
assert(setCCNode->getOpLabel() == SetCCOp);
BinaryOperator* setCCInstr =cast<BinaryOperator>(setCCNode->getInstruction());
- const Type* setCCType = setCCInstr->getOperand(0)->getType();
-
- isFPBranch = setCCType->isFloatingPoint(); // Return value: don't delete!
+ setCCType = setCCInstr->getOperand(0)->getType();
- if (isFPBranch)
+ if (setCCType->isFloatingPoint())
return ChooseBFpccInstruction(instrNode, setCCInstr);
else
return ChooseBpccInstruction(instrNode, setCCInstr);
}
+// WARNING: since this function has only one caller, it always returns
+// the opcode that expects an immediate and a register. If this function
+// is ever used in cases where an opcode that takes two registers is required,
+// then modify this function and use convertOpcodeFromRegToImm() where required.
+//
+// It will be necessary to expand convertOpcodeFromRegToImm() to handle any
+// such new opcodes.
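+// (By the r/i suffix convention used here, convertOpcodeFromRegToImm()
+// maps a register-form opcode to its immediate form, e.g. V9::ADDr to
+// V9::ADDi.)
+//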
static inline MachineOpCode
-ChooseMovFpccInstruction(const InstructionNode* instrNode)
+ChooseMovFpcciInstruction(const InstructionNode* instrNode)
{
MachineOpCode opCode = V9::INVALID_OPCODE;
switch(instrNode->getInstruction()->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::MOVFE; break;
- case Instruction::SetNE: opCode = V9::MOVFNE; break;
- case Instruction::SetLE: opCode = V9::MOVFLE; break;
- case Instruction::SetGE: opCode = V9::MOVFGE; break;
- case Instruction::SetLT: opCode = V9::MOVFL; break;
- case Instruction::SetGT: opCode = V9::MOVFG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::MOVFEi; break;
+ case Instruction::SetNE: opCode = V9::MOVFNEi; break;
+ case Instruction::SetLE: opCode = V9::MOVFLEi; break;
+ case Instruction::SetGE: opCode = V9::MOVFGEi; break;
+ case Instruction::SetLT: opCode = V9::MOVFLi; break;
+ case Instruction::SetGT: opCode = V9::MOVFGi; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
+ }
return opCode;
}
-// Assumes that SUBcc v1, v2 -> v3 has been executed.
-// In most cases, we want to clear v3 and then follow it by instruction
-// MOVcc 1 -> v3.
-// Set mustClearReg=false if v3 need not be cleared before conditional move.
-// Set valueToMove=0 if we want to conditionally move 0 instead of 1
-// (i.e., we want to test inverse of a condition)
-// (The latter two cases do not seem to arise because SetNE needs nothing.)
+// ChooseMovpcciForSetCC -- Choose a conditional-move instruction
+// based on the type of SetCC operation.
+//
+// WARNING: since this function has only one caller, it always returns
+// the opcode that expects an immediate and a register. If this function
+// is ever used in cases where an opcode that takes two registers is required,
+// then modify this function and use convertOpcodeFromRegToImm() where required.
+//
+// It will be necessary to expand convertOpcodeFromRegToImm() to handle any
+// such new opcodes.
//
static MachineOpCode
-ChooseMovpccAfterSub(const InstructionNode* instrNode,
- bool& mustClearReg,
- int& valueToMove)
+ChooseMovpcciForSetCC(const InstructionNode* instrNode)
{
MachineOpCode opCode = V9::INVALID_OPCODE;
- mustClearReg = true;
- valueToMove = 1;
+
+ const Type* opType = instrNode->leftChild()->getValue()->getType();
+ assert(opType->isIntegral() || isa<PointerType>(opType));
+ bool noSign = opType->isUnsigned() || isa<PointerType>(opType);
switch(instrNode->getInstruction()->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::MOVE; break;
- case Instruction::SetLE: opCode = V9::MOVLE; break;
- case Instruction::SetGE: opCode = V9::MOVGE; break;
- case Instruction::SetLT: opCode = V9::MOVL; break;
- case Instruction::SetGT: opCode = V9::MOVG; break;
- case Instruction::SetNE: assert(0 && "No move required!"); break;
- default: assert(0 && "Unrecognized VM instr!"); break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::MOVEi; break;
+ case Instruction::SetLE: opCode = noSign? V9::MOVLEUi : V9::MOVLEi; break;
+ case Instruction::SetGE: opCode = noSign? V9::MOVCCi : V9::MOVGEi; break;
+ case Instruction::SetLT: opCode = noSign? V9::MOVCSi : V9::MOVLi; break;
+ case Instruction::SetGT: opCode = noSign? V9::MOVGUi : V9::MOVGi; break;
+ case Instruction::SetNE: opCode = V9::MOVNEi; break;
+ default: assert(0 && "Unrecognized LLVM instr!"); break;
+ }
return opCode;
}
-static inline MachineOpCode
-ChooseConvertToFloatInstr(OpLabel vopCode, const Type* opType)
+
+// ChooseMovpregiForSetCC -- Choose a conditional-move-on-register-value
+// instruction based on the type of SetCC operation. These instructions
+// compare a register with 0 and perform the move if the comparison is true.
+//
+// WARNING: like the previous function, this one always returns
+// the opcode that expects an immediate and a register. See above.
+//
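+// For example, a SetLT tested against zero can be lowered as
+// `movrlz reg, 1, dest' (with dest cleared beforehand), testing the
+// register directly instead of using a SUBcc to set the condition codes.
+//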
+static MachineOpCode
+ChooseMovpregiForSetCC(const InstructionNode* instrNode)
{
MachineOpCode opCode = V9::INVALID_OPCODE;
- switch(vopCode)
- {
- case ToFloatTy:
- if (opType == Type::SByteTy || opType == Type::ShortTy || opType == Type::IntTy)
- opCode = V9::FITOS;
- else if (opType == Type::LongTy)
- opCode = V9::FXTOS;
- else if (opType == Type::DoubleTy)
- opCode = V9::FDTOS;
- else if (opType == Type::FloatTy)
- ;
- else
- assert(0 && "Cannot convert this type to FLOAT on SPARC");
- break;
-
- case ToDoubleTy:
- // This is usually used in conjunction with CreateCodeToCopyIntToFloat().
- // Both functions should treat the integer as a 32-bit value for types
- // of 4 bytes or less, and as a 64-bit value otherwise.
- if (opType == Type::SByteTy || opType == Type::UByteTy ||
- opType == Type::ShortTy || opType == Type::UShortTy ||
- opType == Type::IntTy || opType == Type::UIntTy)
- opCode = V9::FITOD;
- else if (opType == Type::LongTy || opType == Type::ULongTy)
- opCode = V9::FXTOD;
- else if (opType == Type::FloatTy)
- opCode = V9::FSTOD;
- else if (opType == Type::DoubleTy)
- ;
- else
- assert(0 && "Cannot convert this type to DOUBLE on SPARC");
- break;
-
- default:
- break;
- }
+ switch(instrNode->getInstruction()->getOpcode())
+ {
+ case Instruction::SetEQ: opCode = V9::MOVRZi; break;
+ case Instruction::SetLE: opCode = V9::MOVRLEZi; break;
+ case Instruction::SetGE: opCode = V9::MOVRGEZi; break;
+ case Instruction::SetLT: opCode = V9::MOVRLZi; break;
+ case Instruction::SetGT: opCode = V9::MOVRGZi; break;
+ case Instruction::SetNE: opCode = V9::MOVRNZi; break;
+ default: assert(0 && "Unrecognized VM instr!"); break;
+ }
return opCode;
}
-static inline MachineOpCode
-ChooseConvertFPToIntInstr(Type::PrimitiveID tid, const Type* opType)
+
+static inline MachineOpCode
+ChooseConvertToFloatInstr(const TargetMachine& target,
+ OpLabel vopCode, const Type* opType)
{
- MachineOpCode opCode = V9::INVALID_OPCODE;;
+ assert((vopCode == ToFloatTy || vopCode == ToDoubleTy) &&
+ "Unrecognized convert-to-float opcode!");
+ assert((opType->isIntegral() || opType->isFloatingPoint() ||
+ isa<PointerType>(opType))
+ && "Trying to convert a non-scalar type to FLOAT/DOUBLE?");
+
+ MachineOpCode opCode = V9::INVALID_OPCODE;
+
+ unsigned opSize = target.getTargetData().getTypeSize(opType);
+
+ if (opType == Type::FloatTy)
+ opCode = (vopCode == ToFloatTy? V9::NOP : V9::FSTOD);
+ else if (opType == Type::DoubleTy)
+ opCode = (vopCode == ToFloatTy? V9::FDTOS : V9::NOP);
+ else if (opSize <= 4)
+ opCode = (vopCode == ToFloatTy? V9::FITOS : V9::FITOD);
+ else {
+ assert(opSize == 8 && "Unrecognized type size > 4 and < 8!");
+ opCode = (vopCode == ToFloatTy? V9::FXTOS : V9::FXTOD);
+ }
+
+ return opCode;
+}
+static inline MachineOpCode
+ChooseConvertFPToIntInstr(const TargetMachine& target,
+ const Type* destType, const Type* opType)
+{
assert((opType == Type::FloatTy || opType == Type::DoubleTy)
&& "This function should only be called for FLOAT or DOUBLE");
+ assert((destType->isIntegral() || isa<PointerType>(destType))
+ && "Trying to convert FLOAT/DOUBLE to a non-scalar type?");
- if (tid==Type::UIntTyID)
- {
- assert(tid != Type::UIntTyID && "FP-to-uint conversions must be expanded"
- " into FP->long->uint for SPARC v9: SO RUN PRESELECTION PASS!");
- }
- else if (tid==Type::SByteTyID || tid==Type::ShortTyID || tid==Type::IntTyID ||
- tid==Type::UByteTyID || tid==Type::UShortTyID)
- {
- opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
- }
- else if (tid==Type::LongTyID || tid==Type::ULongTyID)
- {
- opCode = (opType == Type::FloatTy)? V9::FSTOX : V9::FDTOX;
- }
- else
- assert(0 && "Should not get here, Mo!");
+ MachineOpCode opCode = V9::INVALID_OPCODE;
+
+ unsigned destSize = target.getTargetData().getTypeSize(destType);
+
+  if (destType == Type::UIntTy)
+    assert(destType != Type::UIntTy && "FP-to-uint conversions must be"
+           " expanded into FP->long->uint beforehand: run the Preselection"
+           " pass!");
+ else if (destSize <= 4)
+ opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
+ else {
+ assert(destSize == 8 && "Unrecognized type size > 4 and < 8!");
+ opCode = (opType == Type::FloatTy)? V9::FSTOX : V9::FDTOX;
+ }
return opCode;
}
-MachineInstr*
-CreateConvertFPToIntInstr(Type::PrimitiveID destTID,
- Value* srcVal, Value* destVal)
+static MachineInstr*
+CreateConvertFPToIntInstr(const TargetMachine& target,
+ Value* srcVal,
+ Value* destVal,
+ const Type* destType)
{
- MachineOpCode opCode = ChooseConvertFPToIntInstr(destTID, srcVal->getType());
+ MachineOpCode opCode = ChooseConvertFPToIntInstr(target, destType,
+ srcVal->getType());
assert(opCode != V9::INVALID_OPCODE && "Expected to need conversion!");
return BuildMI(opCode, 2).addReg(srcVal).addRegDef(destVal);
}
// CreateCodeToConvertFloatToInt: Convert FP value to signed or unsigned integer
// The FP value must be converted to the dest type in an FP register,
// and the result is then copied from FP to int register via memory.
-//
+// SPARC does not have a float-to-uint conversion, only a float-to-int (fdtoi).
// Since fdtoi converts to signed integers, any FP value V between MAXINT+1
-// and MAXUNSIGNED (i.e., 2^31 <= V <= 2^32-1) would be converted incorrectly
-// *only* when converting to an unsigned. (Unsigned byte, short or long
-// don't have this problem.)
-// For unsigned int, we therefore have to generate the code sequence:
-//
-// if (V > (float) MAXINT) {
-// unsigned result = (unsigned) (V - (float) MAXINT);
-// result = result + (unsigned) MAXINT;
-// }
-// else
-// result = (unsigned) V;
+// and MAXUNSIGNED (i.e., 2^31 <= V <= 2^32-1) would be converted incorrectly.
+// Therefore, for converting an FP value to uint32_t, we first need to convert
+// to uint64_t and then to uint32_t.
//
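+// For example, V = 3e9 lies between 2^31 and 2^32-1: a direct fdtoi (whose
+// result is a *signed* 32-bit integer) cannot represent it, but fdtox yields
+// the 64-bit value 3000000000, whose low 32 bits are the desired uint32_t.
+//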
static void
CreateCodeToConvertFloatToInt(const TargetMachine& target,
std::vector<MachineInstr*>& mvec,
MachineCodeForInstruction& mcfi)
{
+ Function* F = destI->getParent()->getParent();
+
// Create a temporary to represent the FP register into which the
// int value will placed after conversion. The type of this temporary
// depends on the type of FP register to use: single-prec for a 32-bit
// int or smaller; double-prec for a 64-bit int.
//
size_t destSize = target.getTargetData().getTypeSize(destI->getType());
- const Type* destTypeToUse = (destSize > 4)? Type::DoubleTy : Type::FloatTy;
- TmpInstruction* destForCast = new TmpInstruction(destTypeToUse, opVal);
- mcfi.addTemp(destForCast);
- // Create the fp-to-int conversion code
- MachineInstr* M =CreateConvertFPToIntInstr(destI->getType()->getPrimitiveID(),
- opVal, destForCast);
- mvec.push_back(M);
+ const Type* castDestType = destI->getType(); // type for the cast instr result
+ const Type* castDestRegType; // type for cast instruction result reg
+ TmpInstruction* destForCast; // dest for cast instruction
+ Instruction* fpToIntCopyDest = destI; // dest for fp-reg-to-int-reg copy instr
+
+ // For converting an FP value to uint32_t, we first need to convert to
+ // uint64_t and then to uint32_t, as explained above.
+ if (destI->getType() == Type::UIntTy) {
+ castDestType = Type::ULongTy; // use this instead of type of destI
+ castDestRegType = Type::DoubleTy; // uint64_t needs 64-bit FP register.
+ destForCast = new TmpInstruction(mcfi, castDestRegType, opVal);
+ fpToIntCopyDest = new TmpInstruction(mcfi, castDestType, destForCast);
+  } else {
+ castDestRegType = (destSize > 4)? Type::DoubleTy : Type::FloatTy;
+ destForCast = new TmpInstruction(mcfi, castDestRegType, opVal);
+ }
+
+ // Create the fp-to-int conversion instruction (src and dest regs are FP regs)
+ mvec.push_back(CreateConvertFPToIntInstr(target, opVal, destForCast,
+ castDestType));
// Create the fpreg-to-intreg copy code
- target.getInstrInfo().
- CreateCodeToCopyFloatToInt(target, destI->getParent()->getParent(),
- destForCast, destI, mvec, mcfi);
+ target.getInstrInfo().CreateCodeToCopyFloatToInt(target, F, destForCast,
+ fpToIntCopyDest, mvec, mcfi);
+
+ // Create the uint64_t to uint32_t conversion, if needed
+ if (destI->getType() == Type::UIntTy)
+ target.getInstrInfo().
+ CreateZeroExtensionInstructions(target, F, fpToIntCopyDest, destI,
+ /*numLowBits*/ 32, mvec, mcfi);
}
// instead of an FADD (1 vs 3 cycles). There is no integer MOV.
//
if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
- double dval = FPC->getValue();
- if (dval == 0.0)
- minstr = CreateMovFloatInstruction(instrNode,
- instrNode->getInstruction()->getType());
- }
+ double dval = FPC->getValue();
+ if (dval == 0.0)
+ minstr = CreateMovFloatInstruction(instrNode,
+ instrNode->getInstruction()->getType());
+ }
return minstr;
}
{
MachineOpCode opCode = V9::INVALID_OPCODE;
- if (resultType->isInteger() || isa<PointerType>(resultType))
+ if (resultType->isInteger() || isa<PointerType>(resultType)) {
+ opCode = V9::SUBr;
+ } else {
+ switch(resultType->getPrimitiveID())
{
- opCode = V9::SUB;
+ case Type::FloatTyID: opCode = V9::FSUBS; break;
+ case Type::DoubleTyID: opCode = V9::FSUBD; break;
+ default: assert(0 && "Invalid type for SUB instruction"); break;
}
- else
- switch(resultType->getPrimitiveID())
- {
- case Type::FloatTyID: opCode = V9::FSUBS; break;
- case Type::DoubleTyID: opCode = V9::FSUBD; break;
- default: assert(0 && "Invalid type for SUB instruction"); break;
- }
-
+ }
+
return opCode;
}
MachineOpCode opCode = V9::INVALID_OPCODE;
if (resultType->isInteger())
- opCode = V9::MULX;
+ opCode = V9::MULXr;
else
switch(resultType->getPrimitiveID())
- {
- case Type::FloatTyID: opCode = V9::FMULS; break;
- case Type::DoubleTyID: opCode = V9::FMULD; break;
- default: assert(0 && "Invalid type for MUL instruction"); break;
- }
+ {
+ case Type::FloatTyID: opCode = V9::FMULS; break;
+ case Type::DoubleTyID: opCode = V9::FMULD; break;
+ default: assert(0 && "Invalid type for MUL instruction"); break;
+ }
return opCode;
}
CreateIntNegInstruction(const TargetMachine& target,
Value* vreg)
{
- return BuildMI(V9::SUB, 3).addMReg(target.getRegInfo().getZeroRegNum())
+ return BuildMI(V9::SUBr, 3).addMReg(target.getRegInfo().getZeroRegNum())
.addReg(vreg).addRegDef(vreg);
}
Value* optArgVal2, /* Use optArgVal2 if not NULL */
unsigned optShiftNum, /* else use optShiftNum */
Instruction* destVal,
- vector<MachineInstr*>& mvec,
+ std::vector<MachineInstr*>& mvec,
MachineCodeForInstruction& mcfi)
{
assert((optArgVal2 != NULL || optShiftNum <= 64) &&
//
Value* shiftDest = destVal;
unsigned opSize = target.getTargetData().getTypeSize(argVal1->getType());
- if ((shiftOpCode == V9::SLL || shiftOpCode == V9::SLLX) && opSize < 8)
- { // put SLL result into a temporary
- shiftDest = new TmpInstruction(argVal1, optArgVal2, "sllTmp");
- mcfi.addTemp(shiftDest);
- }
+
+ if ((shiftOpCode == V9::SLLr5 || shiftOpCode == V9::SLLXr6) && opSize < 8) {
+ // put SLL result into a temporary
+ shiftDest = new TmpInstruction(mcfi, argVal1, optArgVal2, "sllTmp");
+ }
MachineInstr* M = (optArgVal2 != NULL)
? BuildMI(shiftOpCode, 3).addReg(argVal1).addReg(optArgVal2)
.addReg(shiftDest, MOTy::Def);
mvec.push_back(M);
- if (shiftDest != destVal)
- { // extend the sign-bit of the result into all upper bits of dest
- assert(8*opSize <= 32 && "Unexpected type size > 4 and < IntRegSize?");
- target.getInstrInfo().
- CreateSignExtensionInstructions(target, F, shiftDest, destVal,
- 8*opSize, mvec, mcfi);
- }
+ if (shiftDest != destVal) {
+ // extend the sign-bit of the result into all upper bits of dest
+ assert(8*opSize <= 32 && "Unexpected type size > 4 and < IntRegSize?");
+ target.getInstrInfo().
+ CreateSignExtensionInstructions(target, F, shiftDest, destVal,
+ 8*opSize, mvec, mcfi);
+ }
}
// create a cheaper instruction.
// This returns the approximate cost of the instructions generated,
// which is used to pick the cheapest when both operands are constant.
-static inline unsigned
+static unsigned
CreateMulConstInstruction(const TargetMachine &target, Function* F,
Value* lval, Value* rval, Instruction* destVal,
- vector<MachineInstr*>& mvec,
+ std::vector<MachineInstr*>& mvec,
MachineCodeForInstruction& mcfi)
{
/* Use max. multiply cost, viz., cost of MULX */
- unsigned cost = target.getInstrInfo().minLatency(V9::MULX);
+ unsigned cost = target.getInstrInfo().minLatency(V9::MULXr);
unsigned firstNewInstr = mvec.size();
Value* constOp = rval;
if (resultType->isInteger() || isa<PointerType>(resultType)) {
bool isValidConst;
- int64_t C = GetConstantValueAsSignedInt(constOp, isValidConst);
+ int64_t C = (int64_t) target.getInstrInfo().ConvertConstantToIntType(target,
+ constOp, constOp->getType(), isValidConst);
if (isValidConst) {
unsigned pow;
bool needNeg = false;
}
if (C == 0 || C == 1) {
- cost = target.getInstrInfo().minLatency(V9::ADD);
+ cost = target.getInstrInfo().minLatency(V9::ADDr);
unsigned Zero = target.getRegInfo().getZeroRegNum();
MachineInstr* M;
if (C == 0)
- M = BuildMI(V9::ADD,3).addMReg(Zero).addMReg(Zero).addRegDef(destVal);
+ M =BuildMI(V9::ADDr,3).addMReg(Zero).addMReg(Zero).addRegDef(destVal);
else
- M = BuildMI(V9::ADD,3).addReg(lval).addMReg(Zero).addRegDef(destVal);
+ M = BuildMI(V9::ADDr,3).addReg(lval).addMReg(Zero).addRegDef(destVal);
mvec.push_back(M);
- }
- else if (isPowerOf2(C, pow)) {
+ } else if (isPowerOf2(C, pow)) {
unsigned opSize = target.getTargetData().getTypeSize(resultType);
- MachineOpCode opCode = (opSize <= 32)? V9::SLL : V9::SLLX;
+ MachineOpCode opCode = (opSize <= 32)? V9::SLLr5 : V9::SLLXr6;
CreateShiftInstructions(target, F, opCode, lval, NULL, pow,
destVal, mvec, mcfi);
}
- if (mvec.size() > 0 && needNeg)
- { // insert <reg = SUB 0, reg> after the instr to flip the sign
+ if (mvec.size() > 0 && needNeg) {
+ // insert <reg = SUB 0, reg> after the instr to flip the sign
MachineInstr* M = CreateIntNegInstruction(target, destVal);
mvec.push_back(M);
}
Function* F,
Value* lval, Value* rval,
Instruction* destVal,
- vector<MachineInstr*>& mvec,
+ std::vector<MachineInstr*>& mvec,
MachineCodeForInstruction& mcfi)
{
Value* constOp;
- if (isa<Constant>(lval) && isa<Constant>(rval))
- { // both operands are constant: evaluate and "set" in dest
- Constant* P = ConstantFoldBinaryInstruction(Instruction::Mul,
- cast<Constant>(lval), cast<Constant>(rval));
- target.getInstrInfo().CreateCodeToLoadConst(target,F,P,destVal,mvec,mcfi);
- }
+ if (isa<Constant>(lval) && isa<Constant>(rval)) {
+ // both operands are constant: evaluate and "set" in dest
+ Constant* P = ConstantFoldBinaryInstruction(Instruction::Mul,
+ cast<Constant>(lval),
+ cast<Constant>(rval));
+ target.getInstrInfo().CreateCodeToLoadConst(target,F,P,destVal,mvec,mcfi);
+ }
else if (isa<Constant>(rval)) // rval is constant, but not lval
CreateMulConstInstruction(target, F, lval, rval, destVal, mvec, mcfi);
else if (isa<Constant>(lval)) // lval is constant, but not rval
static inline void
CreateMulInstruction(const TargetMachine &target, Function* F,
Value* lval, Value* rval, Instruction* destVal,
- vector<MachineInstr*>& mvec,
+ std::vector<MachineInstr*>& mvec,
MachineCodeForInstruction& mcfi,
MachineOpCode forceMulOp = INVALID_MACHINE_OPCODE)
{
const Type* resultType = instrNode->getInstruction()->getType();
if (resultType->isInteger())
- opCode = resultType->isSigned()? V9::SDIVX : V9::UDIVX;
+ opCode = resultType->isSigned()? V9::SDIVXr : V9::UDIVXr;
else
switch(resultType->getPrimitiveID())
{
// Return if we cannot exploit constant to create a cheaper instruction
-static inline void
+static void
CreateDivConstInstruction(TargetMachine &target,
const InstructionNode* instrNode,
- vector<MachineInstr*>& mvec)
+ std::vector<MachineInstr*>& mvec)
{
Value* LHS = instrNode->leftChild()->getValue();
Value* constOp = ((InstrTreeNode*) instrNode->rightChild())->getValue();
if (!isa<Constant>(constOp))
return;
- Value* DestVal = instrNode->getValue();
+ Instruction* destVal = instrNode->getInstruction();
unsigned ZeroReg = target.getRegInfo().getZeroRegNum();
// Cases worth optimizing are:
//
const Type* resultType = instrNode->getInstruction()->getType();
- if (resultType->isInteger())
- {
+ if (resultType->isInteger()) {
unsigned pow;
bool isValidConst;
- int64_t C = GetConstantValueAsSignedInt(constOp, isValidConst);
+ int64_t C = (int64_t) target.getInstrInfo().ConvertConstantToIntType(target,
+ constOp, constOp->getType(), isValidConst);
if (isValidConst) {
bool needNeg = false;
if (C < 0) {
needNeg = true;
C = -C;
}
-
+
if (C == 1) {
- mvec.push_back(BuildMI(V9::ADD, 3).addReg(LHS).addMReg(ZeroReg)
- .addRegDef(DestVal));
+ mvec.push_back(BuildMI(V9::ADDr, 3).addReg(LHS).addMReg(ZeroReg)
+ .addRegDef(destVal));
} else if (isPowerOf2(C, pow)) {
- unsigned opCode= ((resultType->isSigned())
- ? (resultType==Type::LongTy) ? V9::SRAX : V9::SRA
- : (resultType==Type::LongTy) ? V9::SRLX : V9::SRL);
- mvec.push_back(BuildMI(opCode, 3).addReg(LHS).addZImm(pow)
- .addRegDef(DestVal));
+ unsigned opCode;
+ Value* shiftOperand;
+ unsigned opSize = target.getTargetData().getTypeSize(resultType);
+
+ if (resultType->isSigned()) {
+ // For N / 2^k, if the operand N is negative,
+ // we need to add (2^k - 1) before right-shifting by k, i.e.,
+ //
+ // (N / 2^k) = N >> k, if N >= 0;
+ // (N + 2^k - 1) >> k, if N < 0
+ //
+ // If N is <= 32 bits, use:
+ // sra N, 31, t1 // t1 = ~0, if N < 0, 0 else
+ // srl t1, 32-k, t2 // t2 = 2^k - 1, if N < 0, 0 else
+ // add t2, N, t3 // t3 = N + 2^k -1, if N < 0, N else
+ // sra t3, k, result // result = N / 2^k
+ //
+ // If N is 64 bits, use:
+ // srax N, k-1, t1 // t1 = sign bit in high k positions
+ // srlx t1, 64-k, t2 // t2 = 2^k - 1, if N < 0, 0 else
+ // add t2, N, t3 // t3 = N + 2^k -1, if N < 0, N else
+ // sra t3, k, result // result = N / 2^k
+ //
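+          // Worked example (k = 2, N = -7): t2 = 3, t3 = -4, and
+          // -4 >> 2 = -1 = trunc(-7/4). A plain arithmetic shift alone
+          // would give -7 >> 2 = -2, rounding toward -infinity instead
+          // of toward zero.
+          //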
+ TmpInstruction *sraTmp, *srlTmp, *addTmp;
+ MachineCodeForInstruction& mcfi
+ = MachineCodeForInstruction::get(destVal);
+ sraTmp = new TmpInstruction(mcfi, resultType, LHS, 0, "getSign");
+ srlTmp = new TmpInstruction(mcfi, resultType, LHS, 0, "getPlus2km1");
+ addTmp = new TmpInstruction(mcfi, resultType, LHS, srlTmp,"incIfNeg");
+
+ // Create the SRA or SRAX instruction to get the sign bit
+ mvec.push_back(BuildMI((opSize > 4)? V9::SRAXi6 : V9::SRAi5, 3)
+ .addReg(LHS)
+ .addSImm((resultType==Type::LongTy)? pow-1 : 31)
+ .addRegDef(sraTmp));
+
+          // Create the SRL or SRLX instruction to compute 2^pow - 1 if N < 0
+ mvec.push_back(BuildMI((opSize > 4)? V9::SRLXi6 : V9::SRLi5, 3)
+ .addReg(sraTmp)
+ .addSImm((resultType==Type::LongTy)? 64-pow : 32-pow)
+ .addRegDef(srlTmp));
+
+ // Create the ADD instruction to add 2^pow-1 for negative values
+ mvec.push_back(BuildMI(V9::ADDr, 3).addReg(LHS).addReg(srlTmp)
+ .addRegDef(addTmp));
+
+ // Get the shift operand and "right-shift" opcode to do the divide
+ shiftOperand = addTmp;
+ opCode = (opSize > 4)? V9::SRAXi6 : V9::SRAi5;
+ } else {
+ // Get the shift operand and "right-shift" opcode to do the divide
+ shiftOperand = LHS;
+ opCode = (opSize > 4)? V9::SRLXi6 : V9::SRLi5;
+ }
+
+ // Now do the actual shift!
+ mvec.push_back(BuildMI(opCode, 3).addReg(shiftOperand).addZImm(pow)
+ .addRegDef(destVal));
}
if (needNeg && (C == 1 || isPowerOf2(C, pow))) {
// insert <reg = SUB 0, reg> after the instr to flip the sign
- mvec.push_back(CreateIntNegInstruction(target, DestVal));
+ mvec.push_back(CreateIntNegInstruction(target, destVal));
}
}
} else {
(dval < 0) ? (resultType == Type::FloatTy? V9::FNEGS : V9::FNEGD)
: (resultType == Type::FloatTy? V9::FMOVS : V9::FMOVD);
- mvec.push_back(BuildMI(opCode, 2).addReg(LHS).addRegDef(DestVal));
+ mvec.push_back(BuildMI(opCode, 2).addReg(LHS).addRegDef(destVal));
}
}
}
Instruction* result,
unsigned tsize,
Value* numElementsVal,
- vector<MachineInstr*>& getMvec)
+ std::vector<MachineInstr*>& getMvec)
{
Value* totalSizeVal;
MachineInstr* M;
// Enforce the alignment constraints on the stack pointer at
// compile time if the total size is a known constant.
- if (isa<Constant>(numElementsVal))
- {
- bool isValid;
- int64_t numElem = GetConstantValueAsSignedInt(numElementsVal, isValid);
- assert(isValid && "Unexpectedly large array dimension in alloca!");
- int64_t total = numElem * tsize;
- if (int extra= total % target.getFrameInfo().getStackFrameSizeAlignment())
- total += target.getFrameInfo().getStackFrameSizeAlignment() - extra;
- totalSizeVal = ConstantSInt::get(Type::IntTy, total);
- }
- else
- {
- // The size is not a constant. Generate code to compute it and
- // code to pad the size for stack alignment.
- // Create a Value to hold the (constant) element size
- Value* tsizeVal = ConstantSInt::get(Type::IntTy, tsize);
-
- // Create temporary values to hold the result of MUL, SLL, SRL
- // THIS CASE IS INCOMPLETE AND WILL BE FIXED SHORTLY.
- TmpInstruction* tmpProd = new TmpInstruction(numElementsVal, tsizeVal);
- TmpInstruction* tmpSLL = new TmpInstruction(numElementsVal, tmpProd);
- TmpInstruction* tmpSRL = new TmpInstruction(numElementsVal, tmpSLL);
- mcfi.addTemp(tmpProd);
- mcfi.addTemp(tmpSLL);
- mcfi.addTemp(tmpSRL);
-
- // Instruction 1: mul numElements, typeSize -> tmpProd
- // This will optimize the MUL as far as possible.
- CreateMulInstruction(target, F, numElementsVal, tsizeVal, tmpProd,getMvec,
- mcfi, INVALID_MACHINE_OPCODE);
-
- assert(0 && "Need to insert padding instructions here!");
-
- totalSizeVal = tmpProd;
- }
+ if (isa<Constant>(numElementsVal)) {
+ bool isValid;
+ int64_t numElem = (int64_t) target.getInstrInfo().
+ ConvertConstantToIntType(target, numElementsVal,
+ numElementsVal->getType(), isValid);
+ assert(isValid && "Unexpectedly large array dimension in alloca!");
+ int64_t total = numElem * tsize;
+ if (int extra= total % target.getFrameInfo().getStackFrameSizeAlignment())
+ total += target.getFrameInfo().getStackFrameSizeAlignment() - extra;
+ totalSizeVal = ConstantSInt::get(Type::IntTy, total);
+ } else {
+ // The size is not a constant. Generate code to compute it and
+ // code to pad the size for stack alignment.
+ // Create a Value to hold the (constant) element size
+ Value* tsizeVal = ConstantSInt::get(Type::IntTy, tsize);
+
+    // Create temporary values to hold the results of MUL, ADD, AND.
+    // To round `size' up to the next multiple of 16:
+ // size = (size + 15) & (-16 = 0xfffffffffffffff0)
+ //
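+    // For example, size = 25 gives (25 + 15) & -16 = 40 & -16 = 32, the
+    // smallest multiple of 16 that is >= 25.
+    //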
+ TmpInstruction* tmpProd = new TmpInstruction(mcfi,numElementsVal, tsizeVal);
+ TmpInstruction* tmpAdd15= new TmpInstruction(mcfi,numElementsVal, tmpProd);
+ TmpInstruction* tmpAndf0= new TmpInstruction(mcfi,numElementsVal, tmpAdd15);
+
+ // Instruction 1: mul numElements, typeSize -> tmpProd
+ // This will optimize the MUL as far as possible.
+ CreateMulInstruction(target, F, numElementsVal, tsizeVal, tmpProd, getMvec,
+ mcfi, INVALID_MACHINE_OPCODE);
+
+  // Instruction 2: add tmpProd, 15 -> tmpAdd15
+ getMvec.push_back(BuildMI(V9::ADDi, 3).addReg(tmpProd).addSImm(15)
+ .addReg(tmpAdd15, MOTy::Def));
+
+  // Instruction 3: and tmpAdd15, -16 -> tmpAndf0
+ getMvec.push_back(BuildMI(V9::ANDi, 3).addReg(tmpAdd15).addSImm(-16)
+ .addReg(tmpAndf0, MOTy::Def));
+
+ totalSizeVal = tmpAndf0;
+ }
// Get the constant offset from SP for dynamically allocated storage
// and create a temporary Value to hold it.
unsigned SPReg = target.getRegInfo().getStackPointer();
// Instruction 2: sub %sp, totalSizeVal -> %sp
- getMvec.push_back(BuildMI(V9::SUB, 3).addMReg(SPReg).addReg(totalSizeVal)
+ getMvec.push_back(BuildMI(V9::SUBr, 3).addMReg(SPReg).addReg(totalSizeVal)
.addMReg(SPReg,MOTy::Def));
// Instruction 3: add %sp, frameSizeBelowDynamicArea -> result
- getMvec.push_back(BuildMI(V9::ADD, 3).addMReg(SPReg).addReg(dynamicAreaOffset)
+ getMvec.push_back(BuildMI(V9::ADDr,3).addMReg(SPReg).addReg(dynamicAreaOffset)
.addRegDef(result));
}
Instruction* result,
unsigned tsize,
unsigned numElements,
- vector<MachineInstr*>& getMvec)
+ std::vector<MachineInstr*>& getMvec)
{
assert(tsize > 0 && "Illegal (zero) type size for alloca");
assert(result && result->getParent() &&
Function *F = result->getParent()->getParent();
MachineFunction &mcInfo = MachineFunction::get(F);
- // Check if the offset would small enough to use as an immediate in
- // load/stores (check LDX because all load/stores have the same-size immediate
- // field). If not, put the variable in the dynamically sized area of the
- // frame.
- unsigned paddedSizeIgnored;
+ // Put the variable in the dynamically sized area of the frame if either:
+ // (a) The offset is too large to use as an immediate in load/stores
+ // (check LDX because all load/stores have the same-size immed. field).
+ // (b) The object is "large", so it could cause many other locals,
+ // spills, and temporaries to have large offsets.
+ // NOTE: We use LARGE = 8 * argSlotSize = 64 bytes.
+ // You've gotta love having only 13 bits for constant offset values :-|.
+ //
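+  // A 13-bit signed immediate covers offsets in [-4096, 4095] only, so a
+  // local at offset 5000 from the FP, say, cannot be addressed directly.
+  //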
+ unsigned paddedSize;
int offsetFromFP = mcInfo.getInfo()->computeOffsetforLocalVar(result,
- paddedSizeIgnored,
- tsize * numElements);
- if (! target.getInstrInfo().constantFitsInImmedField(V9::LDX, offsetFromFP)) {
+ paddedSize,
+ tsize * numElements);
+
+ if (((int)paddedSize) > 8 * target.getFrameInfo().getSizeOfEachArgOnStack() ||
+ ! target.getInstrInfo().constantFitsInImmedField(V9::LDXi,offsetFromFP)) {
CreateCodeForVariableSizeAlloca(target, result, tsize,
ConstantSInt::get(Type::IntTy,numElements),
getMvec);
// Instruction 1: add %fp, offsetFromFP -> result
unsigned FPReg = target.getRegInfo().getFramePointer();
- getMvec.push_back(BuildMI(V9::ADD, 3).addMReg(FPReg).addReg(offsetVal)
+ getMvec.push_back(BuildMI(V9::ADDr, 3).addMReg(FPReg).addReg(offsetVal)
.addRegDef(result));
}
static void
SetOperandsForMemInstr(unsigned Opcode,
- vector<MachineInstr*>& mvec,
+ std::vector<MachineInstr*>& mvec,
InstructionNode* vmInstrNode,
const TargetMachine& target)
{
Instruction* memInst = vmInstrNode->getInstruction();
// Index vector, ptr value, and flag if all indices are const.
- vector<Value*> idxVec;
+ std::vector<Value*> idxVec;
bool allConstantIndices;
Value* ptrVal = GetMemInstArgs(vmInstrNode, idxVec, allConstantIndices);
// Check if there is an index vector and if so, compute the
// right offset for structures and for arrays
//
- if (!idxVec.empty())
- {
- const PointerType* ptrType = cast<PointerType>(ptrVal->getType());
+ if (!idxVec.empty()) {
+ const PointerType* ptrType = cast<PointerType>(ptrVal->getType());
- // If all indices are constant, compute the combined offset directly.
- if (allConstantIndices)
- {
- // Compute the offset value using the index vector. Create a
- // virtual reg. for it since it may not fit in the immed field.
- uint64_t offset = target.getTargetData().getIndexedOffset(ptrType,idxVec);
- valueForRegOffset = ConstantSInt::get(Type::LongTy, offset);
- }
- else
- {
- // There is at least one non-constant offset. Therefore, this must
- // be an array ref, and must have been lowered to a single non-zero
- // offset. (An extra leading zero offset, if any, can be ignored.)
- // Generate code sequence to compute address from index.
- //
- bool firstIdxIsZero = IsZero(idxVec[0]);
- assert(idxVec.size() == 1U + firstIdxIsZero
- && "Array refs must be lowered before Instruction Selection");
+ // If all indices are constant, compute the combined offset directly.
+ if (allConstantIndices) {
+ // Compute the offset value using the index vector. Create a
+ // virtual reg. for it since it may not fit in the immed field.
+ uint64_t offset = target.getTargetData().getIndexedOffset(ptrType,idxVec);
+ valueForRegOffset = ConstantSInt::get(Type::LongTy, offset);
+ } else {
+ // There is at least one non-constant offset. Therefore, this must
+ // be an array ref, and must have been lowered to a single non-zero
+ // offset. (An extra leading zero offset, if any, can be ignored.)
+ // Generate code sequence to compute address from index.
+ //
+ bool firstIdxIsZero = IsZero(idxVec[0]);
+ assert(idxVec.size() == 1U + firstIdxIsZero
+ && "Array refs must be lowered before Instruction Selection");
- Value* idxVal = idxVec[firstIdxIsZero];
+ Value* idxVal = idxVec[firstIdxIsZero];
- vector<MachineInstr*> mulVec;
- Instruction* addr = new TmpInstruction(Type::ULongTy, memInst);
- MachineCodeForInstruction::get(memInst).addTemp(addr);
+ std::vector<MachineInstr*> mulVec;
+ Instruction* addr =
+ new TmpInstruction(MachineCodeForInstruction::get(memInst),
+ Type::ULongTy, memInst);
- // Get the array type indexed by idxVal, and compute its element size.
- // The call to getTypeSize() will fail if size is not constant.
- const Type* vecType = (firstIdxIsZero
- ? GetElementPtrInst::getIndexedType(ptrType,
+ // Get the array type indexed by idxVal, and compute its element size.
+ // The call to getTypeSize() will fail if size is not constant.
+ const Type* vecType = (firstIdxIsZero
+ ? GetElementPtrInst::getIndexedType(ptrType,
std::vector<Value*>(1U, idxVec[0]),
/*AllowCompositeLeaf*/ true)
: ptrType);
- const Type* eltType = cast<SequentialType>(vecType)->getElementType();
- ConstantUInt* eltSizeVal = ConstantUInt::get(Type::ULongTy,
- target.getTargetData().getTypeSize(eltType));
-
- // CreateMulInstruction() folds constants intelligently enough.
- CreateMulInstruction(target, memInst->getParent()->getParent(),
- idxVal, /* lval, not likely to be const*/
- eltSizeVal, /* rval, likely to be constant */
- addr, /* result */
- mulVec, MachineCodeForInstruction::get(memInst),
- INVALID_MACHINE_OPCODE);
-
- assert(mulVec.size() > 0 && "No multiply code created?");
- mvec.insert(mvec.end(), mulVec.begin(), mulVec.end());
-
- valueForRegOffset = addr;
- }
- }
- else
- {
- offsetOpType = MachineOperand::MO_SignExtendedImmed;
- smallConstOffset = 0;
+ const Type* eltType = cast<SequentialType>(vecType)->getElementType();
+ ConstantUInt* eltSizeVal = ConstantUInt::get(Type::ULongTy,
+ target.getTargetData().getTypeSize(eltType));
+
+ // CreateMulInstruction() folds constants intelligently enough.
+ CreateMulInstruction(target, memInst->getParent()->getParent(),
+ idxVal, /* lval, not likely to be const*/
+ eltSizeVal, /* rval, likely to be constant */
+ addr, /* result */
+ mulVec, MachineCodeForInstruction::get(memInst),
+ INVALID_MACHINE_OPCODE);
+
+ assert(mulVec.size() > 0 && "No multiply code created?");
+ mvec.insert(mvec.end(), mulVec.begin(), mulVec.end());
+
+ valueForRegOffset = addr;
}
+ } else {
+ offsetOpType = MachineOperand::MO_SignExtendedImmed;
+ smallConstOffset = 0;
+ }
// For STORE:
// Operand 0 is value, operand 1 is ptr, operand 2 is offset
unsigned offsetOpNum, ptrOpNum;
MachineInstr *MI;
if (memInst->getOpcode() == Instruction::Store) {
- if (offsetOpType == MachineOperand::MO_VirtualRegister)
+ if (offsetOpType == MachineOperand::MO_VirtualRegister) {
MI = BuildMI(Opcode, 3).addReg(vmInstrNode->leftChild()->getValue())
.addReg(ptrVal).addReg(valueForRegOffset);
- else
+ } else {
+ Opcode = convertOpcodeFromRegToImm(Opcode);
MI = BuildMI(Opcode, 3).addReg(vmInstrNode->leftChild()->getValue())
.addReg(ptrVal).addSImm(smallConstOffset);
+ }
} else {
- if (offsetOpType == MachineOperand::MO_VirtualRegister)
+ if (offsetOpType == MachineOperand::MO_VirtualRegister) {
MI = BuildMI(Opcode, 3).addReg(ptrVal).addReg(valueForRegOffset)
.addRegDef(memInst);
- else
+ } else {
+ Opcode = convertOpcodeFromRegToImm(Opcode);
MI = BuildMI(Opcode, 3).addReg(ptrVal).addSImm(smallConstOffset)
.addRegDef(memInst);
+ }
}
mvec.push_back(MI);
}
// The parent's mvec would be empty if it was itself forwarded.
// Recursively call ForwardOperand in that case...
//
- if (mvec.size() == 0)
- {
- assert(parent->parent() != NULL &&
- "Parent could not have been forwarded, yet has no instructions?");
- ForwardOperand(treeNode, parent->parent(), operandNum);
- }
- else
- {
- for (unsigned i=0, N=mvec.size(); i < N; i++)
+ if (mvec.size() == 0) {
+ assert(parent->parent() != NULL &&
+ "Parent could not have been forwarded, yet has no instructions?");
+ ForwardOperand(treeNode, parent->parent(), operandNum);
+ } else {
+ for (unsigned i=0, N=mvec.size(); i < N; i++) {
+ MachineInstr* minstr = mvec[i];
+ for (unsigned i=0, numOps=minstr->getNumOperands(); i < numOps; ++i) {
+ const MachineOperand& mop = minstr->getOperand(i);
+ if (mop.getType() == MachineOperand::MO_VirtualRegister &&
+ mop.getVRegValue() == unusedOp)
{
- MachineInstr* minstr = mvec[i];
- for (unsigned i=0, numOps=minstr->getNumOperands(); i < numOps; ++i)
- {
- const MachineOperand& mop = minstr->getOperand(i);
- if (mop.getType() == MachineOperand::MO_VirtualRegister &&
- mop.getVRegValue() == unusedOp)
- minstr->SetMachineOperandVal(i,
- MachineOperand::MO_VirtualRegister, fwdOp);
- }
-
- for (unsigned i=0,numOps=minstr->getNumImplicitRefs(); i<numOps; ++i)
- if (minstr->getImplicitRef(i) == unusedOp)
- minstr->setImplicitRef(i, fwdOp,
- minstr->implicitRefIsDefined(i),
- minstr->implicitRefIsDefinedAndUsed(i));
+ minstr->SetMachineOperandVal(i, MachineOperand::MO_VirtualRegister,
+ fwdOp);
}
+ }
+
+ for (unsigned i=0,numOps=minstr->getNumImplicitRefs(); i<numOps; ++i)
+ if (minstr->getImplicitRef(i) == unusedOp)
+ minstr->setImplicitRef(i, fwdOp);
}
+ }
}
return true;
}
+// Generate code for any intrinsic that needs a special code sequence
+// instead of a regular call. If not that kind of intrinsic, do nothing.
+// Returns true if code was generated, otherwise false.
+//
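+// A caller would typically try this first and fall back to a normal call:
+//   if (!CodeGenIntrinsic(iid, *callInstr, target, mvec))
+//     ... emit the usual CALL sequence ...
+//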
+bool CodeGenIntrinsic(LLVMIntrinsic::ID iid, CallInst &callInstr,
+ TargetMachine &target,
+ std::vector<MachineInstr*>& mvec)
+{
+ switch (iid) {
+ case LLVMIntrinsic::va_start: {
+ // Get the address of the first incoming vararg argument on the stack
+ bool ignore;
+ Function* func = cast<Function>(callInstr.getParent()->getParent());
+ int numFixedArgs = func->getFunctionType()->getNumParams();
+ int fpReg = target.getFrameInfo().getIncomingArgBaseRegNum();
+ int argSize = target.getFrameInfo().getSizeOfEachArgOnStack();
+ int firstVarArgOff = numFixedArgs * argSize + target.getFrameInfo().
+ getFirstIncomingArgOffset(MachineFunction::get(func), ignore);
+ mvec.push_back(BuildMI(V9::ADDi, 3).addMReg(fpReg).addSImm(firstVarArgOff).
+ addRegDef(&callInstr));
+ return true;
+ }
+
+ case LLVMIntrinsic::va_end:
+ return true; // no-op on Sparc
+
+ case LLVMIntrinsic::va_copy:
+ // Simple copy of current va_list (arg1) to new va_list (result)
+ mvec.push_back(BuildMI(V9::ORr, 3).
+ addMReg(target.getRegInfo().getZeroRegNum()).
+ addReg(callInstr.getOperand(1)).
+ addRegDef(&callInstr));
+ return true;
+
+ case LLVMIntrinsic::sigsetjmp:
+ case LLVMIntrinsic::setjmp: {
+ // act as if we return 0
+ unsigned g0 = target.getRegInfo().getZeroRegNum();
+ mvec.push_back(BuildMI(V9::ORr,3).addMReg(g0).addMReg(g0)
+ .addReg(&callInstr, MOTy::Def));
+ return true;
+ }
+
+ case LLVMIntrinsic::siglongjmp:
+ case LLVMIntrinsic::longjmp: {
+ // call abort()
+ Module* M = callInstr.getParent()->getParent()->getParent();
+ const FunctionType *voidvoidFuncTy =
+ FunctionType::get(Type::VoidTy, std::vector<const Type*>(), false);
+ Function *F = M->getOrInsertFunction("abort", voidvoidFuncTy);
+ assert(F && "Unable to get or create `abort' function declaration");
+
+ // Create hidden virtual register for return address with type void*
+ TmpInstruction* retAddrReg =
+ new TmpInstruction(MachineCodeForInstruction::get(&callInstr),
+ PointerType::get(Type::VoidTy), &callInstr);
+
+ // Use a descriptor to pass information about call arguments
+ // to the register allocator. This descriptor will be "owned"
+ // and freed automatically when the MachineCodeForInstruction
+ // object for the callInstr goes away.
+ CallArgsDescriptor* argDesc =
+ new CallArgsDescriptor(&callInstr, retAddrReg, false, false);
+
+ MachineInstr* callMI = BuildMI(V9::CALL, 1).addPCDisp(F);
+ callMI->addImplicitRef(retAddrReg, /*isDef*/ true);
+
+ mvec.push_back(callMI);
+ mvec.push_back(BuildMI(V9::NOP, 0));
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
//******************* Externally Visible Functions *************************/
//------------------------------------------------------------------------
int ruleForNode,
short* nts,
TargetMachine &target,
- vector<MachineInstr*>& mvec)
+ std::vector<MachineInstr*>& mvec)
{
bool checkCast = false; // initialize here to use fall-through
bool maskUnsignedResult = false;
unsigned allocaSize = 0;
MachineInstr* M, *M2;
unsigned L;
+ bool foldCase = false;
mvec.clear();
// Let's check for chain rules outside the switch so that we don't have
// to duplicate the list of chain rule production numbers here again
//
- if (ThisIsAChainRule(ruleForNode))
- {
- // Chain rules have a single nonterminal on the RHS.
- // Get the rule that matches the RHS non-terminal and use that instead.
- //
- assert(nts[0] && ! nts[1]
- && "A chain rule should have only one RHS non-terminal!");
- nextRule = burm_rule(subtreeRoot->state, nts[0]);
- nts = burm_nts[nextRule];
- GetInstructionsByRule(subtreeRoot, nextRule, nts, target, mvec);
- }
- else
- {
- switch(ruleForNode) {
- case 1: // stmt: Ret
- case 2: // stmt: RetValue(reg)
+ if (ThisIsAChainRule(ruleForNode)) {
+ // Chain rules have a single nonterminal on the RHS.
+ // Get the rule that matches the RHS non-terminal and use that instead.
+ //
+ assert(nts[0] && ! nts[1]
+ && "A chain rule should have only one RHS non-terminal!");
+ nextRule = burm_rule(subtreeRoot->state, nts[0]);
+ nts = burm_nts[nextRule];
+ GetInstructionsByRule(subtreeRoot, nextRule, nts, target, mvec);
+ } else {
+ switch(ruleForNode) {
+ case 1: // stmt: Ret
+ case 2: // stmt: RetValue(reg)
{ // NOTE: Prepass of register allocation is responsible
// for moving return value to appropriate register.
+          // Copy the return value to the required return register.
+          // Mark the return Value as an implicit ref of the RET instr.
// Mark the return-address register as a hidden virtual reg.
- // Mark the return value register as an implicit ref of
- // the machine instruction.
// Finally put a NOP in the delay slot.
- ReturnInst *returnInstr =
- cast<ReturnInst>(subtreeRoot->getInstruction());
- assert(returnInstr->getOpcode() == Instruction::Ret);
-
- Instruction* returnReg = new TmpInstruction(returnInstr);
- MachineCodeForInstruction::get(returnInstr).addTemp(returnReg);
-
- M = BuildMI(V9::JMPLRET, 3).addReg(returnReg).addSImm(8)
+        ReturnInst* returnInstr =
+          cast<ReturnInst>(subtreeRoot->getInstruction());
+ Value* retVal = returnInstr->getReturnValue();
+ MachineCodeForInstruction& mcfi =
+ MachineCodeForInstruction::get(returnInstr);
+
+ // Create a hidden virtual reg to represent the return address register
+ // used by the machine instruction but not represented in LLVM.
+ //
+ Instruction* returnAddrTmp = new TmpInstruction(mcfi, returnInstr);
+
+ MachineInstr* retMI =
+ BuildMI(V9::JMPLRETi, 3).addReg(returnAddrTmp).addSImm(8)
.addMReg(target.getRegInfo().getZeroRegNum(), MOTy::Def);
+
+ // If there is a value to return, we need to:
+ // (a) Sign-extend the value if it is smaller than 8 bytes (reg size)
+ // (b) Insert a copy to copy the return value to the appropriate reg.
+ // -- For FP values, create a FMOVS or FMOVD instruction
+ // -- For non-FP values, create an add-with-0 instruction
+ //
+ if (retVal != NULL) {
+ const UltraSparcRegInfo& regInfo =
+ (UltraSparcRegInfo&) target.getRegInfo();
+ const Type* retType = retVal->getType();
+ unsigned regClassID = regInfo.getRegClassIDOfType(retType);
+ unsigned retRegNum = (retType->isFloatingPoint()
+ ? (unsigned) SparcFloatRegClass::f0
+ : (unsigned) SparcIntRegClass::i0);
+ retRegNum = regInfo.getUnifiedRegNum(regClassID, retRegNum);
+
+          // (a) Insert sign-extension instructions for small signed values.
+ //
+ Value* retValToUse = retVal;
+ if (retType->isIntegral() && retType->isSigned()) {
+ unsigned retSize = target.getTargetData().getTypeSize(retType);
+ if (retSize <= 4) {
+ // create a temporary virtual reg. to hold the sign-extension
+ retValToUse = new TmpInstruction(mcfi, retVal);
+
+ // sign-extend retVal and put the result in the temporary reg.
+ target.getInstrInfo().CreateSignExtensionInstructions
+ (target, returnInstr->getParent()->getParent(),
+ retVal, retValToUse, 8*retSize, mvec, mcfi);
+ }
+ }
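+          // E.g., a signed short return value is sign-extended from its
+          // low 16 bits to the full register before the copy in (b) below.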
+
+          // (b) Now, insert a copy to the appropriate register:
+ // -- For FP values, create a FMOVS or FMOVD instruction
+ // -- For non-FP values, create an add-with-0 instruction
+ //
+ // First, create a virtual register to represent the register and
+ // mark this vreg as being an implicit operand of the ret MI.
+ TmpInstruction* retVReg =
+ new TmpInstruction(mcfi, retValToUse, NULL, "argReg");
+
+ retMI->addImplicitRef(retVReg);
+
+ if (retType->isFloatingPoint())
+            M = (BuildMI(retType == Type::FloatTy ? V9::FMOVS : V9::FMOVD, 2)
+ .addReg(retValToUse).addReg(retVReg, MOTy::Def));
+ else
+ M = (BuildMI(ChooseAddInstructionByType(retType), 3)
+ .addReg(retValToUse).addSImm((int64_t) 0)
+ .addReg(retVReg, MOTy::Def));
+
+ // Mark the operand with the register it should be assigned
+ M->SetRegForOperand(M->getNumOperands()-1, retRegNum);
+ retMI->SetRegForImplicitRef(retMI->getNumImplicitRefs()-1, retRegNum);
+
+ mvec.push_back(M);
+ }
- if (returnInstr->getReturnValue() != NULL)
- M->addImplicitRef(returnInstr->getReturnValue());
-
- mvec.push_back(M);
+ // Now insert the RET instruction and a NOP for the delay slot
+ mvec.push_back(retMI);
mvec.push_back(BuildMI(V9::NOP, 0));
break;
if ((constVal->getType()->isInteger()
|| isa<PointerType>(constVal->getType()))
- && GetConstantValueAsSignedInt(constVal, isValidConst) == 0
+ && target.getInstrInfo().ConvertConstantToIntType(target,
+ constVal, constVal->getType(), isValidConst) == 0
&& isValidConst)
{
// That constant is a zero after all...
// TmpInstruction representing that CC.
//
BranchInst* brInst = cast<BranchInst>(subtreeRoot->getInstruction());
- bool isFPBranch;
- unsigned Opcode = ChooseBccInstruction(subtreeRoot, isFPBranch);
+ const Type* setCCType;
+ unsigned Opcode = ChooseBccInstruction(subtreeRoot, setCCType);
Value* ccValue = GetTmpForCC(subtreeRoot->leftChild()->getValue(),
brInst->getParent()->getParent(),
- isFPBranch? Type::FloatTy : Type::IntTy);
+ setCCType,
+ MachineCodeForInstruction::get(brInst));
M = BuildMI(Opcode, 2).addCCReg(ccValue)
.addPCDisp(brInst->getSuccessor(0));
mvec.push_back(M);
}
case 8: // stmt: BrCond(boolreg)
- { // boolreg => boolean is stored in an existing register.
- // Just use the branch-on-integer-register instruction!
+      { // boolreg => boolean is stored in an integer register.
+        // Use the branch-on-integer-register instruction.
//
BranchInst *BI = cast<BranchInst>(subtreeRoot->getInstruction());
M = BuildMI(V9::BRNZ, 2).addReg(subtreeRoot->leftChild()->getValue())
assert(0 && "VRegList should never be the topmost non-chain rule");
break;
- case 21: // bool: Not(bool,reg): Both these are implemented as:
- case 421: // reg: BNot(reg,reg): reg = reg XOR-NOT 0
+ case 21: // bool: Not(bool,reg): Compute with a conditional-move-on-reg
+ { // First find the unary operand. It may be left or right, usually right.
+        Instruction* notI = subtreeRoot->getInstruction();
+        Value* notArg = BinaryOperator::getNotArgument(
+                           cast<BinaryOperator>(notI));
+
+ // Unconditionally set register to 0
+ mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(notI));
+
+ // Now conditionally move 1 into the register.
+ // Mark the register as a use (as well as a def) because the old
+ // value will be retained if the condition is false.
+ mvec.push_back(BuildMI(V9::MOVRZi, 3).addReg(notArg).addZImm(1)
+ .addReg(notI, MOTy::UseAndDef));
+
+ break;
+ }
+
+ case 421: // reg: BNot(reg,reg): Compute as reg = reg XOR-NOT 0
{ // First find the unary operand. It may be left or right, usually right.
Value* notArg = BinaryOperator::getNotArgument(
cast<BinaryOperator>(subtreeRoot->getInstruction()));
unsigned ZeroReg = target.getRegInfo().getZeroRegNum();
- mvec.push_back(BuildMI(V9::XNOR, 3).addReg(notArg).addMReg(ZeroReg)
+ mvec.push_back(BuildMI(V9::XNORr, 3).addReg(notArg).addMReg(ZeroReg)
.addRegDef(subtreeRoot->getValue()));
break;
}
+ case 322: // reg: Not(tobool, reg):
+ // Fold CAST-TO-BOOL with NOT by inverting the sense of cast-to-bool
+ foldCase = true;
+ // Just fall through!
+
case 22: // reg: ToBoolTy(reg):
{
- const Type* opType = subtreeRoot->leftChild()->getValue()->getType();
- assert(opType->isIntegral() || isa<PointerType>(opType));
- forwardOperandNum = 0; // forward first operand to user
+ Instruction* castI = subtreeRoot->getInstruction();
+ Value* opVal = subtreeRoot->leftChild()->getValue();
+ assert(opVal->getType()->isIntegral() ||
+ isa<PointerType>(opVal->getType()));
+
+ // Unconditionally set register to 0
+ mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(castI));
+
+ // Now conditionally move 1 into the register.
+ // Mark the register as a use (as well as a def) because the old
+ // value will be retained if the condition is false.
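+        // MOVRNZi writes 1 if opVal is non-zero (plain cast-to-bool);
+        // MOVRZi writes 1 if it is zero (the folded Not(cast-to-bool) case).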
+        MachineOpCode opCode = foldCase ? V9::MOVRZi : V9::MOVRNZi;
+ mvec.push_back(BuildMI(opCode, 3).addReg(opVal).addZImm(1)
+ .addReg(castI, MOTy::UseAndDef));
+
break;
}
case 26: // reg: ToShortTy(reg)
case 27: // reg: ToUIntTy(reg)
case 28: // reg: ToIntTy(reg)
+ case 29: // reg: ToULongTy(reg)
+ case 30: // reg: ToLongTy(reg)
{
//======================================================================
// Rules for integer conversions:
//
// Since we assume 2s complement representations, this implies:
//
- // -- if operand is smaller than destination, zero-extend or sign-extend
- // according to the signedness of the *operand*: source decides.
- // ==> we have to do nothing here!
+ // -- If operand is smaller than destination, zero-extend or sign-extend
+ // according to the signedness of the *operand*: source decides:
+    //           (1) If operand is signed, sign-extend it.
+    //               If the dest is unsigned, zero-extend the result!
+    //           (2) If operand is unsigned, our current invariant is that
+    //               its high bits are correct, so zero-extension is not needed.
//
- // -- if operand is same size as or larger than destination, and the
- // destination is *unsigned*, zero-extend the operand: dest. decides
- //
- // -- if operand is same size as or larger than destination, and the
- // destination is *signed*, the choice is implementation defined:
- // we sign-extend the operand: i.e., again dest. decides.
- // Note: this matches both Sun's cc and gcc3.2.
+ // -- If operand is same size as or larger than destination,
+ // zero-extend or sign-extend according to the signedness of
+ // the *destination*: destination decides:
+ // (1) If destination is signed, sign-extend (truncating if needed)
+ // This choice is implementation defined. We sign-extend the
+ // operand, which matches both Sun's cc and gcc3.2.
+ // (2) If destination is unsigned, zero-extend (truncating if needed)
//======================================================================
Instruction* destI = subtreeRoot->getInstruction();
+ Function* currentFunc = destI->getParent()->getParent();
+ MachineCodeForInstruction& mcfi=MachineCodeForInstruction::get(destI);
+
Value* opVal = subtreeRoot->leftChild()->getValue();
const Type* opType = opVal->getType();
- if (opType->isIntegral() || isa<PointerType>(opType))
- {
- unsigned opSize = target.getTargetData().getTypeSize(opType);
- unsigned destSize = target.getTargetData().getTypeSize(destI->getType());
- if (opSize >= destSize)
- { // Operand is same size as or larger than dest:
- // zero- or sign-extend, according to the signeddness of
- // the destination (see above).
- if (destI->getType()->isSigned())
- target.getInstrInfo().CreateSignExtensionInstructions(target,
- destI->getParent()->getParent(), opVal, destI, 8*destSize,
- mvec, MachineCodeForInstruction::get(destI));
- else
- target.getInstrInfo().CreateZeroExtensionInstructions(target,
- destI->getParent()->getParent(), opVal, destI, 8*destSize,
- mvec, MachineCodeForInstruction::get(destI));
- }
- else
- forwardOperandNum = 0; // forward first operand to user
+ const Type* destType = destI->getType();
+ unsigned opSize = target.getTargetData().getTypeSize(opType);
+ unsigned destSize = target.getTargetData().getTypeSize(destType);
+
+ bool isIntegral = opType->isIntegral() || isa<PointerType>(opType);
+
+        if (opType == Type::BoolTy ||
+            opType == destType ||
+            (isIntegral && opSize == destSize && opSize == 8)) {
+ // nothing to do in all these cases
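+          // (A bool is always 0 or 1, and an 8-byte integral value already
+          // fills the 64-bit register, so the high bits are correct.)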
+ forwardOperandNum = 0; // forward first operand to user
+
+ } else if (opType->isFloatingPoint()) {
+
+ CreateCodeToConvertFloatToInt(target, opVal, destI, mvec, mcfi);
+          if (destI->getType()->isUnsigned() &&
+              destI->getType() != Type::UIntTy)
+ maskUnsignedResult = true; // not handled by fp->int code
+
+ } else if (isIntegral) {
+
+ bool opSigned = opType->isSigned();
+ bool destSigned = destType->isSigned();
+ unsigned extSourceInBits = 8 * std::min<unsigned>(opSize, destSize);
+
+ assert(! (opSize == destSize && opSigned == destSigned) &&
+ "How can different int types have same size and signedness?");
+
+          bool signExtend = ((opSize < destSize && opSigned) ||
+                             (opSize >= destSize && destSigned));
+
+ bool signAndZeroExtend = (opSize < destSize && destSize < 8u &&
+ opSigned && !destSigned);
+ assert(!signAndZeroExtend || signExtend);
+
+ bool zeroExtendOnly = opSize >= destSize && !destSigned;
+ assert(!zeroExtendOnly || !signExtend);
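+          // E.g., sbyte -> uint: sign-extend the low 8 bits into a temp,
+          // then zero-extend its low 32 bits into the destination.
+          // E.g., int -> ushort: the destination decides, so just
+          // zero-extend the low 16 bits (zeroExtendOnly).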
+
+ if (signExtend) {
+ Value* signExtDest = (signAndZeroExtend
+ ? new TmpInstruction(mcfi, destType, opVal)
+ : destI);
+
+ target.getInstrInfo().CreateSignExtensionInstructions
+ (target, currentFunc,opVal,signExtDest,extSourceInBits,mvec,mcfi);
+
+ if (signAndZeroExtend)
+ target.getInstrInfo().CreateZeroExtensionInstructions
+ (target, currentFunc, signExtDest, destI, 8*destSize, mvec, mcfi);
}
- else if (opType->isFloatingPoint())
- {
- CreateCodeToConvertFloatToInt(target, opVal, destI, mvec,
- MachineCodeForInstruction::get(destI));
- if (destI->getType()->isUnsigned())
- maskUnsignedResult = true; // not handled by fp->int code
+ else if (zeroExtendOnly) {
+ target.getInstrInfo().CreateZeroExtensionInstructions
+ (target, currentFunc, opVal, destI, extSourceInBits, mvec, mcfi);
}
- else
- assert(0 && "Unrecognized operand type for convert-to-unsigned");
+ else
+ forwardOperandNum = 0; // forward first operand to user
- break;
- }
+ } else
+ assert(0 && "Unrecognized operand type for convert-to-integer");
- case 29: // reg: ToULongTy(reg)
- case 30: // reg: ToLongTy(reg)
- {
- Value* opVal = subtreeRoot->leftChild()->getValue();
- const Type* opType = opVal->getType();
- if (opType->isIntegral() || isa<PointerType>(opType))
- forwardOperandNum = 0; // forward first operand to user
- else if (opType->isFloatingPoint())
- {
- Instruction* destI = subtreeRoot->getInstruction();
- CreateCodeToConvertFloatToInt(target, opVal, destI, mvec,
- MachineCodeForInstruction::get(destI));
- }
- else
- assert(0 && "Unrecognized operand type for convert-to-signed");
break;
}
// In the future, we'll want to do the same for the FdMULq instruction,
// so do the check here instead of only for ToFloatTy(reg).
//
- if (subtreeRoot->parent() != NULL)
- {
- const MachineCodeForInstruction& mcfi =
- MachineCodeForInstruction::get(
+ if (subtreeRoot->parent() != NULL) {
+ const MachineCodeForInstruction& mcfi =
+ MachineCodeForInstruction::get(
cast<InstructionNode>(subtreeRoot->parent())->getInstruction());
- if (mcfi.size() == 0 || mcfi.front()->getOpCode() == V9::FSMULD)
- forwardOperandNum = 0; // forward first operand to user
- }
+ if (mcfi.size() == 0 || mcfi.front()->getOpCode() == V9::FSMULD)
+ forwardOperandNum = 0; // forward first operand to user
+ }
- if (forwardOperandNum != 0) // we do need the cast
- {
- Value* leftVal = subtreeRoot->leftChild()->getValue();
- const Type* opType = leftVal->getType();
- MachineOpCode opCode=ChooseConvertToFloatInstr(
+ if (forwardOperandNum != 0) { // we do need the cast
+ Value* leftVal = subtreeRoot->leftChild()->getValue();
+ const Type* opType = leftVal->getType();
+      MachineOpCode opCode = ChooseConvertToFloatInstr(target,
subtreeRoot->getOpLabel(), opType);
- if (opCode == V9::INVALID_OPCODE) // no conversion needed
- {
- forwardOperandNum = 0; // forward first operand to user
- }
- else
- {
- // If the source operand is a non-FP type it must be
- // first copied from int to float register via memory!
- Instruction *dest = subtreeRoot->getInstruction();
- Value* srcForCast;
- int n = 0;
- if (! opType->isFloatingPoint())
- {
- // Create a temporary to represent the FP register
- // into which the integer will be copied via memory.
- // The type of this temporary will determine the FP
- // register used: single-prec for a 32-bit int or smaller,
- // double-prec for a 64-bit int.
- //
- uint64_t srcSize =
- target.getTargetData().getTypeSize(leftVal->getType());
- Type* tmpTypeToUse =
- (srcSize <= 4)? Type::FloatTy : Type::DoubleTy;
- srcForCast = new TmpInstruction(tmpTypeToUse, dest);
- MachineCodeForInstruction &destMCFI =
- MachineCodeForInstruction::get(dest);
- destMCFI.addTemp(srcForCast);
-
- target.getInstrInfo().CreateCodeToCopyIntToFloat(target,
+ if (opCode == V9::NOP) { // no conversion needed
+ forwardOperandNum = 0; // forward first operand to user
+ } else {
+ // If the source operand is a non-FP type it must be
+ // first copied from int to float register via memory!
+ Instruction *dest = subtreeRoot->getInstruction();
+ Value* srcForCast;
+ int n = 0;
+ if (! opType->isFloatingPoint()) {
+ // Create a temporary to represent the FP register
+ // into which the integer will be copied via memory.
+ // The type of this temporary will determine the FP
+ // register used: single-prec for a 32-bit int or smaller,
+ // double-prec for a 64-bit int.
+ //
+ uint64_t srcSize =
+ target.getTargetData().getTypeSize(leftVal->getType());
+ Type* tmpTypeToUse =
+ (srcSize <= 4)? Type::FloatTy : Type::DoubleTy;
+ MachineCodeForInstruction &destMCFI =
+ MachineCodeForInstruction::get(dest);
+ srcForCast = new TmpInstruction(destMCFI, tmpTypeToUse, dest);
+
+ target.getInstrInfo().CreateCodeToCopyIntToFloat(target,
dest->getParent()->getParent(),
leftVal, cast<Instruction>(srcForCast),
mvec, destMCFI);
- }
- else
- srcForCast = leftVal;
+ } else
+ srcForCast = leftVal;
- M = BuildMI(opCode, 2).addReg(srcForCast).addRegDef(dest);
- mvec.push_back(M);
- }
+ M = BuildMI(opCode, 2).addReg(srcForCast).addRegDef(dest);
+ mvec.push_back(M);
}
+ }
break;
case 19: // reg: ToArrayTy(reg):
case 233: // reg: Add(reg, Constant)
maskUnsignedResult = true;
M = CreateAddConstInstruction(subtreeRoot);
- if (M != NULL)
- {
- mvec.push_back(M);
- break;
- }
+ if (M != NULL) {
+ mvec.push_back(M);
+ break;
+ }
// ELSE FALL THROUGH
case 33: // reg: Add(reg, reg)
case 234: // reg: Sub(reg, Constant)
maskUnsignedResult = true;
M = CreateSubConstInstruction(subtreeRoot);
- if (M != NULL)
- {
- mvec.push_back(M);
- break;
- }
+ if (M != NULL) {
+ mvec.push_back(M);
+ break;
+ }
// ELSE FALL THROUGH
case 34: // reg: Sub(reg, reg)
// ELSE FALL THROUGH
case 36: // reg: Div(reg, reg)
+ {
maskUnsignedResult = true;
- Add3OperandInstr(ChooseDivInstruction(target, subtreeRoot),
- subtreeRoot, mvec);
+
+ // If either operand of divide is smaller than 64 bits, we have
+ // to make sure the unused top bits are correct because they affect
+ // the result. These bits are already correct for unsigned values.
+ // They may be incorrect for signed values, so sign extend to fill in.
+ Instruction* divI = subtreeRoot->getInstruction();
+ Value* divOp1 = subtreeRoot->leftChild()->getValue();
+ Value* divOp2 = subtreeRoot->rightChild()->getValue();
+ Value* divOp1ToUse = divOp1;
+ Value* divOp2ToUse = divOp2;
+ if (divI->getType()->isSigned()) {
+        unsigned opSize = target.getTargetData().getTypeSize(divI->getType());
+        if (opSize < 8) {
+          MachineCodeForInstruction& mcfi =
+            MachineCodeForInstruction::get(divI);
+ divOp1ToUse = new TmpInstruction(mcfi, divOp1);
+ divOp2ToUse = new TmpInstruction(mcfi, divOp2);
+ target.getInstrInfo().
+ CreateSignExtensionInstructions(target,
+ divI->getParent()->getParent(),
+ divOp1, divOp1ToUse,
+ 8*opSize, mvec, mcfi);
+ target.getInstrInfo().
+ CreateSignExtensionInstructions(target,
+ divI->getParent()->getParent(),
+ divOp2, divOp2ToUse,
+ 8*opSize, mvec, mcfi);
+ }
+ }
+
+ mvec.push_back(BuildMI(ChooseDivInstruction(target, subtreeRoot), 3)
+ .addReg(divOp1ToUse)
+ .addReg(divOp2ToUse)
+ .addRegDef(divI));
+
break;
+ }
case 37: // reg: Rem(reg, reg)
case 237: // reg: Rem(reg, Constant)
{
maskUnsignedResult = true;
- Instruction* remInstr = subtreeRoot->getInstruction();
+
+ Instruction* remI = subtreeRoot->getInstruction();
+ Value* divOp1 = subtreeRoot->leftChild()->getValue();
+ Value* divOp2 = subtreeRoot->rightChild()->getValue();
+
+ MachineCodeForInstruction& mcfi = MachineCodeForInstruction::get(remI);
- TmpInstruction* quot = new TmpInstruction(
- subtreeRoot->leftChild()->getValue(),
- subtreeRoot->rightChild()->getValue());
- TmpInstruction* prod = new TmpInstruction(
- quot,
- subtreeRoot->rightChild()->getValue());
- MachineCodeForInstruction::get(remInstr).addTemp(quot).addTemp(prod);
+ // If second operand of divide is smaller than 64 bits, we have
+ // to make sure the unused top bits are correct because they affect
+ // the result. These bits are already correct for unsigned values.
+ // They may be incorrect for signed values, so sign extend to fill in.
+ //
+ Value* divOpToUse = divOp2;
+ if (divOp2->getType()->isSigned()) {
+          unsigned opSize =
+            target.getTargetData().getTypeSize(divOp2->getType());
+ if (opSize < 8) {
+ divOpToUse = new TmpInstruction(mcfi, divOp2);
+ target.getInstrInfo().
+ CreateSignExtensionInstructions(target,
+ remI->getParent()->getParent(),
+ divOp2, divOpToUse,
+ 8*opSize, mvec, mcfi);
+ }
+ }
+
+ // Now compute: result = rem V1, V2 as:
+ // result = V1 - (V1 / signExtend(V2)) * signExtend(V2)
+ //
+ TmpInstruction* quot = new TmpInstruction(mcfi, divOp1, divOpToUse);
+ TmpInstruction* prod = new TmpInstruction(mcfi, quot, divOpToUse);
+
+ mvec.push_back(BuildMI(ChooseDivInstruction(target, subtreeRoot), 3)
+ .addReg(divOp1).addReg(divOpToUse).addRegDef(quot));
- M = BuildMI(ChooseDivInstruction(target, subtreeRoot), 3)
- .addReg(subtreeRoot->leftChild()->getValue())
- .addReg(subtreeRoot->rightChild()->getValue())
- .addRegDef(quot);
- mvec.push_back(M);
+ mvec.push_back(BuildMI(ChooseMulInstructionByType(remI->getType()), 3)
+ .addReg(quot).addReg(divOpToUse).addRegDef(prod));
- unsigned MulOpcode =
- ChooseMulInstructionByType(subtreeRoot->getInstruction()->getType());
- Value *MulRHS = subtreeRoot->rightChild()->getValue();
- M = BuildMI(MulOpcode, 3).addReg(quot).addReg(MulRHS).addReg(prod,
- MOTy::Def);
- mvec.push_back(M);
+ mvec.push_back(BuildMI(ChooseSubInstructionByType(remI->getType()), 3)
+ .addReg(divOp1).addReg(prod).addRegDef(remI));
- unsigned Opcode = ChooseSubInstructionByType(
- subtreeRoot->getInstruction()->getType());
- M = BuildMI(Opcode, 3).addReg(subtreeRoot->leftChild()->getValue())
- .addReg(prod).addRegDef(subtreeRoot->getValue());
- mvec.push_back(M);
break;
}
case 38: // bool: And(bool, bool)
+ case 138: // bool: And(bool, not)
case 238: // bool: And(bool, boolconst)
case 338: // reg : BAnd(reg, reg)
case 538: // reg : BAnd(reg, Constant)
- Add3OperandInstr(V9::AND, subtreeRoot, mvec);
+ Add3OperandInstr(V9::ANDr, subtreeRoot, mvec);
break;
- case 138: // bool: And(bool, not)
case 438: // bool: BAnd(bool, bnot)
{ // Use the argument of NOT as the second argument!
// Mark the NOT node so that no code is generated for it.
+ // If the type is boolean, set 1 or 0 in the result register.
InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
Value* notArg = BinaryOperator::getNotArgument(
cast<BinaryOperator>(notNode->getInstruction()));
notNode->markFoldedIntoParent();
- Value *LHS = subtreeRoot->leftChild()->getValue();
- Value *Dest = subtreeRoot->getValue();
- mvec.push_back(BuildMI(V9::ANDN, 3).addReg(LHS).addReg(notArg)
- .addReg(Dest, MOTy::Def));
+ Value *lhs = subtreeRoot->leftChild()->getValue();
+ Value *dest = subtreeRoot->getValue();
+ mvec.push_back(BuildMI(V9::ANDNr, 3).addReg(lhs).addReg(notArg)
+ .addReg(dest, MOTy::Def));
+
+ if (notArg->getType() == Type::BoolTy) {
+ // set 1 in result register if result of above is non-zero
+ mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
+ .addReg(dest, MOTy::UseAndDef));
+ }
+
break;
}
case 39: // bool: Or(bool, bool)
+ case 139: // bool: Or(bool, not)
case 239: // bool: Or(bool, boolconst)
case 339: // reg : BOr(reg, reg)
case 539: // reg : BOr(reg, Constant)
- Add3OperandInstr(V9::OR, subtreeRoot, mvec);
+ Add3OperandInstr(V9::ORr, subtreeRoot, mvec);
break;
- case 139: // bool: Or(bool, not)
case 439: // bool: BOr(bool, bnot)
{ // Use the argument of NOT as the second argument!
// Mark the NOT node so that no code is generated for it.
+ // If the type is boolean, set 1 or 0 in the result register.
InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
Value* notArg = BinaryOperator::getNotArgument(
cast<BinaryOperator>(notNode->getInstruction()));
notNode->markFoldedIntoParent();
- Value *LHS = subtreeRoot->leftChild()->getValue();
- Value *Dest = subtreeRoot->getValue();
- mvec.push_back(BuildMI(V9::ORN, 3).addReg(LHS).addReg(notArg)
- .addReg(Dest, MOTy::Def));
+ Value *lhs = subtreeRoot->leftChild()->getValue();
+ Value *dest = subtreeRoot->getValue();
+
+ mvec.push_back(BuildMI(V9::ORNr, 3).addReg(lhs).addReg(notArg)
+ .addReg(dest, MOTy::Def));
+
+ if (notArg->getType() == Type::BoolTy) {
+ // set 1 in result register if result of above is non-zero
+ mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
+ .addReg(dest, MOTy::UseAndDef));
+ }
+
break;
}
case 40: // bool: Xor(bool, bool)
+ case 140: // bool: Xor(bool, not)
case 240: // bool: Xor(bool, boolconst)
case 340: // reg : BXor(reg, reg)
case 540: // reg : BXor(reg, Constant)
- Add3OperandInstr(V9::XOR, subtreeRoot, mvec);
+ Add3OperandInstr(V9::XORr, subtreeRoot, mvec);
break;
- case 140: // bool: Xor(bool, not)
case 440: // bool: BXor(bool, bnot)
{ // Use the argument of NOT as the second argument!
// Mark the NOT node so that no code is generated for it.
+ // If the type is boolean, set 1 or 0 in the result register.
InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
Value* notArg = BinaryOperator::getNotArgument(
cast<BinaryOperator>(notNode->getInstruction()));
notNode->markFoldedIntoParent();
- Value *LHS = subtreeRoot->leftChild()->getValue();
- Value *Dest = subtreeRoot->getValue();
- mvec.push_back(BuildMI(V9::XNOR, 3).addReg(LHS).addReg(notArg)
- .addReg(Dest, MOTy::Def));
+ Value *lhs = subtreeRoot->leftChild()->getValue();
+ Value *dest = subtreeRoot->getValue();
+ mvec.push_back(BuildMI(V9::XNORr, 3).addReg(lhs).addReg(notArg)
+ .addReg(dest, MOTy::Def));
+
+ if (notArg->getType() == Type::BoolTy) {
+ // set 1 in result register if result of above is non-zero
+ mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
+ .addReg(dest, MOTy::UseAndDef));
+ }
break;
}
- case 41: // boolconst: SetCC(reg, Constant)
+ case 41: // setCCconst: SetCC(reg, Constant)
+ { // Comparison is with a constant:
//
- // If the SetCC was folded into the user (parent), it will be
- // caught above. All other cases are the same as case 42,
- // so just fall through.
+ // If the bool result must be computed into a register (see below),
+ // and the constant is int ZERO, we can use the MOVR[op] instructions
+ // and avoid the SUBcc instruction entirely.
+ // Otherwise this is just the same as case 42, so just fall through.
//
+ // The result of the SetCC must be computed and stored in a register if
+ // it is used outside the current basic block (so it must be computed
+ // as a boolreg) or it is used by anything other than a branch.
+ // We will use a conditional move to do this.
+ //
+ Instruction* setCCInstr = subtreeRoot->getInstruction();
+ bool computeBoolVal = (subtreeRoot->parent() == NULL ||
+ ! AllUsesAreBranches(setCCInstr));
+
+ if (computeBoolVal) {
+ InstrTreeNode* constNode = subtreeRoot->rightChild();
+ assert(constNode &&
+               constNode->getNodeType() == InstrTreeNode::NTConstNode);
+ Constant *constVal = cast<Constant>(constNode->getValue());
+ bool isValidConst;
+
+ if ((constVal->getType()->isInteger()
+ || isa<PointerType>(constVal->getType()))
+ && target.getInstrInfo().ConvertConstantToIntType(target,
+ constVal, constVal->getType(), isValidConst) == 0
+ && isValidConst)
+ {
+ // That constant is an integer zero after all...
+ // Use a MOVR[op] to compute the boolean result
+ // Unconditionally set register to 0
+ mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0)
+ .addRegDef(setCCInstr));
+
+ // Now conditionally move 1 into the register.
+ // Mark the register as a use (as well as a def) because the old
+ // value will be retained if the condition is false.
+ MachineOpCode movOpCode = ChooseMovpregiForSetCC(subtreeRoot);
+ mvec.push_back(BuildMI(movOpCode, 3)
+ .addReg(subtreeRoot->leftChild()->getValue())
+ .addZImm(1).addReg(setCCInstr, MOTy::UseAndDef));
+
+ break;
+ }
+ }
+ // ELSE FALL THROUGH
+ }
+
case 42: // bool: SetCC(reg, reg):
{
- // This generates a SUBCC instruction, putting the difference in
- // a result register, and setting a condition code.
+        // This generates a SUBcc instruction to set the condition codes,
+        // discarding the difference, and, if needed, a conditional move
+        // (below) to compute the boolean result in a register.
//
- // If the boolean result of the SetCC is used by anything other
- // than a branch instruction, or if it is used outside the current
- // basic block, the boolean must be
- // computed and stored in the result register. Otherwise, discard
- // the difference (by using %g0) and keep only the condition code.
- //
- // To compute the boolean result in a register we use a conditional
- // move, unless the result of the SUBCC instruction can be used as
- // the bool! This assumes that zero is FALSE and any non-zero
- // integer is TRUE.
- //
- InstructionNode* parentNode = (InstructionNode*) subtreeRoot->parent();
Instruction* setCCInstr = subtreeRoot->getInstruction();
+ Value* leftVal = subtreeRoot->leftChild()->getValue();
+ Value* rightVal = subtreeRoot->rightChild()->getValue();
+ const Type* opType = leftVal->getType();
+ bool isFPCompare = opType->isFloatingPoint();
- bool keepBoolVal = parentNode == NULL ||
- ! AllUsesAreBranches(setCCInstr);
- bool subValIsBoolVal = setCCInstr->getOpcode() == Instruction::SetNE;
- bool keepSubVal = keepBoolVal && subValIsBoolVal;
- bool computeBoolVal = keepBoolVal && ! subValIsBoolVal;
-
- bool mustClearReg;
- int valueToMove;
- MachineOpCode movOpCode = 0;
+ // If the boolean result of the SetCC is used outside the current basic
+ // block (so it must be computed as a boolreg) or is used by anything
+ // other than a branch, the boolean must be computed and stored
+ // in a result register. We will use a conditional move to do this.
+ //
+ bool computeBoolVal = (subtreeRoot->parent() == NULL ||
+ ! AllUsesAreBranches(setCCInstr));
- // Mark the 4th operand as being a CC register, and as a def
// A TmpInstruction is created to represent the CC "result".
// Unlike other instances of TmpInstruction, this one is used
// by machine code of multiple LLVM instructions, viz.,
// needs to be a floating point condition code, not an integer
// condition code. Think of this as casting the bool result to
// a FP condition code register.
+ // Later, we mark the 4th operand as being a CC register, and as a def.
//
- Value* leftVal = subtreeRoot->leftChild()->getValue();
- bool isFPCompare = leftVal->getType()->isFloatingPoint();
-
TmpInstruction* tmpForCC = GetTmpForCC(setCCInstr,
- setCCInstr->getParent()->getParent(),
- isFPCompare ? Type::FloatTy : Type::IntTy);
- MachineCodeForInstruction::get(setCCInstr).addTemp(tmpForCC);
-
- if (! isFPCompare)
- {
- // Integer condition: dest. should be %g0 or an integer register.
- // If result must be saved but condition is not SetEQ then we need
- // a separate instruction to compute the bool result, so discard
- // result of SUBcc instruction anyway.
- //
- if (keepSubVal) {
- M = BuildMI(V9::SUBcc, 4)
- .addReg(subtreeRoot->leftChild()->getValue())
- .addReg(subtreeRoot->rightChild()->getValue())
- .addRegDef(subtreeRoot->getValue())
- .addCCReg(tmpForCC, MOTy::Def);
- } else {
- M = BuildMI(V9::SUBcc, 4)
- .addReg(subtreeRoot->leftChild()->getValue())
- .addReg(subtreeRoot->rightChild()->getValue())
- .addMReg(target.getRegInfo().getZeroRegNum(), MOTy::Def)
- .addCCReg(tmpForCC, MOTy::Def);
- }
- mvec.push_back(M);
+ setCCInstr->getParent()->getParent(),
+ leftVal->getType(),
+ MachineCodeForInstruction::get(setCCInstr));
+
+ // If the operands are signed values smaller than 4 bytes, then they
+ // must be sign-extended in order to do a valid 32-bit comparison
+ // and get the right result in the 32-bit CC register (%icc).
+ //
+ Value* leftOpToUse = leftVal;
+ Value* rightOpToUse = rightVal;
+ if (opType->isIntegral() && opType->isSigned()) {
+ unsigned opSize = target.getTargetData().getTypeSize(opType);
+ if (opSize < 4) {
+ MachineCodeForInstruction& mcfi =
+ MachineCodeForInstruction::get(setCCInstr);
+
+ // create temporary virtual regs. to hold the sign-extensions
+ leftOpToUse = new TmpInstruction(mcfi, leftVal);
+ rightOpToUse = new TmpInstruction(mcfi, rightVal);
- if (computeBoolVal)
- { // recompute bool using the integer condition codes
- movOpCode =
- ChooseMovpccAfterSub(subtreeRoot,mustClearReg,valueToMove);
- }
+ // sign-extend each operand and put the result in the temporary reg.
+ target.getInstrInfo().CreateSignExtensionInstructions
+ (target, setCCInstr->getParent()->getParent(),
+ leftVal, leftOpToUse, 8*opSize, mvec, mcfi);
+ target.getInstrInfo().CreateSignExtensionInstructions
+ (target, setCCInstr->getParent()->getParent(),
+ rightVal, rightOpToUse, 8*opSize, mvec, mcfi);
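+          // E.g., two sbyte operands are sign-extended from their low 8
+          // bits so that the 32-bit compare sets %icc correctly.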
}
- else
- {
- // FP condition: dest of FCMP should be some FCCn register
- M = BuildMI(ChooseFcmpInstruction(subtreeRoot), 3)
+ }
+
+ if (! isFPCompare) {
+ // Integer condition: set CC and discard result.
+ mvec.push_back(BuildMI(V9::SUBccr, 4)
+ .addReg(leftOpToUse)
+ .addReg(rightOpToUse)
+ .addMReg(target.getRegInfo().getZeroRegNum(),MOTy::Def)
+ .addCCReg(tmpForCC, MOTy::Def));
+ } else {
+ // FP condition: dest of FCMP should be some FCCn register
+ mvec.push_back(BuildMI(ChooseFcmpInstruction(subtreeRoot), 3)
.addCCReg(tmpForCC, MOTy::Def)
- .addReg(subtreeRoot->leftChild()->getValue())
- .addRegDef(subtreeRoot->rightChild()->getValue());
- mvec.push_back(M);
-
- if (computeBoolVal)
- {// recompute bool using the FP condition codes
- mustClearReg = true;
- valueToMove = 1;
- movOpCode = ChooseMovFpccInstruction(subtreeRoot);
- }
- }
+ .addReg(leftOpToUse)
+ .addReg(rightOpToUse));
+ }
- if (computeBoolVal)
- {
- if (mustClearReg)
- {// Unconditionally set register to 0
- M = BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(setCCInstr);
- mvec.push_back(M);
- }
-
- // Now conditionally move `valueToMove' (0 or 1) into the register
- // Mark the register as a use (as well as a def) because the old
- // value should be retained if the condition is false.
- M = BuildMI(movOpCode, 3).addCCReg(tmpForCC).addZImm(valueToMove)
- .addReg(setCCInstr, MOTy::UseAndDef);
- mvec.push_back(M);
- }
+ if (computeBoolVal) {
+ MachineOpCode movOpCode = (isFPCompare
+ ? ChooseMovFpcciInstruction(subtreeRoot)
+ : ChooseMovpcciForSetCC(subtreeRoot));
+
+ // Unconditionally set register to 0
+ M = BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(setCCInstr);
+ mvec.push_back(M);
+
+ // Now conditionally move 1 into the register.
+ // Mark the register as a use (as well as a def) because the old
+ // value will be retained if the condition is false.
+ M = (BuildMI(movOpCode, 3).addCCReg(tmpForCC).addZImm(1)
+ .addReg(setCCInstr, MOTy::UseAndDef));
+ mvec.push_back(M);
+ }
break;
}
-
+
case 51: // reg: Load(reg)
case 52: // reg: Load(ptrreg)
SetOperandsForMemInstr(ChooseLoadInstruction(
case 56: // reg: GetElemPtrIdx(reg,reg)
// If the GetElemPtr was folded into the user (parent), it will be
// caught above. For other cases, we have to compute the address.
- SetOperandsForMemInstr(V9::ADD, mvec, subtreeRoot, target);
+ SetOperandsForMemInstr(V9::ADDr, mvec, subtreeRoot, target);
break;
case 57: // reg: Alloca: Implement as 1 instruction:
Value* numElementsVal = NULL;
bool isArray = instr->isArrayAllocation();
- if (!isArray ||
- isa<Constant>(numElementsVal = instr->getArraySize()))
- { // total size is constant: generate code for fixed-size alloca
- unsigned numElements = isArray?
- cast<ConstantUInt>(numElementsVal)->getValue() : 1;
- CreateCodeForFixedSizeAlloca(target, instr, tsize,
- numElements, mvec);
- }
- else // total size is not constant.
+ if (!isArray || isa<Constant>(numElementsVal = instr->getArraySize())) {
+ // total size is constant: generate code for fixed-size alloca
+ unsigned numElements = isArray?
+ cast<ConstantUInt>(numElementsVal)->getValue() : 1;
+ CreateCodeForFixedSizeAlloca(target, instr, tsize,
+ numElements, mvec);
+ } else {
+ // total size is not constant.
CreateCodeForVariableSizeAlloca(target, instr, tsize,
numElementsVal, mvec);
+ }
break;
}
//
CallInst *callInstr = cast<CallInst>(subtreeRoot->getInstruction());
Value *callee = callInstr->getCalledValue();
+ Function* calledFunc = dyn_cast<Function>(callee);
- // Create hidden virtual register for return address with type void*
- TmpInstruction* retAddrReg =
- new TmpInstruction(PointerType::get(Type::VoidTy), callInstr);
- MachineCodeForInstruction::get(callInstr).addTemp(retAddrReg);
-
- // Generate the machine instruction and its operands.
- // Use CALL for direct function calls; this optimistically assumes
- // the PC-relative address fits in the CALL address field (22 bits).
- // Use JMPL for indirect calls.
+ // Check if this is an intrinsic function that needs a special code
+ // sequence (e.g., va_start). Indirect calls cannot be special.
//
- if (isa<Function>(callee)) // direct function call
- M = BuildMI(V9::CALL, 1).addPCDisp(callee);
- else // indirect function call
- M = BuildMI(V9::JMPLCALL, 3).addReg(callee).addSImm((int64_t)0)
- .addRegDef(retAddrReg);
- mvec.push_back(M);
+ bool specialIntrinsic = false;
+ LLVMIntrinsic::ID iid;
+      if (calledFunc &&
+          (iid = (LLVMIntrinsic::ID) calledFunc->getIntrinsicID()))
+ specialIntrinsic = CodeGenIntrinsic(iid, *callInstr, target, mvec);
- const FunctionType* funcType =
- cast<FunctionType>(cast<PointerType>(callee->getType())
- ->getElementType());
- bool isVarArgs = funcType->isVarArg();
- bool noPrototype = isVarArgs && funcType->getNumParams() == 0;
-
- // Use a descriptor to pass information about call arguments
- // to the register allocator. This descriptor will be "owned"
- // and freed automatically when the MachineCodeForInstruction
- // object for the callInstr goes away.
- CallArgsDescriptor* argDesc = new CallArgsDescriptor(callInstr,
- retAddrReg, isVarArgs, noPrototype);
-
- assert(callInstr->getOperand(0) == callee
- && "This is assumed in the loop below!");
+ // If not, generate the normal call sequence for the function.
+ // This can also handle any intrinsics that are just function calls.
+ //
+ if (! specialIntrinsic) {
+ Function* currentFunc = callInstr->getParent()->getParent();
+ MachineFunction& MF = MachineFunction::get(currentFunc);
+ MachineCodeForInstruction& mcfi =
+ MachineCodeForInstruction::get(callInstr);
+ const UltraSparcRegInfo& regInfo =
+ (UltraSparcRegInfo&) target.getRegInfo();
+ const TargetFrameInfo& frameInfo = target.getFrameInfo();
+
+ // Create hidden virtual register for return address with type void*
+ TmpInstruction* retAddrReg =
+ new TmpInstruction(mcfi, PointerType::get(Type::VoidTy), callInstr);
+
+ // Generate the machine instruction and its operands.
+ // Use CALL for direct function calls; this optimistically assumes
+ // the PC-relative address fits in the CALL address field (22 bits).
+ // Use JMPL for indirect calls.
+ // This will be added to mvec later, after operand copies.
+ //
+ MachineInstr* callMI;
+ if (calledFunc) // direct function call
+ callMI = BuildMI(V9::CALL, 1).addPCDisp(callee);
+ else // indirect function call
+ callMI = (BuildMI(V9::JMPLCALLi,3).addReg(callee)
+ .addSImm((int64_t)0).addRegDef(retAddrReg));
+
+ const FunctionType* funcType =
+ cast<FunctionType>(cast<PointerType>(callee->getType())
+ ->getElementType());
+ bool isVarArgs = funcType->isVarArg();
+ bool noPrototype = isVarArgs && funcType->getNumParams() == 0;
- for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i)
- {
- Value* argVal = callInstr->getOperand(i);
- Instruction* intArgReg = NULL;
-
+ // Use a descriptor to pass information about call arguments
+ // to the register allocator. This descriptor will be "owned"
+ // and freed automatically when the MachineCodeForInstruction
+ // object for the callInstr goes away.
+ CallArgsDescriptor* argDesc =
+ new CallArgsDescriptor(callInstr, retAddrReg,isVarArgs,noPrototype);
+ assert(callInstr->getOperand(0) == callee
+ && "This is assumed in the loop below!");
+
+ // Insert sign-extension instructions for small signed values,
+ // if this is an unknown function (i.e., called via a funcptr)
+ // or an external one (i.e., which may not be compiled by llc).
+ //
+ if (calledFunc == NULL || calledFunc->isExternal()) {
+ for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i) {
+ Value* argVal = callInstr->getOperand(i);
+ const Type* argType = argVal->getType();
+ if (argType->isIntegral() && argType->isSigned()) {
+ unsigned argSize = target.getTargetData().getTypeSize(argType);
+ if (argSize <= 4) {
+ // create a temporary virtual reg. to hold the sign-extension
+ TmpInstruction* argExtend = new TmpInstruction(mcfi, argVal);
+
+ // sign-extend argVal and put the result in the temporary reg.
+ target.getInstrInfo().CreateSignExtensionInstructions
+ (target, currentFunc, argVal, argExtend,
+ 8*argSize, mvec, mcfi);
+
+ // replace argVal with argExtend in CallArgsDescriptor
+ argDesc->getArgInfo(i-1).replaceArgVal(argExtend);
+ }
+ }
+ }
+ }
+
+ // Insert copy instructions to get all the arguments into
+ // all the places that they need to be.
+ //
+ for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i) {
+ int argNo = i-1;
+ CallArgInfo& argInfo = argDesc->getArgInfo(argNo);
+ Value* argVal = argInfo.getArgVal(); // don't use callInstr arg here
+ const Type* argType = argVal->getType();
+ unsigned regType = regInfo.getRegTypeForDataType(argType);
+ unsigned argSize = target.getTargetData().getTypeSize(argType);
+ int regNumForArg = TargetRegInfo::getInvalidRegNum();
+ unsigned regClassIDOfArgReg;
+
// Check for FP arguments to varargs functions.
// Any such argument in the first $K$ args must be passed in an
- // integer register, where K = #integer argument registers.
- if (isVarArgs && argVal->getType()->isFloatingPoint())
- {
- // If it is a function with no prototype, pass value
- // as an FP value as well as a varargs value
- if (noPrototype)
- argDesc->getArgInfo(i-1).setUseFPArgReg();
+ // integer register. If there is no prototype, it must also
+ // be passed as an FP register.
+ // K = #integer argument registers.
+          bool isFPArg = argType->isFloatingPoint();
+ if (isVarArgs && isFPArg) {
+
+ if (noPrototype) {
+ // It is a function with no prototype: pass value
+ // as an FP value as well as a varargs value. The FP value
+ // may go in a register or on the stack. The copy instruction
+ // to the outgoing reg/stack is created by the normal argument
+ // handling code since this is the "normal" passing mode.
+ //
+ regNumForArg = regInfo.regNumForFPArg(regType,
+ false, false, argNo,
+ regClassIDOfArgReg);
+ if (regNumForArg == regInfo.getInvalidRegNum())
+ argInfo.setUseStackSlot();
+ else
+ argInfo.setUseFPArgReg();
+ }
+
+ // If this arg. is in the first $K$ regs, add special copy-
+ // float-to-int instructions to pass the value as an int.
+ // To check if it is in the first $K$, get the register
+ // number for the arg #i. These copy instructions are
+ // generated here because they are extra cases and not needed
+ // for the normal argument handling (some code reuse is
+ // possible though -- later).
+ //
+ int copyRegNum = regInfo.regNumForIntArg(false, false, argNo,
+ regClassIDOfArgReg);
+ if (copyRegNum != regInfo.getInvalidRegNum()) {
+ // Create a virtual register to represent copyReg. Mark
+ // this vreg as being an implicit operand of the call MI
+ const Type* loadTy = (argType == Type::FloatTy
+ ? Type::IntTy : Type::LongTy);
+ TmpInstruction* argVReg = new TmpInstruction(mcfi, loadTy,
+ argVal, NULL,
+ "argRegCopy");
+ callMI->addImplicitRef(argVReg);
- // If this arg. is in the first $K$ regs, add a copy
- // float-to-int instruction to pass the value as an integer.
- if (i <= target.getRegInfo().GetNumOfIntArgRegs())
- {
- MachineCodeForInstruction &destMCFI =
- MachineCodeForInstruction::get(callInstr);
- intArgReg = new TmpInstruction(Type::IntTy, argVal);
- destMCFI.addTemp(intArgReg);
-
- vector<MachineInstr*> copyMvec;
- target.getInstrInfo().CreateCodeToCopyFloatToInt(target,
- callInstr->getParent()->getParent(),
- argVal, (TmpInstruction*) intArgReg,
- copyMvec, destMCFI);
- mvec.insert(mvec.begin(),copyMvec.begin(),copyMvec.end());
+ // Get a temp stack location to use to copy
+ // float-to-int via the stack.
+ //
+ // FIXME: For now, we allocate permanent space because
+ // the stack frame manager does not allow locals to be
+ // allocated (e.g., for alloca) after a temp is
+ // allocated!
+ //
+ // int tmpOffset = MF.getInfo()->pushTempValue(argSize);
+ int tmpOffset = MF.getInfo()->allocateLocalVar(argVReg);
- argDesc->getArgInfo(i-1).setUseIntArgReg();
- argDesc->getArgInfo(i-1).setArgCopy(intArgReg);
- }
- else
- // Cannot fit in first $K$ regs so pass the arg on the stack
- argDesc->getArgInfo(i-1).setUseStackSlot();
+ // Generate the store from FP reg to stack
+ unsigned StoreOpcode = ChooseStoreInstruction(argType);
+ M = BuildMI(convertOpcodeFromRegToImm(StoreOpcode), 3)
+ .addReg(argVal).addMReg(regInfo.getFramePointer())
+ .addSImm(tmpOffset);
+ mvec.push_back(M);
+
+ // Generate the load from stack to int arg reg
+ unsigned LoadOpcode = ChooseLoadInstruction(loadTy);
+ M = BuildMI(convertOpcodeFromRegToImm(LoadOpcode), 3)
+ .addMReg(regInfo.getFramePointer()).addSImm(tmpOffset)
+ .addReg(argVReg, MOTy::Def);
+
+ // Mark operand with register it should be assigned
+ // both for copy and for the callMI
+ M->SetRegForOperand(M->getNumOperands()-1, copyRegNum);
+ callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1,
+ copyRegNum);
+ mvec.push_back(M);
+
+ // Add info about the argument to the CallArgsDescriptor
+ argInfo.setUseIntArgReg();
+ argInfo.setArgCopy(copyRegNum);
+ } else {
+ // Cannot fit in first $K$ regs so pass arg on stack
+ argInfo.setUseStackSlot();
}
-
- if (intArgReg)
- mvec.back()->addImplicitRef(intArgReg);
-
- mvec.back()->addImplicitRef(argVal);
+ } else if (isFPArg) {
+ // Get the outgoing arg reg to see if there is one.
+ regNumForArg = regInfo.regNumForFPArg(regType, false, false,
+ argNo, regClassIDOfArgReg);
+ if (regNumForArg == regInfo.getInvalidRegNum())
+ argInfo.setUseStackSlot();
+ else {
+ argInfo.setUseFPArgReg();
+            regNumForArg = regInfo.getUnifiedRegNum(regClassIDOfArgReg,
+                                                    regNumForArg);
+ }
+ } else {
+ // Get the outgoing arg reg to see if there is one.
+ regNumForArg = regInfo.regNumForIntArg(false,false,
+ argNo, regClassIDOfArgReg);
+ if (regNumForArg == regInfo.getInvalidRegNum())
+ argInfo.setUseStackSlot();
+ else {
+ argInfo.setUseIntArgReg();
+            regNumForArg = regInfo.getUnifiedRegNum(regClassIDOfArgReg,
+                                                    regNumForArg);
+ }
+ }
+
+ //
+ // Now insert copy instructions to stack slot or arg. register
+ //
+ if (argInfo.usesStackSlot()) {
+ // Get the stack offset for this argument slot.
+ // FP args on stack are right justified so adjust offset!
+ // int arguments are also right justified but they are
+ // always loaded as a full double-word so the offset does
+ // not need to be adjusted.
+ int argOffset = frameInfo.getOutgoingArgOffset(MF, argNo);
+ if (argType->isFloatingPoint()) {
+ unsigned slotSize = frameInfo.getSizeOfEachArgOnStack();
+ assert(argSize <= slotSize && "Insufficient slot size!");
+ argOffset += slotSize - argSize;
+ }
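+            // E.g., a 4-byte float in an 8-byte slot is stored at
+            // argOffset + 4, right-justified within the slot.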
+
+ // Now generate instruction to copy argument to stack
+ MachineOpCode storeOpCode =
+ (argType->isFloatingPoint()
+ ? ((argSize == 4)? V9::STFi : V9::STDFi) : V9::STXi);
+
+ M = BuildMI(storeOpCode, 3).addReg(argVal)
+ .addMReg(regInfo.getStackPointer()).addSImm(argOffset);
+ mvec.push_back(M);
+ }
+ else if (regNumForArg != regInfo.getInvalidRegNum()) {
+
+ // Create a virtual register to represent the arg reg. Mark
+ // this vreg as being an implicit operand of the call MI.
+ TmpInstruction* argVReg =
+ new TmpInstruction(mcfi, argVal, NULL, "argReg");
+
+ callMI->addImplicitRef(argVReg);
+
+ // Generate the reg-to-reg copy into the outgoing arg reg.
+ // -- For FP values, create a FMOVS or FMOVD instruction
+ // -- For non-FP values, create an add-with-0 instruction
+ if (argType->isFloatingPoint())
+            M = (BuildMI(argType == Type::FloatTy ? V9::FMOVS : V9::FMOVD, 2)
+                 .addReg(argVal).addReg(argVReg, MOTy::Def));
+ else
+ M = (BuildMI(ChooseAddInstructionByType(argType), 3)
+ .addReg(argVal).addSImm((int64_t) 0)
+ .addReg(argVReg, MOTy::Def));
+
+ // Mark the operand with the register it should be assigned
+ M->SetRegForOperand(M->getNumOperands()-1, regNumForArg);
+ callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1,
+ regNumForArg);
+
+ mvec.push_back(M);
+ }
+ else
+ assert(argInfo.getArgCopy() != regInfo.getInvalidRegNum() &&
+ "Arg. not in stack slot, primary or secondary register?");
}
-
- // Add the return value as an implicit ref. The call operands
- // were added above.
- if (callInstr->getType() != Type::VoidTy)
- mvec.back()->addImplicitRef(callInstr, /*isDef*/ true);
-
- // For the CALL instruction, the ret. addr. reg. is also implicit
- if (isa<Function>(callee))
- mvec.back()->addImplicitRef(retAddrReg, /*isDef*/ true);
-
- // delay slot
- mvec.push_back(BuildMI(V9::NOP, 0));
+
+ // add call instruction and delay slot before copying return value
+ mvec.push_back(callMI);
+ mvec.push_back(BuildMI(V9::NOP, 0));
+
+ // Add the return value as an implicit ref. The call operands
+ // were added above. Also, add code to copy out the return value.
+ // This is always register-to-register for int or FP return values.
+ //
+ if (callInstr->getType() != Type::VoidTy) {
+ // Get the return value reg.
+ const Type* retType = callInstr->getType();
+
+ int regNum = (retType->isFloatingPoint()
+ ? (unsigned) SparcFloatRegClass::f0
+ : (unsigned) SparcIntRegClass::o0);
+ unsigned regClassID = regInfo.getRegClassIDOfType(retType);
+ regNum = regInfo.getUnifiedRegNum(regClassID, regNum);
+
+ // Create a virtual register to represent it and mark
+ // this vreg as being an implicit operand of the call MI
+ TmpInstruction* retVReg =
+ new TmpInstruction(mcfi, callInstr, NULL, "argReg");
+
+ callMI->addImplicitRef(retVReg, /*isDef*/ true);
+
+ // Generate the reg-to-reg copy from the return value reg.
+ // -- For FP values, create a FMOVS or FMOVD instruction
+ // -- For non-FP values, create an add-with-0 instruction
+ if (retType->isFloatingPoint())
+        M = (BuildMI(retType == Type::FloatTy ? V9::FMOVS : V9::FMOVD, 2)
+ .addReg(retVReg).addReg(callInstr, MOTy::Def));
+ else
+ M = (BuildMI(ChooseAddInstructionByType(retType), 3)
+ .addReg(retVReg).addSImm((int64_t) 0)
+ .addReg(callInstr, MOTy::Def));
+
+ // Mark the operand with the register it should be assigned
+ // Also mark the implicit ref of the call defining this operand
+ M->SetRegForOperand(0, regNum);
+ callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1,regNum);
+
+ mvec.push_back(M);
+ }
+
+ // For the CALL instruction, the ret. addr. reg. is also implicit
+ if (isa<Function>(callee))
+ callMI->addImplicitRef(retAddrReg, /*isDef*/ true);
+
+ MF.getInfo()->popAllTempValues(); // free temps used for this inst
+ }
+
break;
}
const Type* opType = argVal1->getType();
assert((opType->isInteger() || isa<PointerType>(opType)) &&
"Shl unsupported for other types");
+ unsigned opSize = target.getTargetData().getTypeSize(opType);
CreateShiftInstructions(target, shlInstr->getParent()->getParent(),
- (opType == Type::LongTy)? V9::SLLX : V9::SLL,
+                            (opSize > 4) ? V9::SLLXr6 : V9::SLLr5,
argVal1, argVal2, 0, shlInstr, mvec,
MachineCodeForInstruction::get(shlInstr));
break;
}
case 63: // reg: Shr(reg, reg)
- { const Type* opType = subtreeRoot->leftChild()->getValue()->getType();
+ {
+ const Type* opType = subtreeRoot->leftChild()->getValue()->getType();
assert((opType->isInteger() || isa<PointerType>(opType)) &&
"Shr unsupported for other types");
+ unsigned opSize = target.getTargetData().getTypeSize(opType);
Add3OperandInstr(opType->isSigned()
- ? (opType == Type::LongTy ? V9::SRAX : V9::SRA)
- : (opType == Type::LongTy ? V9::SRLX : V9::SRL),
+ ? (opSize > 4? V9::SRAXr6 : V9::SRAr5)
+ : (opSize > 4? V9::SRLXr6 : V9::SRLr5),
subtreeRoot, mvec);
break;
}
case 64: // reg: Phi(reg,reg)
break; // don't forward the value
+ case 65: // reg: VANext(reg): the va_next(va_list, type) instruction
+ { // Increment the va_list pointer register according to the type.
+ // All LLVM argument types are <= 64 bits, so use one doubleword.
+ Instruction* vaNextI = subtreeRoot->getInstruction();
+ assert(target.getTargetData().getTypeSize(vaNextI->getType()) <= 8 &&
+ "We assumed that all LLVM parameter types <= 8 bytes!");
+ int argSize = target.getFrameInfo().getSizeOfEachArgOnStack();
+ mvec.push_back(BuildMI(V9::ADDi, 3).addReg(vaNextI->getOperand(0)).
+ addSImm(argSize).addRegDef(vaNextI));
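+        // (Each outgoing arg slot is one doubleword, so the va_list
+        // pointer advances by 8 bytes per argument.)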
+ break;
+ }
+
+ case 66: // reg: VAArg (reg): the va_arg instruction
+ { // Load argument from stack using current va_list pointer value.
+        // Use a 64-bit load for all non-FP args, and LDF or LDDF for
+        // single- or double-precision FP args, respectively.
+ Instruction* vaArgI = subtreeRoot->getInstruction();
+ MachineOpCode loadOp = (vaArgI->getType()->isFloatingPoint()
+ ? (vaArgI->getType() == Type::FloatTy
+ ? V9::LDFi : V9::LDDFi)
+ : V9::LDXi);
+ mvec.push_back(BuildMI(loadOp, 3).addReg(vaArgI->getOperand(0)).
+ addSImm(0).addRegDef(vaArgI));
+ break;
+ }
+
case 71: // reg: VReg
case 72: // reg: Constant
break; // don't forward the value
}
}
- if (forwardOperandNum >= 0)
- { // We did not generate a machine instruction but need to use operand.
- // If user is in the same tree, replace Value in its machine operand.
- // If not, insert a copy instruction which should get coalesced away
- // by register allocation.
- if (subtreeRoot->parent() != NULL)
- ForwardOperand(subtreeRoot, subtreeRoot->parent(), forwardOperandNum);
- else
- {
- vector<MachineInstr*> minstrVec;
- Instruction* instr = subtreeRoot->getInstruction();
- target.getInstrInfo().
- CreateCopyInstructionsByType(target,
- instr->getParent()->getParent(),
- instr->getOperand(forwardOperandNum),
- instr, minstrVec,
- MachineCodeForInstruction::get(instr));
- assert(minstrVec.size() > 0);
- mvec.insert(mvec.end(), minstrVec.begin(), minstrVec.end());
- }
+ if (forwardOperandNum >= 0) {
+ // We did not generate a machine instruction but need to use operand.
+ // If user is in the same tree, replace Value in its machine operand.
+ // If not, insert a copy instruction which should get coalesced away
+ // by register allocation.
+ if (subtreeRoot->parent() != NULL)
+ ForwardOperand(subtreeRoot, subtreeRoot->parent(), forwardOperandNum);
+ else {
+ std::vector<MachineInstr*> minstrVec;
+ Instruction* instr = subtreeRoot->getInstruction();
+ target.getInstrInfo().
+ CreateCopyInstructionsByType(target,
+ instr->getParent()->getParent(),
+ instr->getOperand(forwardOperandNum),
+ instr, minstrVec,
+ MachineCodeForInstruction::get(instr));
+ assert(minstrVec.size() > 0);
+ mvec.insert(mvec.end(), minstrVec.begin(), minstrVec.end());
}
+ }
- if (maskUnsignedResult)
- { // If result is unsigned and smaller than int reg size,
- // we need to clear high bits of result value.
- assert(forwardOperandNum < 0 && "Need mask but no instruction generated");
- Instruction* dest = subtreeRoot->getInstruction();
- if (dest->getType()->isUnsigned())
- {
- unsigned destSize=target.getTargetData().getTypeSize(dest->getType());
- if (destSize <= 4)
- { // Mask high bits. Use a TmpInstruction to represent the
- // intermediate result before masking. Since those instructions
- // have already been generated, go back and substitute tmpI
- // for dest in the result position of each one of them.
- TmpInstruction *tmpI = new TmpInstruction(dest->getType(), dest,
- NULL, "maskHi");
- MachineCodeForInstruction::get(dest).addTemp(tmpI);
-
- for (unsigned i=0, N=mvec.size(); i < N; ++i)
- mvec[i]->substituteValue(dest, tmpI);
-
- M = BuildMI(V9::SRL, 3).addReg(tmpI).addZImm(8*(4-destSize))
- .addReg(dest, MOTy::Def);
- mvec.push_back(M);
- }
- else if (destSize < 8)
- assert(0 && "Unsupported type size: 32 < size < 64 bits");
+ if (maskUnsignedResult) {
+ // If result is unsigned and smaller than int reg size,
+ // we need to clear high bits of result value.
+ assert(forwardOperandNum < 0 && "Need mask but no instruction generated");
+ Instruction* dest = subtreeRoot->getInstruction();
+ if (dest->getType()->isUnsigned()) {
+ unsigned destSize=target.getTargetData().getTypeSize(dest->getType());
+ if (destSize <= 4) {
+        // Mask high 64 - N bits, where N = 8*destSize (result size in bits).
+
+ // Use a TmpInstruction to represent the
+ // intermediate result before masking. Since those instructions
+ // have already been generated, go back and substitute tmpI
+ // for dest in the result position of each one of them.
+ //
+ MachineCodeForInstruction& mcfi = MachineCodeForInstruction::get(dest);
+ TmpInstruction *tmpI = new TmpInstruction(mcfi, dest->getType(),
+ dest, NULL, "maskHi");
+ Value* srlArgToUse = tmpI;
+
+ unsigned numSubst = 0;
+ for (unsigned i=0, N=mvec.size(); i < N; ++i) {
+
+ // Make sure we substitute all occurrences of dest in these instrs.
+ // Otherwise, we will have bogus code.
+ bool someArgsWereIgnored = false;
+
+ // Make sure not to substitute an upwards-exposed use -- that would
+ // introduce a use of `tmpI' with no preceding def. Therefore,
+ // substitute a use or def-and-use operand only if a previous def
+          // operand has already been substituted (i.e., numSubst > 0).
+ //
+ numSubst += mvec[i]->substituteValue(dest, tmpI,
+ /*defsOnly*/ numSubst == 0,
+ /*notDefsAndUses*/ numSubst > 0,
+ someArgsWereIgnored);
+ assert(!someArgsWereIgnored &&
+ "Operand `dest' exists but not replaced: probably bogus!");
}
+ assert(numSubst > 0 && "Operand `dest' not replaced: probably bogus!");
+
+        // Left shift by 32-N bits if the result size N (in bits) is
+        // less than 32.
+ // Use another tmp. virtual register to represent this result.
+ if (destSize < 4) {
+ srlArgToUse = new TmpInstruction(mcfi, dest->getType(),
+ tmpI, NULL, "maskHi2");
+ mvec.push_back(BuildMI(V9::SLLXi6, 3).addReg(tmpI)
+ .addZImm(8*(4-destSize))
+ .addReg(srlArgToUse, MOTy::Def));
+ }
+
+ // Logical right shift 32-N to get zero extension in top 64-N bits.
+ mvec.push_back(BuildMI(V9::SRLi5, 3).addReg(srlArgToUse)
+ .addZImm(8*(4-destSize)).addReg(dest, MOTy::Def));
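+          // E.g., for a ubyte result (destSize == 1), SLLX by 24 moves the
+          // low byte to bits 24..31, and SRL by 24 moves it back to bits
+          // 0..7 while clearing bits 8..63 (SRL zeroes the high 32 bits).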
+
+ } else if (destSize < 8) {
+ assert(0 && "Unsupported type size: 32 < size < 64 bits");
+ }
}
+ }
}