//===-- SparcInstrSelection.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// BURS instruction selection for SPARC V9 architecture.
//
//===----------------------------------------------------------------------===//
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/InstrForest.h"
#include "llvm/CodeGen/InstrSelection.h"
#include "llvm/CodeGen/InstrSelectionSupport.h"
#include "llvm/CodeGen/MachineCodeForInstruction.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrAnnot.h"
#include "SparcInstrSelectionSupport.h"
#include "SparcInternals.h"
#include "SparcRegClassInfo.h"
#include "SparcRegInfo.h"
#include "Support/MathExtras.h"
static inline void Add3OperandInstr(unsigned Opcode, InstructionNode* Node,
                                    std::vector<MachineInstr*>& mvec) {
  mvec.push_back(BuildMI(Opcode, 3).addReg(Node->leftChild()->getValue())
                                   .addReg(Node->rightChild()->getValue())
                                   .addRegDef(Node->getValue()));
}
//---------------------------------------------------------------------------
// Function: FoldGetElemChain
//
// Fold a chain of GetElementPtr instructions containing only
// constant offsets into an equivalent (Pointer, IndexVector) pair.
// Returns the pointer Value, and stores the resulting IndexVector
// in argument chainIdxVec. This is a helper function for
// FoldConstantIndices that does the actual folding.
//---------------------------------------------------------------------------
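//
// Illustrative example (assumed, not taken from the original sources): the
// chain
//     %p = getelementptr {int, [4 x int]}* %S, long 0, ubyte 1
//     %q = getelementptr [4 x int]* %p, long 0, long 2
// uses only constant indices, so it folds into the single pair
// (pointer = %S, indices = [0, 1, 2]), and only one address computation is
// emitted for the whole chain.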
// Check for a constant 0.
static inline bool
IsZero(Value* idx) {
  return (idx == ConstantSInt::getNullValue(idx->getType()));
}
static Value*
FoldGetElemChain(InstrTreeNode* ptrNode, std::vector<Value*>& chainIdxVec,
                 bool lastInstHasLeadingNonZero)
{
  InstructionNode* gepNode = dyn_cast<InstructionNode>(ptrNode);
  GetElementPtrInst* gepInst =
    dyn_cast_or_null<GetElementPtrInst>(gepNode ? gepNode->getInstruction() :0);

  // ptr value is not computed in this tree or ptr value does not come from GEP
  if (gepInst == NULL)
    return NULL;

  // Return NULL if we don't fold any instructions in.
  Value* ptrVal = NULL;

  // Now chase the chain of getElementPtr instructions, if any.
  // Check for any non-constant indices and stop there.
  // Also, stop if the first index of child is a non-zero array index
  // and the last index of the current node is a non-array index:
  // in that case, a non-array declared type is being accessed as an array
  // which is not type-safe, but could be legal.
  InstructionNode* ptrChild = gepNode;
  while (ptrChild && (ptrChild->getOpLabel() == Instruction::GetElementPtr ||
                      ptrChild->getOpLabel() == GetElemPtrIdx))
  {
    // Child is a GetElemPtr instruction
    gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
    User::op_iterator OI, firstIdx = gepInst->idx_begin();
    User::op_iterator lastIdx = gepInst->idx_end();
    bool allConstantOffsets = true;

    // The first index of every GEP must be an array index.
    assert((*firstIdx)->getType() == Type::LongTy &&
           "INTERNAL ERROR: Structure index for a pointer type!");

    // If the last instruction had a leading non-zero index, check if the
    // current one references a sequential (i.e., indexable) type.
    // If not, the code is not type-safe and we would create an illegal GEP
    // by folding them, so don't fold any more instructions.
    if (lastInstHasLeadingNonZero)
      if (! isa<SequentialType>(gepInst->getType()->getElementType()))
        break;   // cannot fold in any preceding getElementPtr instrs.

    // Check that all offsets are constant for this instruction
    for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
      allConstantOffsets = isa<ConstantInt>(*OI);

    if (allConstantOffsets) {
      // Get pointer value out of ptrChild.
      ptrVal = gepInst->getPointerOperand();

      // Insert its index vector at the start, skipping any leading [0].
      // Remember the old size to check if anything was inserted.
      unsigned oldSize = chainIdxVec.size();
      int firstIsZero = IsZero(*firstIdx);
      chainIdxVec.insert(chainIdxVec.begin(), firstIdx + firstIsZero, lastIdx);

      // Remember if it has a leading zero index: it will be discarded later.
      if (oldSize < chainIdxVec.size())
        lastInstHasLeadingNonZero = !firstIsZero;

      // Mark the folded node so no code is generated for it.
      ((InstructionNode*) ptrChild)->markFoldedIntoParent();

      // Get the previous GEP instruction and continue trying to fold
      ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
    } else // cannot fold this getElementPtr instr. or any preceding ones
      break;
  }

  // If the first getElementPtr instruction had a leading [0], add it back.
  // Note that this instruction is the *last* one that was successfully
  // folded *and* contributed any indices, in the loop above.
  if (ptrVal && ! lastInstHasLeadingNonZero)
    chainIdxVec.insert(chainIdxVec.begin(), ConstantSInt::get(Type::LongTy,0));

  return ptrVal;
}
//---------------------------------------------------------------------------
// Function: GetGEPInstArgs
//
// Helper function for GetMemInstArgs that handles the final getElementPtr
// instruction used by (or same as) the memory operation.
// Extracts the indices of the current instruction and tries to fold in
// preceding ones if all indices of the current one are constant.
//---------------------------------------------------------------------------

static Value*
GetGEPInstArgs(InstructionNode* gepNode,
               std::vector<Value*>& idxVec,
               bool& allConstantIndices)
{
  allConstantIndices = true;
  GetElementPtrInst* gepI = cast<GetElementPtrInst>(gepNode->getInstruction());

  // Default pointer is the one from the current instruction.
  Value* ptrVal = gepI->getPointerOperand();
  InstrTreeNode* ptrChild = gepNode->leftChild();

  // Extract the index vector of the GEP instruction.
  // If all indices are constant and first index is zero, try to fold
  // in preceding GEPs with all constant indices.
  for (User::op_iterator OI=gepI->idx_begin(), OE=gepI->idx_end();
       allConstantIndices && OI != OE; ++OI)
    if (! isa<Constant>(*OI))
      allConstantIndices = false;     // note: this also terminates loop!

  // If we have only constant indices, fold chains of constant indices
  // in this and any preceding GetElemPtr instructions.
  bool foldedGEPs = false;
  bool leadingNonZeroIdx = gepI && ! IsZero(*gepI->idx_begin());
  if (allConstantIndices)
    if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx)) {
      ptrVal = newPtr;
      foldedGEPs = true;
    }

  // Append the index vector of the current instruction.
  // Skip the leading [0] index if preceding GEPs were folded into this.
  idxVec.insert(idxVec.end(),
                gepI->idx_begin() + (foldedGEPs && !leadingNonZeroIdx),
                gepI->idx_end());

  return ptrVal;
}
//---------------------------------------------------------------------------
// Function: GetMemInstArgs
//
// Get the pointer value and the index vector for a memory operation
// (GetElementPtr, Load, or Store). If all indices of the given memory
// operation are constant, fold in constant indices in a chain of
// preceding GetElementPtr instructions (if any), and return the
// pointer value of the first instruction in the chain.
// All folded instructions are marked so no code is generated for them.
//
// Returns the pointer Value to use.
// Returns the resulting IndexVector in idxVec.
// Returns true/false in allConstantIndices if all indices are/aren't const.
//---------------------------------------------------------------------------
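//
// Illustrative example (assumed): for
//     %p = getelementptr {int, double}* %S, long 0, ubyte 1
//     %v = load double* %p
// where the getelementptr feeds only this load, GetMemInstArgs returns %S
// with idxVec = [0, 1] and marks the getelementptr as folded, so the load
// can be emitted as a single memory access off %S.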
static Value*
GetMemInstArgs(InstructionNode* memInstrNode,
               std::vector<Value*>& idxVec,
               bool& allConstantIndices)
{
  allConstantIndices = false;
  Instruction* memInst = memInstrNode->getInstruction();
  assert(idxVec.size() == 0 && "Need empty vector to return indices");

  // If there is a GetElemPtr instruction to fold in to this instr,
  // it must be in the left child for Load and GetElemPtr, and in the
  // right child for Store instructions.
  InstrTreeNode* ptrChild = (memInst->getOpcode() == Instruction::Store
                             ? memInstrNode->rightChild()
                             : memInstrNode->leftChild());

  // Default pointer is the one from the current instruction.
  Value* ptrVal = ptrChild->getValue();

  // Find the "last" GetElemPtr instruction: this one or the immediate child.
  // There will be none if this is a load or a store from a scalar pointer.
  InstructionNode* gepNode = NULL;
  if (isa<GetElementPtrInst>(memInst))
    gepNode = memInstrNode;
  else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal)) {
    // Child of load/store is a GEP and memInst is its only use.
    // Use its indices and mark it as folded.
    gepNode = cast<InstructionNode>(ptrChild);
    gepNode->markFoldedIntoParent();
  }

  // If there are no indices, return the current pointer.
  // Else extract the pointer from the GEP and fold the indices.
  return gepNode ? GetGEPInstArgs(gepNode, idxVec, allConstantIndices)
                 : ptrVal;
}
//************************ Internal Functions ******************************/

static inline MachineOpCode
ChooseBprInstruction(const InstructionNode* instrNode)
{
  MachineOpCode opCode;

  Instruction* setCCInstr =
    ((InstructionNode*) instrNode->leftChild())->getInstruction();

  switch(setCCInstr->getOpcode())
  {
  case Instruction::SetEQ: opCode = V9::BRZ;   break;
  case Instruction::SetNE: opCode = V9::BRNZ;  break;
  case Instruction::SetLE: opCode = V9::BRLEZ; break;
  case Instruction::SetGE: opCode = V9::BRGEZ; break;
  case Instruction::SetLT: opCode = V9::BRLZ;  break;
  case Instruction::SetGT: opCode = V9::BRGZ;  break;
  default:
    assert(0 && "Unrecognized VM instruction!");
    opCode = V9::INVALID_OPCODE;
    break;
  }

  return opCode;
}
static inline MachineOpCode
ChooseBpccInstruction(const InstructionNode* instrNode,
                      const BinaryOperator* setCCInstr)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  bool isSigned = setCCInstr->getOperand(0)->getType()->isSigned();

  if (isSigned) {
    switch(setCCInstr->getOpcode())
    {
    case Instruction::SetEQ: opCode = V9::BE;  break;
    case Instruction::SetNE: opCode = V9::BNE; break;
    case Instruction::SetLE: opCode = V9::BLE; break;
    case Instruction::SetGE: opCode = V9::BGE; break;
    case Instruction::SetLT: opCode = V9::BL;  break;
    case Instruction::SetGT: opCode = V9::BG;  break;
    default:
      assert(0 && "Unrecognized VM instruction!");
      break;
    }
  } else {
    switch(setCCInstr->getOpcode())
    {
    case Instruction::SetEQ: opCode = V9::BE;   break;
    case Instruction::SetNE: opCode = V9::BNE;  break;
    case Instruction::SetLE: opCode = V9::BLEU; break;
    case Instruction::SetGE: opCode = V9::BCC;  break;
    case Instruction::SetLT: opCode = V9::BCS;  break;
    case Instruction::SetGT: opCode = V9::BGU;  break;
    default:
      assert(0 && "Unrecognized VM instruction!");
      break;
    }
  }

  return opCode;
}
static inline MachineOpCode
ChooseBFpccInstruction(const InstructionNode* instrNode,
                       const BinaryOperator* setCCInstr)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  switch(setCCInstr->getOpcode())
  {
  case Instruction::SetEQ: opCode = V9::FBE;  break;
  case Instruction::SetNE: opCode = V9::FBNE; break;
  case Instruction::SetLE: opCode = V9::FBLE; break;
  case Instruction::SetGE: opCode = V9::FBGE; break;
  case Instruction::SetLT: opCode = V9::FBL;  break;
  case Instruction::SetGT: opCode = V9::FBG;  break;
  default:
    assert(0 && "Unrecognized VM instruction!");
    break;
  }

  return opCode;
}
// Create a unique TmpInstruction for a boolean value,
// representing the CC register used by a branch on that value.
// For now, hack this using a little static cache of TmpInstructions.
// Eventually the entire BURG instruction selection should be put
// into a separate class that can hold such information.
// The static cache is not too bad because the memory for these
// TmpInstructions will be freed along with the rest of the Function anyway.
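//
// For example (illustrative): if `%b = setlt int %x, %y' feeds two different
// conditional branches, both branches must refer to the same condition-code
// register value, so both lookups below return the one TmpInstruction that
// was created for %b the first time around.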
static TmpInstruction*
GetTmpForCC(Value* boolVal, const Function *F, const Type* ccType,
            MachineCodeForInstruction& mcfi)
{
  typedef hash_map<const Value*, TmpInstruction*> BoolTmpCache;
  static BoolTmpCache boolToTmpCache;     // Map boolVal -> TmpInstruction*
  static const Function *lastFunction = 0;// Use to flush cache between funcs

  assert(boolVal->getType() == Type::BoolTy && "Weird but ok! Delete assert");

  if (lastFunction != F) {
    lastFunction = F;
    boolToTmpCache.clear();
  }

  // Look for tmpI and create a new one otherwise. The new value is
  // directly written to the map using the ref returned by operator[].
  TmpInstruction*& tmpI = boolToTmpCache[boolVal];
  if (tmpI == NULL)
    tmpI = new TmpInstruction(mcfi, ccType, boolVal);

  return tmpI;
}
static inline MachineOpCode
ChooseBccInstruction(const InstructionNode* instrNode,
                     const Type*& setCCType)
{
  InstructionNode* setCCNode = (InstructionNode*) instrNode->leftChild();
  assert(setCCNode->getOpLabel() == SetCCOp);
  BinaryOperator* setCCInstr =cast<BinaryOperator>(setCCNode->getInstruction());
  setCCType = setCCInstr->getOperand(0)->getType();

  if (setCCType->isFloatingPoint())
    return ChooseBFpccInstruction(instrNode, setCCInstr);
  else
    return ChooseBpccInstruction(instrNode, setCCInstr);
}
// WARNING: since this function has only one caller, it always returns
// the opcode that expects an immediate and a register. If this function
// is ever used in cases where an opcode that takes two registers is required,
// then modify this function and use convertOpcodeFromRegToImm() where required.
//
// It will be necessary to expand convertOpcodeFromRegToImm() to handle the
// new cases of opcodes.
static inline MachineOpCode
ChooseMovFpcciInstruction(const InstructionNode* instrNode)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  switch(instrNode->getInstruction()->getOpcode())
  {
  case Instruction::SetEQ: opCode = V9::MOVFEi;  break;
  case Instruction::SetNE: opCode = V9::MOVFNEi; break;
  case Instruction::SetLE: opCode = V9::MOVFLEi; break;
  case Instruction::SetGE: opCode = V9::MOVFGEi; break;
  case Instruction::SetLT: opCode = V9::MOVFLi;  break;
  case Instruction::SetGT: opCode = V9::MOVFGi;  break;
  default:
    assert(0 && "Unrecognized VM instruction!");
    break;
  }

  return opCode;
}
// ChooseMovpcciForSetCC -- Choose a conditional-move instruction
// based on the type of SetCC operation.
//
// WARNING: since this function has only one caller, it always returns
// the opcode that expects an immediate and a register. If this function
// is ever used in cases where an opcode that takes two registers is required,
// then modify this function and use convertOpcodeFromRegToImm() where required.
//
// It will be necessary to expand convertOpcodeFromRegToImm() to handle the
// new cases of opcodes.
static inline MachineOpCode
ChooseMovpcciForSetCC(const InstructionNode* instrNode)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  const Type* opType = instrNode->leftChild()->getValue()->getType();
  assert(opType->isIntegral() || isa<PointerType>(opType));
  bool noSign = opType->isUnsigned() || isa<PointerType>(opType);

  switch(instrNode->getInstruction()->getOpcode())
  {
  case Instruction::SetEQ: opCode = V9::MOVEi;                        break;
  case Instruction::SetLE: opCode = noSign? V9::MOVLEUi : V9::MOVLEi; break;
  case Instruction::SetGE: opCode = noSign? V9::MOVCCi  : V9::MOVGEi; break;
  case Instruction::SetLT: opCode = noSign? V9::MOVCSi  : V9::MOVLi;  break;
  case Instruction::SetGT: opCode = noSign? V9::MOVGUi  : V9::MOVGi;  break;
  case Instruction::SetNE: opCode = V9::MOVNEi;                       break;
  default: assert(0 && "Unrecognized LLVM instr!"); break;
  }

  return opCode;
}
// ChooseMovpregiForSetCC -- Choose a conditional-move-on-register-value
// instruction based on the type of SetCC operation. These instructions
// compare a register with 0 and perform the move if the comparison is true.
//
// WARNING: like the previous function, this function always returns
// the opcode that expects an immediate and a register. See above.
static inline MachineOpCode
ChooseMovpregiForSetCC(const InstructionNode* instrNode)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  switch(instrNode->getInstruction()->getOpcode())
  {
  case Instruction::SetEQ: opCode = V9::MOVRZi;   break;
  case Instruction::SetLE: opCode = V9::MOVRLEZi; break;
  case Instruction::SetGE: opCode = V9::MOVRGEZi; break;
  case Instruction::SetLT: opCode = V9::MOVRLZi;  break;
  case Instruction::SetGT: opCode = V9::MOVRGZi;  break;
  case Instruction::SetNE: opCode = V9::MOVRNZi;  break;
  default: assert(0 && "Unrecognized VM instr!"); break;
  }

  return opCode;
}
static inline MachineOpCode
ChooseConvertToFloatInstr(const TargetMachine& target,
                          OpLabel vopCode, const Type* opType)
{
  assert((vopCode == ToFloatTy || vopCode == ToDoubleTy) &&
         "Unrecognized convert-to-float opcode!");
  assert((opType->isIntegral() || opType->isFloatingPoint() ||
          isa<PointerType>(opType))
         && "Trying to convert a non-scalar type to FLOAT/DOUBLE?");

  MachineOpCode opCode = V9::INVALID_OPCODE;

  unsigned opSize = target.getTargetData().getTypeSize(opType);

  if (opType == Type::FloatTy)
    opCode = (vopCode == ToFloatTy? V9::NOP : V9::FSTOD);
  else if (opType == Type::DoubleTy)
    opCode = (vopCode == ToFloatTy? V9::FDTOS : V9::NOP);
  else if (opSize <= 4)
    opCode = (vopCode == ToFloatTy? V9::FITOS : V9::FITOD);
  else {
    assert(opSize == 8 && "Unrecognized type size > 4 and < 8!");
    opCode = (vopCode == ToFloatTy? V9::FXTOS : V9::FXTOD);
  }

  return opCode;
}
static inline MachineOpCode
ChooseConvertFPToIntInstr(const TargetMachine& target,
                          const Type* destType, const Type* opType)
{
  assert((opType == Type::FloatTy || opType == Type::DoubleTy)
         && "This function should only be called for FLOAT or DOUBLE");
  assert((destType->isIntegral() || isa<PointerType>(destType))
         && "Trying to convert FLOAT/DOUBLE to a non-scalar type?");

  MachineOpCode opCode = V9::INVALID_OPCODE;

  unsigned destSize = target.getTargetData().getTypeSize(destType);

  if (destType == Type::UIntTy)
    assert(destType != Type::UIntTy && "Expand FP-to-uint beforehand.");
  else if (destSize <= 4)
    opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
  else {
    assert(destSize == 8 && "Unrecognized type size > 4 and < 8!");
    opCode = (opType == Type::FloatTy)? V9::FSTOX : V9::FDTOX;
  }

  return opCode;
}
static MachineInstr*
CreateConvertFPToIntInstr(const TargetMachine& target,
                          Value* srcVal, Value* destVal,
                          const Type* destType)
{
  MachineOpCode opCode = ChooseConvertFPToIntInstr(target, destType,
                                                   srcVal->getType());
  assert(opCode != V9::INVALID_OPCODE && "Expected to need conversion!");
  return BuildMI(opCode, 2).addReg(srcVal).addRegDef(destVal);
}
// CreateCodeToConvertFloatToInt: Convert FP value to signed or unsigned integer.
// The FP value must be converted to the dest type in an FP register,
// and the result is then copied from FP to int register via memory.
// SPARC does not have a float-to-uint conversion, only a float-to-int (fdtoi).
// Since fdtoi converts to signed integers, any FP value V between MAXINT+1
// and MAXUNSIGNED (i.e., 2^31 <= V <= 2^32-1) would be converted incorrectly.
// Therefore, for converting an FP value to uint32_t, we first need to convert
// to uint64_t and then to uint32_t.
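//
// For example (illustrative): the value 3.0e9 lies between 2^31 and 2^32-1,
// so a direct fdtoi could not produce the correct 32-bit unsigned result;
// converting first to a 64-bit integer (fdtox) and then keeping only the
// low 32 bits does.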
static void
CreateCodeToConvertFloatToInt(const TargetMachine& target,
                              Value* opVal, Instruction* destI,
                              std::vector<MachineInstr*>& mvec,
                              MachineCodeForInstruction& mcfi)
{
  Function* F = destI->getParent()->getParent();

  // Create a temporary to represent the FP register into which the
  // int value will be placed after conversion. The type of this temporary
  // depends on the type of FP register to use: single-prec for a 32-bit
  // int or smaller; double-prec for a 64-bit int.
  size_t destSize = target.getTargetData().getTypeSize(destI->getType());

  const Type* castDestType = destI->getType(); // type for the cast instr result
  const Type* castDestRegType;          // type for cast instruction result reg
  TmpInstruction* destForCast;          // dest for cast instruction
  Instruction* fpToIntCopyDest = destI; // dest for fp-reg-to-int-reg copy instr

  // For converting an FP value to uint32_t, we first need to convert to
  // uint64_t and then to uint32_t, as explained above.
  if (destI->getType() == Type::UIntTy) {
    castDestType    = Type::ULongTy;  // use this instead of type of destI
    castDestRegType = Type::DoubleTy; // uint64_t needs 64-bit FP register.
    destForCast     = new TmpInstruction(mcfi, castDestRegType, opVal);
    fpToIntCopyDest = new TmpInstruction(mcfi, castDestType, destForCast);
  } else {
    castDestRegType = (destSize > 4)? Type::DoubleTy : Type::FloatTy;
    destForCast = new TmpInstruction(mcfi, castDestRegType, opVal);
  }

  // Create the fp-to-int conversion instruction (src and dest regs are FP regs)
  mvec.push_back(CreateConvertFPToIntInstr(target, opVal, destForCast,
                                           castDestType));

  // Create the fpreg-to-intreg copy code
  target.getInstrInfo().CreateCodeToCopyFloatToInt(target, F, destForCast,
                                                   fpToIntCopyDest, mvec, mcfi);

  // Create the uint64_t to uint32_t conversion, if needed
  if (destI->getType() == Type::UIntTy)
    target.getInstrInfo().
      CreateZeroExtensionInstructions(target, F, fpToIntCopyDest, destI,
                                      /*numLowBits*/ 32, mvec, mcfi);
}
static inline MachineOpCode
ChooseAddInstruction(const InstructionNode* instrNode)
{
  return ChooseAddInstructionByType(instrNode->getInstruction()->getType());
}


static inline MachineInstr*
CreateMovFloatInstruction(const InstructionNode* instrNode,
                          const Type* resultType)
{
  return BuildMI((resultType == Type::FloatTy) ? V9::FMOVS : V9::FMOVD, 2)
                   .addReg(instrNode->leftChild()->getValue())
                   .addRegDef(instrNode->getValue());
}
static inline MachineInstr*
CreateAddConstInstruction(const InstructionNode* instrNode)
{
  MachineInstr* minstr = NULL;

  Value* constOp = ((InstrTreeNode*) instrNode->rightChild())->getValue();
  assert(isa<Constant>(constOp));

  // Cases worth optimizing are:
  // (1) Add with 0 for float or double: use an FMOV of appropriate type,
  //     instead of an FADD (1 vs 3 cycles). There is no integer MOV.
  if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
    double dval = FPC->getValue();
    if (dval == 0.0)
      minstr = CreateMovFloatInstruction(instrNode,
                                    instrNode->getInstruction()->getType());
  }

  return minstr;
}
static inline MachineOpCode
ChooseSubInstructionByType(const Type* resultType)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  if (resultType->isInteger() || isa<PointerType>(resultType)) {
    opCode = V9::SUBr;
  } else {
    switch(resultType->getPrimitiveID())
    {
    case Type::FloatTyID:  opCode = V9::FSUBS; break;
    case Type::DoubleTyID: opCode = V9::FSUBD; break;
    default: assert(0 && "Invalid type for SUB instruction"); break;
    }
  }

  return opCode;
}
static inline MachineInstr*
CreateSubConstInstruction(const InstructionNode* instrNode)
{
  MachineInstr* minstr = NULL;

  Value* constOp = ((InstrTreeNode*) instrNode->rightChild())->getValue();
  assert(isa<Constant>(constOp));

  // Cases worth optimizing are:
  // (1) Sub with 0 for float or double: use an FMOV of appropriate type,
  //     instead of an FSUB (1 vs 3 cycles). There is no integer MOV.
  if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
    double dval = FPC->getValue();
    if (dval == 0.0)
      minstr = CreateMovFloatInstruction(instrNode,
                                    instrNode->getInstruction()->getType());
  }

  return minstr;
}
static inline MachineOpCode
ChooseFcmpInstruction(const InstructionNode* instrNode)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  Value* operand = ((InstrTreeNode*) instrNode->leftChild())->getValue();
  switch(operand->getType()->getPrimitiveID()) {
  case Type::FloatTyID:  opCode = V9::FCMPS; break;
  case Type::DoubleTyID: opCode = V9::FCMPD; break;
  default: assert(0 && "Invalid type for FCMP instruction"); break;
  }

  return opCode;
}
// Assumes that leftArg and rightArg are both cast instructions.
static inline bool
BothFloatToDouble(const InstructionNode* instrNode)
{
  InstrTreeNode* leftArg = instrNode->leftChild();
  InstrTreeNode* rightArg = instrNode->rightChild();
  InstrTreeNode* leftArgArg = leftArg->leftChild();
  InstrTreeNode* rightArgArg = rightArg->leftChild();
  assert(leftArg->getValue()->getType() == rightArg->getValue()->getType());

  // Check if both arguments are floats cast to double
  return (leftArg->getValue()->getType() == Type::DoubleTy &&
          leftArgArg->getValue()->getType() == Type::FloatTy &&
          rightArgArg->getValue()->getType() == Type::FloatTy);
}
static inline MachineOpCode
ChooseMulInstructionByType(const Type* resultType)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  if (resultType->isInteger())
    opCode = V9::MULXr;
  else
    switch(resultType->getPrimitiveID())
    {
    case Type::FloatTyID:  opCode = V9::FMULS; break;
    case Type::DoubleTyID: opCode = V9::FMULD; break;
    default: assert(0 && "Invalid type for MUL instruction"); break;
    }

  return opCode;
}
static inline MachineInstr*
CreateIntNegInstruction(const TargetMachine& target,
                        Value* vreg)
{
  return BuildMI(V9::SUBr, 3).addMReg(target.getRegInfo().getZeroRegNum())
    .addReg(vreg).addRegDef(vreg);
}
// Create instruction sequence for any shift operation.
// SLL or SLLX on an operand smaller than the integer reg. size (64 bits)
// requires a second instruction for explicit sign-extension.
// Note that we only have to worry about a sign-bit appearing in the
// most significant bit of the operand after shifting (e.g., bit 32 of
// Int or bit 16 of Short), so we do not have to worry about results
// that are as large as a normal integer register.
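//
// For example (illustrative): for a 32-bit `shl int %x, ubyte 3', the SLL
// result is placed in a temporary and the low 32 bits are then explicitly
// sign-extended into the full 64-bit destination register by the separate
// CreateSignExtensionInstructions() call below.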
static void
CreateShiftInstructions(const TargetMachine& target,
                        Function* F,
                        MachineOpCode shiftOpCode,
                        Value* argVal1,
                        Value* optArgVal2,     /* Use optArgVal2 if not NULL */
                        unsigned optShiftNum,  /* else use optShiftNum */
                        Instruction* destVal,
                        std::vector<MachineInstr*>& mvec,
                        MachineCodeForInstruction& mcfi)
{
  assert((optArgVal2 != NULL || optShiftNum <= 64) &&
         "Large shift sizes unexpected, but can be handled below: "
         "You need to check whether or not it fits in immed field below");

  // If this is a logical left shift of a type smaller than the standard
  // integer reg. size, we have to extend the sign-bit into upper bits
  // of dest, so we need to put the result of the SLL into a temporary.
  Value* shiftDest = destVal;
  unsigned opSize = target.getTargetData().getTypeSize(argVal1->getType());

  if ((shiftOpCode == V9::SLLr5 || shiftOpCode == V9::SLLXr6) && opSize < 8) {
    // put SLL result into a temporary
    shiftDest = new TmpInstruction(mcfi, argVal1, optArgVal2, "sllTmp");
  }

  MachineInstr* M = (optArgVal2 != NULL)
    ? BuildMI(shiftOpCode, 3).addReg(argVal1).addReg(optArgVal2)
                             .addReg(shiftDest, MachineOperand::Def)
    : BuildMI(shiftOpCode, 3).addReg(argVal1).addZImm(optShiftNum)
                             .addReg(shiftDest, MachineOperand::Def);
  mvec.push_back(M);

  if (shiftDest != destVal) {
    // extend the sign-bit of the result into all upper bits of dest
    assert(8*opSize <= 32 && "Unexpected type size > 4 and < IntRegSize?");
    target.getInstrInfo().
      CreateSignExtensionInstructions(target, F, shiftDest, destVal,
                                      8*opSize, mvec, mcfi);
  }
}
// Does not create any instructions if we cannot exploit constant to
// create a cheaper instruction.
// This returns the approximate cost of the instructions generated,
// which is used to pick the cheapest when both operands are constant.
static unsigned
CreateMulConstInstruction(const TargetMachine &target, Function* F,
                          Value* lval, Value* rval, Instruction* destVal,
                          std::vector<MachineInstr*>& mvec,
                          MachineCodeForInstruction& mcfi)
{
  /* Use max. multiply cost, viz., cost of MULX */
  unsigned cost = target.getInstrInfo().minLatency(V9::MULXr);
  unsigned firstNewInstr = mvec.size();

  Value* constOp = rval;
  if (! isa<Constant>(constOp))
    return cost;

  // Cases worth optimizing are:
  // (1) Multiply by 0 or 1 for any type: replace with copy (ADD or FMOV)
  // (2) Multiply by 2^x for integer types: replace with Shift
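  //
  // For example (illustrative): `mul %x, 8' becomes a single shift
  // (sll/sllx %x_reg, 3, %dest), `mul %x, 1' becomes `add %x_reg, %g0, %dest',
  // and `mul %x, -4' is shifted by 2 and then negated with
  // `sub %g0, %dest, %dest'.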
  const Type* resultType = destVal->getType();

  if (resultType->isInteger() || isa<PointerType>(resultType)) {
    bool isValidConst;
    int64_t C = (int64_t) target.getInstrInfo().ConvertConstantToIntType(target,
                                     constOp, constOp->getType(), isValidConst);
    if (isValidConst) {
      unsigned pow;
      bool needNeg = false;
      if (C < 0) {
        needNeg = true;
        C = -C;
      }

      if (C == 0 || C == 1) {
        cost = target.getInstrInfo().minLatency(V9::ADDr);
        unsigned Zero = target.getRegInfo().getZeroRegNum();
        MachineInstr* M;
        if (C == 0)
          M =BuildMI(V9::ADDr,3).addMReg(Zero).addMReg(Zero).addRegDef(destVal);
        else
          M = BuildMI(V9::ADDr,3).addReg(lval).addMReg(Zero).addRegDef(destVal);
        mvec.push_back(M);
      } else if (isPowerOf2(C, pow)) {
        unsigned opSize = target.getTargetData().getTypeSize(resultType);
        MachineOpCode opCode = (opSize <= 32)? V9::SLLr5 : V9::SLLXr6;
        CreateShiftInstructions(target, F, opCode, lval, NULL, pow,
                                destVal, mvec, mcfi);
      }

      if (mvec.size() > 0 && needNeg) {
        // insert <reg = SUB 0, reg> after the instr to flip the sign
        MachineInstr* M = CreateIntNegInstruction(target, destVal);
        mvec.push_back(M);
      }
    }
  } else {
    if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
      double dval = FPC->getValue();
      if (fabs(dval) == 1) {
        MachineOpCode opCode = (dval < 0)
          ? (resultType == Type::FloatTy? V9::FNEGS : V9::FNEGD)
          : (resultType == Type::FloatTy? V9::FMOVS : V9::FMOVD);
        mvec.push_back(BuildMI(opCode,2).addReg(lval).addRegDef(destVal));
      }
    }
  }

  if (firstNewInstr < mvec.size()) {
    cost = 0;
    for (unsigned i=firstNewInstr; i < mvec.size(); ++i)
      cost += target.getInstrInfo().minLatency(mvec[i]->getOpcode());
  }

  return cost;
}
// Does not create any instructions if we cannot exploit constant to
// create a cheaper instruction.
static inline void
CreateCheapestMulConstInstruction(const TargetMachine &target,
                                  Function* F,
                                  Value* lval, Value* rval,
                                  Instruction* destVal,
                                  std::vector<MachineInstr*>& mvec,
                                  MachineCodeForInstruction& mcfi)
{
  if (isa<Constant>(lval) && isa<Constant>(rval)) {
    // both operands are constant: evaluate and "set" in dest
    Constant* P = ConstantExpr::get(Instruction::Mul,
                                    cast<Constant>(lval),
                                    cast<Constant>(rval));
    target.getInstrInfo().CreateCodeToLoadConst(target,F,P,destVal,mvec,mcfi);
  }
  else if (isa<Constant>(rval))         // rval is constant, but not lval
    CreateMulConstInstruction(target, F, lval, rval, destVal, mvec, mcfi);
  else if (isa<Constant>(lval))         // lval is constant, but not rval
    CreateMulConstInstruction(target, F, lval, rval, destVal, mvec, mcfi);

  // else neither is constant
}
// Exploit a constant operand to create a cheaper instruction sequence when
// possible; otherwise emit a plain multiply.
static inline void
CreateMulInstruction(const TargetMachine &target, Function* F,
                     Value* lval, Value* rval, Instruction* destVal,
                     std::vector<MachineInstr*>& mvec,
                     MachineCodeForInstruction& mcfi,
                     MachineOpCode forceMulOp = INVALID_MACHINE_OPCODE)
{
  unsigned L = mvec.size();
  CreateCheapestMulConstInstruction(target,F, lval, rval, destVal, mvec, mcfi);
  if (mvec.size() == L) {
    // no instructions were added so create MUL reg, reg, reg.
    // Use FSMULD if both operands are actually floats cast to doubles.
    // Otherwise, use the default opcode for the appropriate type.
    MachineOpCode mulOp = ((forceMulOp != INVALID_MACHINE_OPCODE)
                           ? forceMulOp
                           : ChooseMulInstructionByType(destVal->getType()));
    mvec.push_back(BuildMI(mulOp, 3).addReg(lval).addReg(rval)
                                    .addRegDef(destVal));
  }
}
// Generate a divide instruction for Div or Rem.
// For Rem, this assumes that the operand type will be signed if the result
// type is signed. This is correct because they must have the same sign.
static inline MachineOpCode
ChooseDivInstruction(TargetMachine &target,
                     const InstructionNode* instrNode)
{
  MachineOpCode opCode = V9::INVALID_OPCODE;

  const Type* resultType = instrNode->getInstruction()->getType();

  if (resultType->isInteger())
    opCode = resultType->isSigned()? V9::SDIVXr : V9::UDIVXr;
  else
    switch(resultType->getPrimitiveID())
    {
    case Type::FloatTyID:  opCode = V9::FDIVS; break;
    case Type::DoubleTyID: opCode = V9::FDIVD; break;
    default: assert(0 && "Invalid type for DIV instruction"); break;
    }

  return opCode;
}
// Return if we cannot exploit a constant to create a cheaper instruction.
static void
CreateDivConstInstruction(TargetMachine &target,
                          const InstructionNode* instrNode,
                          std::vector<MachineInstr*>& mvec)
{
  Value* LHS = instrNode->leftChild()->getValue();
  Value* constOp = ((InstrTreeNode*) instrNode->rightChild())->getValue();
  if (!isa<Constant>(constOp))
    return;

  Instruction* destVal = instrNode->getInstruction();
  unsigned ZeroReg = target.getRegInfo().getZeroRegNum();

  // Cases worth optimizing are:
  // (1) Divide by 1 for any type: replace with copy (ADD or FMOV)
  // (2) Divide by 2^x for integer types: replace with SR[L or A]{X}
  const Type* resultType = instrNode->getInstruction()->getType();

  if (resultType->isInteger()) {
    unsigned pow;
    bool isValidConst;
    int64_t C = (int64_t) target.getInstrInfo().ConvertConstantToIntType(target,
                                     constOp, constOp->getType(), isValidConst);
    if (isValidConst) {
      bool needNeg = false;
      if (C < 0) {
        needNeg = true;
        C = -C;
      }

      if (C == 1) {
        mvec.push_back(BuildMI(V9::ADDr, 3).addReg(LHS).addMReg(ZeroReg)
                                           .addRegDef(destVal));
      } else if (isPowerOf2(C, pow)) {
        unsigned opCode;
        Value* shiftOperand;
        unsigned opSize = target.getTargetData().getTypeSize(resultType);

        if (resultType->isSigned()) {
          // For N / 2^k, if the operand N is negative,
          // we need to add (2^k - 1) before right-shifting by k, i.e.,
          //
          //    (N / 2^k) = N >> k,              if N >= 0;
          //                (N + 2^k - 1) >> k,  if N < 0
          //
          // If N is <= 32 bits, use:
          //    sra N, 31, t1        // t1 = ~0,         if N < 0, 0 else
          //    srl t1, 32-k, t2     // t2 = 2^k - 1,    if N < 0, 0 else
          //    add t2, N, t3        // t3 = N + 2^k -1, if N < 0, N else
          //    sra t3, k, result    // result = N / 2^k
          //
          // If N is 64 bits, use:
          //    srax N,  k-1,  t1    // t1 = sign bit in high k positions
          //    srlx t1, 64-k, t2    // t2 = 2^k - 1,    if N < 0, 0 else
          //    add t2, N, t3        // t3 = N + 2^k -1, if N < 0, N else
          //    sra t3, k, result    // result = N / 2^k
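          //
          // Worked example (illustrative): N = -7, k = 2 (divide by 4),
          // with a 32-bit N:
          //    sra  -7, 31  -> t1 = -1  (all ones)
          //    srl  t1, 30  -> t2 = 3   (= 2^2 - 1)
          //    add  t2, -7  -> t3 = -4
          //    sra  t3, 2   -> -1, which is -7 / 4 truncated toward zero.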
          TmpInstruction *sraTmp, *srlTmp, *addTmp;
          MachineCodeForInstruction& mcfi
            = MachineCodeForInstruction::get(destVal);
          sraTmp = new TmpInstruction(mcfi, resultType, LHS, 0, "getSign");
          srlTmp = new TmpInstruction(mcfi, resultType, LHS, 0, "getPlus2km1");
          addTmp = new TmpInstruction(mcfi, resultType, LHS, srlTmp,"incIfNeg");

          // Create the SRA or SRAX instruction to get the sign bit
          mvec.push_back(BuildMI((opSize > 4)? V9::SRAXi6 : V9::SRAi5, 3)
                         .addReg(LHS)
                         .addSImm((resultType==Type::LongTy)? pow-1 : 31)
                         .addRegDef(sraTmp));

          // Create the SRL or SRLX instruction to get (2^pow - 1) when the
          // operand is negative, and 0 otherwise
          mvec.push_back(BuildMI((opSize > 4)? V9::SRLXi6 : V9::SRLi5, 3)
                         .addReg(sraTmp)
                         .addSImm((resultType==Type::LongTy)? 64-pow : 32-pow)
                         .addRegDef(srlTmp));

          // Create the ADD instruction to add 2^pow-1 for negative values
          mvec.push_back(BuildMI(V9::ADDr, 3).addReg(LHS).addReg(srlTmp)
                                             .addRegDef(addTmp));

          // Get the shift operand and "right-shift" opcode to do the divide
          shiftOperand = addTmp;
          opCode = (opSize > 4)? V9::SRAXi6 : V9::SRAi5;
        } else {
          // Get the shift operand and "right-shift" opcode to do the divide
          shiftOperand = LHS;
          opCode = (opSize > 4)? V9::SRLXi6 : V9::SRLi5;
        }

        // Now do the actual shift!
        mvec.push_back(BuildMI(opCode, 3).addReg(shiftOperand).addZImm(pow)
                                         .addRegDef(destVal));
      }

      if (needNeg && (C == 1 || isPowerOf2(C, pow))) {
        // insert <reg = SUB 0, reg> after the instr to flip the sign
        mvec.push_back(CreateIntNegInstruction(target, destVal));
      }
    }
  } else {
    if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
      double dval = FPC->getValue();
      if (fabs(dval) == 1) {
        MachineOpCode opCode =
          (dval < 0) ? (resultType == Type::FloatTy? V9::FNEGS : V9::FNEGD)
                     : (resultType == Type::FloatTy? V9::FMOVS : V9::FMOVD);

        mvec.push_back(BuildMI(opCode, 2).addReg(LHS).addRegDef(destVal));
      }
    }
  }
}
static void
CreateCodeForVariableSizeAlloca(const TargetMachine& target,
                                Instruction* result,
                                unsigned tsize,
                                Value* numElementsVal,
                                std::vector<MachineInstr*>& getMvec)
{
  Value* totalSizeVal;

  MachineCodeForInstruction& mcfi = MachineCodeForInstruction::get(result);
  Function *F = result->getParent()->getParent();

  // Enforce the alignment constraints on the stack pointer at
  // compile time if the total size is a known constant.
  if (isa<Constant>(numElementsVal)) {
    bool isValid;
    int64_t numElem = (int64_t) target.getInstrInfo().
      ConvertConstantToIntType(target, numElementsVal,
                               numElementsVal->getType(), isValid);
    assert(isValid && "Unexpectedly large array dimension in alloca!");
    int64_t total = numElem * tsize;
    if (int extra= total % target.getFrameInfo().getStackFrameSizeAlignment())
      total += target.getFrameInfo().getStackFrameSizeAlignment() - extra;
    totalSizeVal = ConstantSInt::get(Type::IntTy, total);
  } else {
    // The size is not a constant. Generate code to compute it and
    // code to pad the size for stack alignment.
    // Create a Value to hold the (constant) element size
    Value* tsizeVal = ConstantSInt::get(Type::IntTy, tsize);

    // Create temporary values to hold the results of the MUL, ADD, AND.
    // To round `size' up to the next multiple of 16:
    //    size = (size + 15) & (-16 = 0xfffffffffffffff0)
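    //
    // For example (illustrative): a request for 23 bytes becomes
    // (23 + 15) & -16 = 32, keeping the stack pointer 16-byte aligned.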
    TmpInstruction* tmpProd = new TmpInstruction(mcfi,numElementsVal, tsizeVal);
    TmpInstruction* tmpAdd15= new TmpInstruction(mcfi,numElementsVal, tmpProd);
    TmpInstruction* tmpAndf0= new TmpInstruction(mcfi,numElementsVal, tmpAdd15);

    // Instruction 1: mul numElements, typeSize -> tmpProd
    // This will optimize the MUL as far as possible.
    CreateMulInstruction(target, F, numElementsVal, tsizeVal, tmpProd, getMvec,
                         mcfi, INVALID_MACHINE_OPCODE);

    // Instruction 2: add tmpProd, 15 -> tmpAdd15
    getMvec.push_back(BuildMI(V9::ADDi, 3).addReg(tmpProd).addSImm(15)
                      .addReg(tmpAdd15, MachineOperand::Def));

    // Instruction 3: and tmpAdd15, -16 -> tmpAndf0
    getMvec.push_back(BuildMI(V9::ANDi, 3).addReg(tmpAdd15).addSImm(-16)
                      .addReg(tmpAndf0, MachineOperand::Def));

    totalSizeVal = tmpAndf0;
  }

  // Get the constant offset from SP for dynamically allocated storage
  // and create a temporary Value to hold it.
  MachineFunction& mcInfo = MachineFunction::get(F);
  bool growUp;
  ConstantSInt* dynamicAreaOffset =
    ConstantSInt::get(Type::IntTy,
                     target.getFrameInfo().getDynamicAreaOffset(mcInfo,growUp));
  assert(! growUp && "Has SPARC v9 stack frame convention changed?");

  unsigned SPReg = target.getRegInfo().getStackPointer();

  // Instruction 2: sub %sp, totalSizeVal -> %sp
  getMvec.push_back(BuildMI(V9::SUBr, 3).addMReg(SPReg).addReg(totalSizeVal)
                    .addMReg(SPReg,MachineOperand::Def));

  // Instruction 3: add %sp, frameSizeBelowDynamicArea -> result
  getMvec.push_back(BuildMI(V9::ADDr,3).addMReg(SPReg).addReg(dynamicAreaOffset)
                    .addRegDef(result));
}
static void
CreateCodeForFixedSizeAlloca(const TargetMachine& target,
                             Instruction* result,
                             unsigned tsize,
                             unsigned numElements,
                             std::vector<MachineInstr*>& getMvec)
{
  assert(tsize > 0 && "Illegal (zero) type size for alloca");
  assert(result && result->getParent() &&
         "Result value is not part of a function?");
  Function *F = result->getParent()->getParent();
  MachineFunction &mcInfo = MachineFunction::get(F);

  // Put the variable in the dynamically sized area of the frame if either:
  // (a) The offset is too large to use as an immediate in load/stores
  //     (check LDX because all load/stores have the same-size immed. field).
  // (b) The object is "large", so it could cause many other locals,
  //     spills, and temporaries to have large offsets.
  //     NOTE: We use LARGE = 8 * argSlotSize = 64 bytes.
  // You've gotta love having only 13 bits for constant offset values :-|.
  unsigned paddedSize;
  int offsetFromFP = mcInfo.getInfo()->computeOffsetforLocalVar(result,
                                                                paddedSize,
                                                                tsize * numElements);

  if (((int)paddedSize) > 8 * target.getFrameInfo().getSizeOfEachArgOnStack() ||
      ! target.getInstrInfo().constantFitsInImmedField(V9::LDXi,offsetFromFP)) {
    CreateCodeForVariableSizeAlloca(target, result, tsize,
                                    ConstantSInt::get(Type::IntTy,numElements),
                                    getMvec);
    return;
  }

  // else offset fits in immediate field so go ahead and allocate it.
  offsetFromFP = mcInfo.getInfo()->allocateLocalVar(result, tsize *numElements);

  // Create a temporary Value to hold the constant offset.
  // This is needed because it may not fit in the immediate field.
  ConstantSInt* offsetVal = ConstantSInt::get(Type::IntTy, offsetFromFP);

  // Instruction 1: add %fp, offsetFromFP -> result
  unsigned FPReg = target.getRegInfo().getFramePointer();
  getMvec.push_back(BuildMI(V9::ADDr, 3).addMReg(FPReg).addReg(offsetVal)
                    .addRegDef(result));
}
//------------------------------------------------------------------------
// Function SetOperandsForMemInstr
//
// Choose addressing mode for the given load or store instruction.
// Use [reg+reg] if it is an indexed reference, and the index offset is
// not a constant or if it cannot fit in the offset field.
// Use [reg+offset] in all other cases.
//
// This assumes that all array refs are "lowered" to one of these forms:
//     %x = load (subarray*) ptr, constant      ; single constant offset
//     %x = load (subarray*) ptr, offsetVal     ; single non-constant offset
// Generally, this should happen via strength reduction + LICM.
// Also, strength reduction should take care of using the same register for
// the loop index variable and an array index, when that is profitable.
//------------------------------------------------------------------------
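//
// Illustrative example (assumed): an indexed reference whose offset is a
// run-time index %i scaled by the element size is selected as
//     sllx  %i_reg, 2, %tmp            ! %i * sizeof(element)
//     ld    [%ptr_reg + %tmp], %dest   ! [reg+reg] form
// An all-constant index vector is likewise folded into a single offset value
// (kept in a register, since it may not fit the 13-bit immediate field),
// while a plain scalar load with no indices uses the [reg+0] immediate form.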
static void
SetOperandsForMemInstr(unsigned Opcode,
                       std::vector<MachineInstr*>& mvec,
                       InstructionNode* vmInstrNode,
                       const TargetMachine& target)
{
  Instruction* memInst = vmInstrNode->getInstruction();
  // Index vector, ptr value, and flag if all indices are const.
  std::vector<Value*> idxVec;
  bool allConstantIndices;
  Value* ptrVal = GetMemInstArgs(vmInstrNode, idxVec, allConstantIndices);

  // Now create the appropriate operands for the machine instruction.
  // First, initialize so we default to storing the offset in a register.
  int64_t smallConstOffset = 0;
  Value* valueForRegOffset = NULL;
  MachineOperand::MachineOperandType offsetOpType =
    MachineOperand::MO_VirtualRegister;

  // Check if there is an index vector and if so, compute the
  // right offset for structures and for arrays
  if (!idxVec.empty()) {
    const PointerType* ptrType = cast<PointerType>(ptrVal->getType());

    // If all indices are constant, compute the combined offset directly.
    if (allConstantIndices) {
      // Compute the offset value using the index vector. Create a
      // virtual reg. for it since it may not fit in the immed field.
      uint64_t offset = target.getTargetData().getIndexedOffset(ptrType,idxVec);
      valueForRegOffset = ConstantSInt::get(Type::LongTy, offset);
    } else {
      // There is at least one non-constant offset. Therefore, this must
      // be an array ref, and must have been lowered to a single non-zero
      // offset. (An extra leading zero offset, if any, can be ignored.)
      // Generate code sequence to compute address from index.
      bool firstIdxIsZero = IsZero(idxVec[0]);
      assert(idxVec.size() == 1U + firstIdxIsZero
             && "Array refs must be lowered before Instruction Selection");

      Value* idxVal = idxVec[firstIdxIsZero];

      std::vector<MachineInstr*> mulVec;
      Instruction* addr =
        new TmpInstruction(MachineCodeForInstruction::get(memInst),
                           Type::ULongTy, memInst);

      // Get the array type indexed by idxVal, and compute its element size.
      // The call to getTypeSize() will fail if size is not constant.
      const Type* vecType = (firstIdxIsZero
                             ? GetElementPtrInst::getIndexedType(ptrType,
                                           std::vector<Value*>(1U, idxVec[0]),
                                           /*AllowCompositeLeaf*/ true)
                             : ptrType);
      const Type* eltType = cast<SequentialType>(vecType)->getElementType();
      ConstantUInt* eltSizeVal = ConstantUInt::get(Type::ULongTy,
                                   target.getTargetData().getTypeSize(eltType));

      // CreateMulInstruction() folds constants intelligently enough.
      CreateMulInstruction(target, memInst->getParent()->getParent(),
                           idxVal,     /* lval, not likely to be const */
                           eltSizeVal, /* rval, likely to be constant  */
                           addr,       /* destination for the product  */
                           mulVec, MachineCodeForInstruction::get(memInst),
                           INVALID_MACHINE_OPCODE);

      assert(mulVec.size() > 0 && "No multiply code created?");
      mvec.insert(mvec.end(), mulVec.begin(), mulVec.end());

      valueForRegOffset = addr;
    }
  } else {
    offsetOpType = MachineOperand::MO_SignExtendedImmed;
    smallConstOffset = 0;
  }

  // For STORE:
  //   Operand 0 is value, operand 1 is ptr, operand 2 is offset
  // For LOAD or GET_ELEMENT_PTR,
  //   Operand 0 is ptr, operand 1 is offset, operand 2 is result.
  unsigned offsetOpNum, ptrOpNum;
  MachineInstr* MI;
  if (memInst->getOpcode() == Instruction::Store) {
    if (offsetOpType == MachineOperand::MO_VirtualRegister) {
      MI = BuildMI(Opcode, 3).addReg(vmInstrNode->leftChild()->getValue())
                             .addReg(ptrVal).addReg(valueForRegOffset);
    } else {
      Opcode = convertOpcodeFromRegToImm(Opcode);
      MI = BuildMI(Opcode, 3).addReg(vmInstrNode->leftChild()->getValue())
                             .addReg(ptrVal).addSImm(smallConstOffset);
    }
  } else {
    if (offsetOpType == MachineOperand::MO_VirtualRegister) {
      MI = BuildMI(Opcode, 3).addReg(ptrVal).addReg(valueForRegOffset)
                             .addRegDef(memInst);
    } else {
      Opcode = convertOpcodeFromRegToImm(Opcode);
      MI = BuildMI(Opcode, 3).addReg(ptrVal).addSImm(smallConstOffset)
                             .addRegDef(memInst);
    }
  }

  mvec.push_back(MI);
}
// Substitute operand `operandNum' of the instruction in node `treeNode'
// in place of the use(s) of that instruction in node `parent'.
// Check both explicit and implicit operands!
// Also make sure to skip over a parent who:
// (1) is a list node in the Burg tree, or
// (2) itself had its results forwarded to its parent
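//
// For example (illustrative): when a no-op cast is folded into its user, the
// user's machine instructions still name the cast's result value; this
// routine rewrites those explicit operands (and implicit refs) to name the
// cast's source operand instead.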
static void
ForwardOperand(InstructionNode* treeNode,
               InstrTreeNode* parent,
               int operandNum)
{
  assert(treeNode && parent && "Invalid invocation of ForwardOperand");

  Instruction* unusedOp = treeNode->getInstruction();
  Value* fwdOp = unusedOp->getOperand(operandNum);

  // The parent itself may be a list node, so find the real parent instruction
  while (parent->getNodeType() != InstrTreeNode::NTInstructionNode) {
    parent = parent->parent();
    assert(parent && "ERROR: Non-instruction node has no parent in tree.");
  }
  InstructionNode* parentInstrNode = (InstructionNode*) parent;

  Instruction* userInstr = parentInstrNode->getInstruction();
  MachineCodeForInstruction &mvec = MachineCodeForInstruction::get(userInstr);

  // The parent's mvec would be empty if it was itself forwarded.
  // Recursively call ForwardOperand in that case...
  if (mvec.size() == 0) {
    assert(parent->parent() != NULL &&
           "Parent could not have been forwarded, yet has no instructions?");
    ForwardOperand(treeNode, parent->parent(), operandNum);
  } else {
    for (unsigned i=0, N=mvec.size(); i < N; i++) {
      MachineInstr* minstr = mvec[i];
      for (unsigned i=0, numOps=minstr->getNumOperands(); i < numOps; ++i) {
        const MachineOperand& mop = minstr->getOperand(i);
        if (mop.getType() == MachineOperand::MO_VirtualRegister &&
            mop.getVRegValue() == unusedOp) {
          minstr->SetMachineOperandVal(i, MachineOperand::MO_VirtualRegister,
                                       fwdOp);
        }
      }

      for (unsigned i=0,numOps=minstr->getNumImplicitRefs(); i<numOps; ++i)
        if (minstr->getImplicitRef(i) == unusedOp)
          minstr->setImplicitRef(i, fwdOp);
    }
  }
}
static inline bool
AllUsesAreBranches(const Instruction* setccI)
{
  for (Value::use_const_iterator UI=setccI->use_begin(), UE=setccI->use_end();
       UI != UE; ++UI)
    if (! isa<TmpInstruction>(*UI)      // ignore tmp instructions here
        && cast<Instruction>(*UI)->getOpcode() != Instruction::Br)
      return false;
  return true;
}
// Generate code for any intrinsic that needs a special code sequence
// instead of a regular call. If not that kind of intrinsic, do nothing.
// Returns true if code was generated, otherwise false.
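//
// For example (illustrative): llvm.va_start is expanded inline below into a
// single `add %fp, <first-vararg-offset>, result' rather than a call.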
static bool CodeGenIntrinsic(Intrinsic::ID iid, CallInst &callInstr,
                             TargetMachine &target,
                             std::vector<MachineInstr*>& mvec) {
  switch (iid) {
  default:
    assert(0 && "Unknown intrinsic function call should have been lowered!");
  case Intrinsic::va_start: {
    // Get the address of the first incoming vararg argument on the stack
    bool ignore;
    Function* func = cast<Function>(callInstr.getParent()->getParent());
    int numFixedArgs   = func->getFunctionType()->getNumParams();
    int fpReg          = target.getFrameInfo().getIncomingArgBaseRegNum();
    int argSize        = target.getFrameInfo().getSizeOfEachArgOnStack();
    int firstVarArgOff = numFixedArgs * argSize + target.getFrameInfo().
      getFirstIncomingArgOffset(MachineFunction::get(func), ignore);
    mvec.push_back(BuildMI(V9::ADDi, 3).addMReg(fpReg).addSImm(firstVarArgOff).
                   addRegDef(&callInstr));
    return true;
  }

  case Intrinsic::va_end:
    return true;                        // no-op on Sparc

  case Intrinsic::va_copy:
    // Simple copy of current va_list (arg1) to new va_list (result)
    mvec.push_back(BuildMI(V9::ORr, 3).
                   addMReg(target.getRegInfo().getZeroRegNum()).
                   addReg(callInstr.getOperand(1)).
                   addRegDef(&callInstr));
    return true;
  }
}
//******************* Externally Visible Functions *************************/

//------------------------------------------------------------------------
// External Function: ThisIsAChainRule
//
// Check if a given BURG rule is a chain rule.
//------------------------------------------------------------------------
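//
// For example (illustrative): production 111, "stmt: reg", simply uses a
// value that is already available in a register, so no new instructions are
// selected for it; GetInstructionsByRule() below instead re-invokes itself
// with the rule that matched the underlying "reg" non-terminal.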
bool
ThisIsAChainRule(int eruleno)
{
  switch(eruleno)
  {
  case 111:     // stmt:  reg
    // (The remaining chain-rule production numbers, elided here, also fall
    //  through to this point.)
    return true; break;

  default:
    return false; break;
  }
}
//------------------------------------------------------------------------
// External Function: GetInstructionsByRule
//
// Choose machine instructions for the SPARC according to the
// patterns chosen by the BURG-generated parser.
//------------------------------------------------------------------------
void
GetInstructionsByRule(InstructionNode* subtreeRoot,
                      int ruleForNode,
                      short* nts,
                      TargetMachine &target,
                      std::vector<MachineInstr*>& mvec)
{
  bool checkCast = false;               // initialize here to use fall-through
  bool maskUnsignedResult = false;
  int nextRule;
  int forwardOperandNum = -1;
  unsigned allocaSize = 0;
  MachineInstr* M, *M2;
  bool foldCase = false;

  // If the code for this instruction was folded into the parent (user),
  // then do nothing!
  if (subtreeRoot->isFoldedIntoParent())
    return;

  // Let's check for chain rules outside the switch so that we don't have
  // to duplicate the list of chain rule production numbers here again
  if (ThisIsAChainRule(ruleForNode)) {
    // Chain rules have a single nonterminal on the RHS.
    // Get the rule that matches the RHS non-terminal and use that instead.
    assert(nts[0] && ! nts[1]
           && "A chain rule should have only one RHS non-terminal!");
    nextRule = burm_rule(subtreeRoot->state, nts[0]);
    nts = burm_nts[nextRule];
    GetInstructionsByRule(subtreeRoot, nextRule, nts, target, mvec);
  } else {
    switch(ruleForNode) {
    case 1:     // stmt:  Ret
    case 2:     // stmt:  RetValue(reg)
    {           // NOTE: Prepass of register allocation is responsible
                //       for moving return value to appropriate register.
                // Copy the return value to the required return register.
                // Mark the return Value as an implicit ref of the RET instr..
                // Mark the return-address register as a hidden virtual reg.
                // Finally put a NOP in the delay slot.
      ReturnInst *returnInstr=cast<ReturnInst>(subtreeRoot->getInstruction());
      Value* retVal = returnInstr->getReturnValue();
      MachineCodeForInstruction& mcfi =
        MachineCodeForInstruction::get(returnInstr);

      // Create a hidden virtual reg to represent the return address register
      // used by the machine instruction but not represented in LLVM.
      Instruction* returnAddrTmp = new TmpInstruction(mcfi, returnInstr);

      MachineInstr* retMI =
        BuildMI(V9::JMPLRETi, 3).addReg(returnAddrTmp).addSImm(8)
        .addMReg(target.getRegInfo().getZeroRegNum(), MachineOperand::Def);

      // If there is a value to return, we need to:
      // (a) Sign-extend the value if it is smaller than 8 bytes (reg size)
      // (b) Insert a copy to copy the return value to the appropriate reg.
      //     -- For FP values, create a FMOVS or FMOVD instruction
      //     -- For non-FP values, create an add-with-0 instruction
      if (retVal != NULL) {
        const SparcRegInfo& regInfo =
          (SparcRegInfo&) target.getRegInfo();
        const Type* retType = retVal->getType();
        unsigned regClassID = regInfo.getRegClassIDOfType(retType);
        unsigned retRegNum = (retType->isFloatingPoint()
                              ? (unsigned) SparcFloatRegClass::f0
                              : (unsigned) SparcIntRegClass::i0);
        retRegNum = regInfo.getUnifiedRegNum(regClassID, retRegNum);

        // (a) Insert sign-extension instructions for small signed values.
        Value* retValToUse = retVal;
        if (retType->isIntegral() && retType->isSigned()) {
          unsigned retSize = target.getTargetData().getTypeSize(retType);
          if (retSize < 8) {
            // create a temporary virtual reg. to hold the sign-extension
            retValToUse = new TmpInstruction(mcfi, retVal);

            // sign-extend retVal and put the result in the temporary reg.
            target.getInstrInfo().CreateSignExtensionInstructions
              (target, returnInstr->getParent()->getParent(),
               retVal, retValToUse, 8*retSize, mvec, mcfi);
          }
        }

        // (b) Now, insert a copy to the appropriate register:
        //     -- For FP values, create a FMOVS or FMOVD instruction
        //     -- For non-FP values, create an add-with-0 instruction
        // First, create a virtual register to represent the register and
        // mark this vreg as being an implicit operand of the ret MI.
        TmpInstruction* retVReg =
          new TmpInstruction(mcfi, retValToUse, NULL, "argReg");

        retMI->addImplicitRef(retVReg);

        if (retType->isFloatingPoint())
          M = (BuildMI(retType==Type::FloatTy? V9::FMOVS : V9::FMOVD, 2)
               .addReg(retValToUse).addReg(retVReg, MachineOperand::Def));
        else
          M = (BuildMI(ChooseAddInstructionByType(retType), 3)
               .addReg(retValToUse).addSImm((int64_t) 0)
               .addReg(retVReg, MachineOperand::Def));

        // Mark the operand with the register it should be assigned
        M->SetRegForOperand(M->getNumOperands()-1, retRegNum);
        retMI->SetRegForImplicitRef(retMI->getNumImplicitRefs()-1, retRegNum);

        mvec.push_back(M);
      }

      // Now insert the RET instruction and a NOP for the delay slot
      mvec.push_back(retMI);
      mvec.push_back(BuildMI(V9::NOP, 0));

      break;
    }
    case 3:     // stmt:  Store(reg,reg)
    case 4:     // stmt:  Store(reg,ptrreg)
      SetOperandsForMemInstr(ChooseStoreInstruction(
                               subtreeRoot->leftChild()->getValue()->getType()),
                             mvec, subtreeRoot, target);
      break;

    case 5:     // stmt:  BrUncond
    {
      BranchInst *BI = cast<BranchInst>(subtreeRoot->getInstruction());
      mvec.push_back(BuildMI(V9::BA, 1).addPCDisp(BI->getSuccessor(0)));

      // delay slot
      mvec.push_back(BuildMI(V9::NOP, 0));
      break;
    }
    case 206:   // stmt:  BrCond(setCCconst)
    { // setCCconst => boolean was computed with `%b = setCC type reg1 const'
      // If the constant is ZERO, we can use the branch-on-integer-register
      // instructions and avoid the SUBcc instruction entirely.
      // Otherwise this is just the same as case 5, so just fall through.
      InstrTreeNode* constNode = subtreeRoot->leftChild()->rightChild();
      assert(constNode &&
             constNode->getNodeType() ==InstrTreeNode::NTConstNode);
      Constant *constVal = cast<Constant>(constNode->getValue());
      bool isValidConst;

      if ((constVal->getType()->isInteger()
           || isa<PointerType>(constVal->getType()))
          && target.getInstrInfo().ConvertConstantToIntType(target,
                             constVal, constVal->getType(), isValidConst) == 0
          && isValidConst)
      {
        // That constant is a zero after all...
        // Use the left child of setCC as the first argument!
        // Mark the setCC node so that no code is generated for it.
        InstructionNode* setCCNode = (InstructionNode*)
                                     subtreeRoot->leftChild();
        assert(setCCNode->getOpLabel() == SetCCOp);
        setCCNode->markFoldedIntoParent();

        BranchInst* brInst=cast<BranchInst>(subtreeRoot->getInstruction());

        M = BuildMI(ChooseBprInstruction(subtreeRoot), 2)
                                .addReg(setCCNode->leftChild()->getValue())
                                .addPCDisp(brInst->getSuccessor(0));
        mvec.push_back(M);

        // delay slot
        mvec.push_back(BuildMI(V9::NOP, 0));

        // false branch
        mvec.push_back(BuildMI(V9::BA, 1)
                       .addPCDisp(brInst->getSuccessor(1)));

        // delay slot
        mvec.push_back(BuildMI(V9::NOP, 0));

        break;
      }
      // ELSE FALL THROUGH
    }
1668 case 6: // stmt: BrCond(setCC)
1669 { // bool => boolean was computed with SetCC.
1670 // The branch to use depends on whether it is FP, signed, or unsigned.
1671 // If it is an integer CC, we also need to find the unique
1672 // TmpInstruction representing that CC.
1674 BranchInst* brInst = cast<BranchInst>(subtreeRoot->getInstruction());
1675 const Type* setCCType;
1676 unsigned Opcode = ChooseBccInstruction(subtreeRoot, setCCType);
1677 Value* ccValue = GetTmpForCC(subtreeRoot->leftChild()->getValue(),
1678 brInst->getParent()->getParent(),
1680 setCCType, MachineCodeForInstruction::get(brInst));
1681 M = BuildMI(Opcode, 2).addCCReg(ccValue)
1682 .addPCDisp(brInst->getSuccessor(0));
1686 mvec.push_back(BuildMI(V9::NOP, 0));
1689 mvec.push_back(BuildMI(V9::BA, 1).addPCDisp(brInst->getSuccessor(1)));
1692 mvec.push_back(BuildMI(V9::NOP, 0));
1696 case 208: // stmt: BrCond(boolconst)
1698 // boolconst => boolean is a constant; use BA to first or second label
1699 Constant* constVal =
1700 cast<Constant>(subtreeRoot->leftChild()->getValue());
1701 unsigned dest = cast<ConstantBool>(constVal)->getValue()? 0 : 1;
1703 M = BuildMI(V9::BA, 1).addPCDisp(
1704 cast<BranchInst>(subtreeRoot->getInstruction())->getSuccessor(dest));
1708 mvec.push_back(BuildMI(V9::NOP, 0));
1712 case 8: // stmt: BrCond(boolreg)
1713 { // boolreg => boolean is recorded in an integer register.
1714 // Use branch-on-integer-register instruction.
1716 BranchInst *BI = cast<BranchInst>(subtreeRoot->getInstruction());
1717 M = BuildMI(V9::BRNZ, 2).addReg(subtreeRoot->leftChild()->getValue())
1718 .addPCDisp(BI->getSuccessor(0));
1722 mvec.push_back(BuildMI(V9::NOP, 0));
1725 mvec.push_back(BuildMI(V9::BA, 1).addPCDisp(BI->getSuccessor(1)));
1728 mvec.push_back(BuildMI(V9::NOP, 0));
1732 case 9: // stmt: Switch(reg)
1733 assert(0 && "*** SWITCH instruction is not implemented yet.");
1736 case 10: // reg: VRegList(reg, reg)
1737 assert(0 && "VRegList should never be the topmost non-chain rule");
1740 case 21: // bool: Not(bool,reg): Compute with a conditional-move-on-reg
1741 { // First find the unary operand. It may be left or right, usually right.
1742 Instruction* notI = subtreeRoot->getInstruction();
1743 Value* notArg = BinaryOperator::getNotArgument(
1744 cast<BinaryOperator>(subtreeRoot->getInstruction()));
1745 unsigned ZeroReg = target.getRegInfo().getZeroRegNum();
1747 // Unconditionally set register to 0
1748 mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(notI));
1750 // Now conditionally move 1 into the register.
1751 // Mark the register as a use (as well as a def) because the old
1752 // value will be retained if the condition is false.
1753 mvec.push_back(BuildMI(V9::MOVRZi, 3).addReg(notArg).addZImm(1)
1754 .addReg(notI, MachineOperand::UseAndDef));
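// In effect this computes:  notI = (notArg == 0) ? 1 : 0.
// Illustrative sequence:  sethi 0, notI ; movrz notArg, 1, notI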
1759 case 421: // reg: BNot(reg,reg): Compute as reg = reg XOR-NOT 0
1760 { // First find the unary operand. It may be left or right, usually right.
1761 Value* notArg = BinaryOperator::getNotArgument(
1762 cast<BinaryOperator>(subtreeRoot->getInstruction()));
1763 unsigned ZeroReg = target.getRegInfo().getZeroRegNum();
1764 mvec.push_back(BuildMI(V9::XNORr, 3).addReg(notArg).addMReg(ZeroReg)
1765 .addRegDef(subtreeRoot->getValue()));
1769 case 322: // reg: Not(tobool, reg):
1770 // Fold CAST-TO-BOOL with NOT by inverting the sense of cast-to-bool
1772 // Just fall through!
1774 case 22: // reg: ToBoolTy(reg):
1776 Instruction* castI = subtreeRoot->getInstruction();
1777 Value* opVal = subtreeRoot->leftChild()->getValue();
1778 assert(opVal->getType()->isIntegral() ||
1779 isa<PointerType>(opVal->getType()));
1781 // Unconditionally set register to 0
1782 mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(castI));
1784 // Now conditionally move 1 into the register.
1785 // Mark the register as a use (as well as a def) because the old
1786 // value will be retained if the condition is false.
1787 MachineOpCode opCode = foldCase? V9::MOVRZi : V9::MOVRNZi;
1788 mvec.push_back(BuildMI(opCode, 3).addReg(opVal).addZImm(1)
1789 .addReg(castI, MachineOperand::UseAndDef));
1794 case 23: // reg: ToUByteTy(reg)
1795 case 24: // reg: ToSByteTy(reg)
1796 case 25: // reg: ToUShortTy(reg)
1797 case 26: // reg: ToShortTy(reg)
1798 case 27: // reg: ToUIntTy(reg)
1799 case 28: // reg: ToIntTy(reg)
1800 case 29: // reg: ToULongTy(reg)
1801 case 30: // reg: ToLongTy(reg)
1803 //======================================================================
1804 // Rules for integer conversions:
1807 // From ISO 1998 C++ Standard, Sec. 4.7:
1809 // 2. If the destination type is unsigned, the resulting value is
1810 // the least unsigned integer congruent to the source integer
1811 // (modulo 2^n where n is the number of bits used to represent the
1812 // unsigned type). [Note: In a two's complement representation,
1813 // this conversion is conceptual and there is no change in the
1814 // bit pattern (if there is no truncation). ]
1816 // 3. If the destination type is signed, the value is unchanged if
1817 // it can be represented in the destination type (and bitfield width);
1818 // otherwise, the value is implementation-defined.
1821 // Since we assume 2's complement representations, this implies:
1823 // -- If operand is smaller than destination, zero-extend or sign-extend
1824 // according to the signedness of the *operand*: source decides:
1825 // (1) If operand is signed, sign-extend it.
1826 // If dest is unsigned, zero-ext the result!
1827 // (2) If operand is unsigned, our current invariant is that
1828 // its high bits are correct, so zero-extension is not needed.
1830 // -- If operand is same size as or larger than destination,
1831 // zero-extend or sign-extend according to the signedness of
1832 // the *destination*: destination decides:
1833 // (1) If destination is signed, sign-extend (truncating if needed)
1834 // This choice is implementation-defined. We sign-extend the
1835 // operand, which matches both Sun's cc and gcc3.2.
1836 // (2) If destination is unsigned, zero-extend (truncating if needed)
1837 //======================================================================
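// Worked examples of these rules (illustrative):
//   sbyte -> uint  : operand is smaller and signed, dest is unsigned and
//                    < 8 bytes, so sign-extend from 8 bits into a temporary,
//                    then zero-extend the temporary from 32 bits into dest.
//   ulong -> ushort: operand >= dest and dest is unsigned, so simply
//                    zero-extend from 16 bits (keep only the low 16 bits).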
1839 Instruction* destI = subtreeRoot->getInstruction();
1840 Function* currentFunc = destI->getParent()->getParent();
1841 MachineCodeForInstruction& mcfi=MachineCodeForInstruction::get(destI);
1843 Value* opVal = subtreeRoot->leftChild()->getValue();
1844 const Type* opType = opVal->getType();
1845 const Type* destType = destI->getType();
1846 unsigned opSize = target.getTargetData().getTypeSize(opType);
1847 unsigned destSize = target.getTargetData().getTypeSize(destType);
1849 bool isIntegral = opType->isIntegral() || isa<PointerType>(opType);
1851 if (opType == Type::BoolTy ||
1852 opType == destType ||
1853 (isIntegral && opSize == destSize && opSize == 8)) {
1854 // nothing to do in all these cases
1855 forwardOperandNum = 0; // forward first operand to user
1857 } else if (opType->isFloatingPoint()) {
1859 CreateCodeToConvertFloatToInt(target, opVal, destI, mvec, mcfi);
1860 if (destI->getType()->isUnsigned() && destI->getType() !=Type::UIntTy)
1861 maskUnsignedResult = true; // not handled by fp->int code
1863 } else if (isIntegral) {
1865 bool opSigned = opType->isSigned();
1866 bool destSigned = destType->isSigned();
1867 unsigned extSourceInBits = 8 * std::min<unsigned>(opSize, destSize);
1869 assert(! (opSize == destSize && opSigned == destSigned) &&
1870 "How can different int types have same size and signedness?");
1872 bool signExtend = ((opSize < destSize && opSigned) ||
1873 (opSize >= destSize && destSigned));
1875 bool signAndZeroExtend = (opSize < destSize && destSize < 8u &&
1876 opSigned && !destSigned);
1877 assert(!signAndZeroExtend || signExtend);
1879 bool zeroExtendOnly = opSize >= destSize && !destSigned;
1880 assert(!zeroExtendOnly || !signExtend);
1883 Value* signExtDest = (signAndZeroExtend
1884 ? new TmpInstruction(mcfi, destType, opVal) : destI);
1887 target.getInstrInfo().CreateSignExtensionInstructions
1888 (target, currentFunc,opVal,signExtDest,extSourceInBits,mvec,mcfi);
1890 if (signAndZeroExtend)
1891 target.getInstrInfo().CreateZeroExtensionInstructions
1892 (target, currentFunc, signExtDest, destI, 8*destSize, mvec, mcfi);
1894 else if (zeroExtendOnly) {
1895 target.getInstrInfo().CreateZeroExtensionInstructions
1896 (target, currentFunc, opVal, destI, extSourceInBits, mvec, mcfi);
1899 forwardOperandNum = 0; // forward first operand to user
1902 assert(0 && "Unrecognized operand type for convert-to-integer");
1907 case 31: // reg: ToFloatTy(reg):
1908 case 32: // reg: ToDoubleTy(reg):
1909 case 232: // reg: ToDoubleTy(Constant):
1911 // If this instruction has a parent (a user) in the tree
1912 // and the user is translated as an FsMULd instruction,
1913 // then the cast is unnecessary. So check that first.
1914 // In the future, we'll want to do the same for the FdMULq instruction,
1915 // so do the check here instead of only for ToFloatTy(reg).
1917 if (subtreeRoot->parent() != NULL) {
1918 const MachineCodeForInstruction& mcfi =
1919 MachineCodeForInstruction::get(
1920 cast<InstructionNode>(subtreeRoot->parent())->getInstruction());
1921 if (mcfi.size() == 0 || mcfi.front()->getOpcode() == V9::FSMULD)
1922 forwardOperandNum = 0; // forward first operand to user
1925 if (forwardOperandNum != 0) { // we do need the cast
1926 Value* leftVal = subtreeRoot->leftChild()->getValue();
1927 const Type* opType = leftVal->getType();
1928 MachineOpCode opCode=ChooseConvertToFloatInstr(target,
1929 subtreeRoot->getOpLabel(), opType);
1930 if (opCode == V9::NOP) { // no conversion needed
1931 forwardOperandNum = 0; // forward first operand to user
1933 // If the source operand is a non-FP type it must be
1934 // first copied from int to float register via memory!
1935 Instruction *dest = subtreeRoot->getInstruction();
1938 if (! opType->isFloatingPoint()) {
1939 // Create a temporary to represent the FP register
1940 // into which the integer will be copied via memory.
1941 // The type of this temporary will determine the FP
1942 // register used: single-prec for a 32-bit int or smaller,
1943 // double-prec for a 64-bit int.
1946 unsigned srcSize = target.getTargetData().getTypeSize(leftVal->getType());
1947 Type* tmpTypeToUse =
1948 (srcSize <= 4)? Type::FloatTy : Type::DoubleTy;
1949 MachineCodeForInstruction &destMCFI =
1950 MachineCodeForInstruction::get(dest);
1951 srcForCast = new TmpInstruction(destMCFI, tmpTypeToUse, dest);
1953 target.getInstrInfo().CreateCodeToCopyIntToFloat(target,
1954 dest->getParent()->getParent(),
1955 leftVal, cast<Instruction>(srcForCast), mvec, destMCFI);
1958 srcForCast = leftVal;
1960 M = BuildMI(opCode, 2).addReg(srcForCast).addRegDef(dest);
1966 case 19: // reg: ToArrayTy(reg):
1967 case 20: // reg: ToPointerTy(reg):
1968 forwardOperandNum = 0; // forward first operand to user
1971 case 233: // reg: Add(reg, Constant)
1972 maskUnsignedResult = true;
1973 M = CreateAddConstInstruction(subtreeRoot);
1978 // ELSE FALL THROUGH
1980 case 33: // reg: Add(reg, reg)
1981 maskUnsignedResult = true;
1982 Add3OperandInstr(ChooseAddInstruction(subtreeRoot), subtreeRoot, mvec);
1985 case 234: // reg: Sub(reg, Constant)
1986 maskUnsignedResult = true;
1987 M = CreateSubConstInstruction(subtreeRoot);
1992 // ELSE FALL THROUGH
1994 case 34: // reg: Sub(reg, reg)
1995 maskUnsignedResult = true;
1996 Add3OperandInstr(ChooseSubInstructionByType(
1997 subtreeRoot->getInstruction()->getType()), subtreeRoot, mvec);
2001 case 135: // reg: Mul(todouble, todouble)
2005 case 35: // reg: Mul(reg, reg)
2007 maskUnsignedResult = true;
2008 MachineOpCode forceOp = ((checkCast && BothFloatToDouble(subtreeRoot))
2009 ? (MachineOpCode)V9::FSMULD
2010 : INVALID_MACHINE_OPCODE);
2011 Instruction* mulInstr = subtreeRoot->getInstruction();
2012 CreateMulInstruction(target, mulInstr->getParent()->getParent(),
2013 subtreeRoot->leftChild()->getValue(),
2014 subtreeRoot->rightChild()->getValue(),
2016 MachineCodeForInstruction::get(mulInstr),forceOp);
2019 case 335: // reg: Mul(todouble, todoubleConst)
2023 case 235: // reg: Mul(reg, Constant)
2025 maskUnsignedResult = true;
2026 MachineOpCode forceOp = ((checkCast && BothFloatToDouble(subtreeRoot))
2027 ? (MachineOpCode)V9::FSMULD
2028 : INVALID_MACHINE_OPCODE);
2029 Instruction* mulInstr = subtreeRoot->getInstruction();
2030 CreateMulInstruction(target, mulInstr->getParent()->getParent(),
2031 subtreeRoot->leftChild()->getValue(),
2032 subtreeRoot->rightChild()->getValue(),
2034 MachineCodeForInstruction::get(mulInstr),
2038 case 236: // reg: Div(reg, Constant)
2039 maskUnsignedResult = true;
2041 CreateDivConstInstruction(target, subtreeRoot, mvec);
2042 if (mvec.size() > L)
2044 // ELSE FALL THROUGH
2046 case 36: // reg: Div(reg, reg)
2048 maskUnsignedResult = true;
2050 // If either operand of divide is smaller than 64 bits, we have
2051 // to make sure the unused top bits are correct because they affect
2052 // the result. These bits are already correct for unsigned values.
2053 // They may be incorrect for signed values, so sign extend to fill in.
2054 Instruction* divI = subtreeRoot->getInstruction();
2055 Value* divOp1 = subtreeRoot->leftChild()->getValue();
2056 Value* divOp2 = subtreeRoot->rightChild()->getValue();
2057 Value* divOp1ToUse = divOp1;
2058 Value* divOp2ToUse = divOp2;
2059 if (divI->getType()->isSigned()) {
2060 unsigned opSize=target.getTargetData().getTypeSize(divI->getType());
2062 MachineCodeForInstruction& mcfi=MachineCodeForInstruction::get(divI);
2063 divOp1ToUse = new TmpInstruction(mcfi, divOp1);
2064 divOp2ToUse = new TmpInstruction(mcfi, divOp2);
2065 target.getInstrInfo().
2066 CreateSignExtensionInstructions(target,
2067 divI->getParent()->getParent(),
2068 divOp1, divOp1ToUse,
2069 8*opSize, mvec, mcfi);
2070 target.getInstrInfo().
2071 CreateSignExtensionInstructions(target,
2072 divI->getParent()->getParent(),
2073 divOp2, divOp2ToUse,
2074 8*opSize, mvec, mcfi);
2078 mvec.push_back(BuildMI(ChooseDivInstruction(target, subtreeRoot), 3)
2079 .addReg(divOp1ToUse)
2080 .addReg(divOp2ToUse).addRegDef(divI));
2086 case 37: // reg: Rem(reg, reg)
2087 case 237: // reg: Rem(reg, Constant)
2089 maskUnsignedResult = true;
2091 Instruction* remI = subtreeRoot->getInstruction();
2092 Value* divOp1 = subtreeRoot->leftChild()->getValue();
2093 Value* divOp2 = subtreeRoot->rightChild()->getValue();
2095 MachineCodeForInstruction& mcfi = MachineCodeForInstruction::get(remI);
2097 // If second operand of divide is smaller than 64 bits, we have
2098 // to make sure the unused top bits are correct because they affect
2099 // the result. These bits are already correct for unsigned values.
2100 // They may be incorrect for signed values, so sign extend to fill in.
2102 Value* divOpToUse = divOp2;
2103 if (divOp2->getType()->isSigned()) {
2104 unsigned opSize=target.getTargetData().getTypeSize(divOp2->getType());
2106 divOpToUse = new TmpInstruction(mcfi, divOp2);
2107 target.getInstrInfo().
2108 CreateSignExtensionInstructions(target,
2109 remI->getParent()->getParent(),
2111 divOp2, divOpToUse, 8*opSize, mvec, mcfi);
2115 // Now compute: result = rem V1, V2 as:
2116 // result = V1 - (V1 / signExtend(V2)) * signExtend(V2)
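// For example (signed): -7 rem 3  =>  quot = -7/3 = -2,
// prod = -2*3 = -6, result = -7 - (-6) = -1.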
2118 TmpInstruction* quot = new TmpInstruction(mcfi, divOp1, divOpToUse);
2119 TmpInstruction* prod = new TmpInstruction(mcfi, quot, divOpToUse);
2121 mvec.push_back(BuildMI(ChooseDivInstruction(target, subtreeRoot), 3)
2122 .addReg(divOp1).addReg(divOpToUse).addRegDef(quot));
2124 mvec.push_back(BuildMI(ChooseMulInstructionByType(remI->getType()), 3)
2125 .addReg(quot).addReg(divOpToUse).addRegDef(prod));
2127 mvec.push_back(BuildMI(ChooseSubInstructionByType(remI->getType()), 3)
2128 .addReg(divOp1).addReg(prod).addRegDef(remI));
2133 case 38: // bool: And(bool, bool)
2134 case 138: // bool: And(bool, not)
2135 case 238: // bool: And(bool, boolconst)
2136 case 338: // reg : BAnd(reg, reg)
2137 case 538: // reg : BAnd(reg, Constant)
2138 Add3OperandInstr(V9::ANDr, subtreeRoot, mvec);
2141 case 438: // bool: BAnd(bool, bnot)
2142 { // Use the argument of NOT as the second argument!
2143 // Mark the NOT node so that no code is generated for it.
2144 // If the type is boolean, set 1 or 0 in the result register.
2145 InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
2146 Value* notArg = BinaryOperator::getNotArgument(
2147 cast<BinaryOperator>(notNode->getInstruction()));
2148 notNode->markFoldedIntoParent();
2149 Value *lhs = subtreeRoot->leftChild()->getValue();
2150 Value *dest = subtreeRoot->getValue();
2151 mvec.push_back(BuildMI(V9::ANDNr, 3).addReg(lhs).addReg(notArg)
2152 .addReg(dest, MachineOperand::Def));
2154 if (notArg->getType() == Type::BoolTy) {
2155 // set 1 in result register if result of above is non-zero
2156 mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
2157 .addReg(dest, MachineOperand::UseAndDef));
2163 case 39: // bool: Or(bool, bool)
2164 case 139: // bool: Or(bool, not)
2165 case 239: // bool: Or(bool, boolconst)
2166 case 339: // reg : BOr(reg, reg)
2167 case 539: // reg : BOr(reg, Constant)
2168 Add3OperandInstr(V9::ORr, subtreeRoot, mvec);
2171 case 439: // bool: BOr(bool, bnot)
2172 { // Use the argument of NOT as the second argument!
2173 // Mark the NOT node so that no code is generated for it.
2174 // If the type is boolean, set 1 or 0 in the result register.
2175 InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
2176 Value* notArg = BinaryOperator::getNotArgument(
2177 cast<BinaryOperator>(notNode->getInstruction()));
2178 notNode->markFoldedIntoParent();
2179 Value *lhs = subtreeRoot->leftChild()->getValue();
2180 Value *dest = subtreeRoot->getValue();
2182 mvec.push_back(BuildMI(V9::ORNr, 3).addReg(lhs).addReg(notArg)
2183 .addReg(dest, MachineOperand::Def));
2185 if (notArg->getType() == Type::BoolTy) {
2186 // set 1 in result register if result of above is non-zero
2187 mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
2188 .addReg(dest, MachineOperand::UseAndDef));
2194 case 40: // bool: Xor(bool, bool)
2195 case 140: // bool: Xor(bool, not)
2196 case 240: // bool: Xor(bool, boolconst)
2197 case 340: // reg : BXor(reg, reg)
2198 case 540: // reg : BXor(reg, Constant)
2199 Add3OperandInstr(V9::XORr, subtreeRoot, mvec);
2202 case 440: // bool: BXor(bool, bnot)
2203 { // Use the argument of NOT as the second argument!
2204 // Mark the NOT node so that no code is generated for it.
2205 // If the type is boolean, set 1 or 0 in the result register.
2206 InstructionNode* notNode = (InstructionNode*) subtreeRoot->rightChild();
2207 Value* notArg = BinaryOperator::getNotArgument(
2208 cast<BinaryOperator>(notNode->getInstruction()));
2209 notNode->markFoldedIntoParent();
2210 Value *lhs = subtreeRoot->leftChild()->getValue();
2211 Value *dest = subtreeRoot->getValue();
2212 mvec.push_back(BuildMI(V9::XNORr, 3).addReg(lhs).addReg(notArg)
2213 .addReg(dest, MachineOperand::Def));
2215 if (notArg->getType() == Type::BoolTy) {
2216 // set 1 in result register if result of above is non-zero
2217 mvec.push_back(BuildMI(V9::MOVRNZi, 3).addReg(dest).addZImm(1)
2218 .addReg(dest, MachineOperand::UseAndDef));
2223 case 41: // setCCconst: SetCC(reg, Constant)
2224 { // Comparison is with a constant:
2226 // If the bool result must be computed into a register (see below),
2227 // and the constant is int ZERO, we can use the MOVR[op] instructions
2228 // and avoid the SUBcc instruction entirely.
2229 // Otherwise this is just the same as case 42, so just fall through.
2231 // The result of the SetCC must be computed and stored in a register if
2232 // it is used outside the current basic block (so it must be computed
2233 // as a boolreg) or it is used by anything other than a branch.
2234 // We will use a conditional move to do this.
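// For example (illustrative): if `%b = setlt int %x, 0' is used by
// something other than a branch, this case emits roughly
//     sethi 0, %b ; movrlz %x, 1, %b
// i.e., %b = (%x < 0), with no SUBcc needed.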
2236 Instruction* setCCInstr = subtreeRoot->getInstruction();
2237 bool computeBoolVal = (subtreeRoot->parent() == NULL ||
2238 ! AllUsesAreBranches(setCCInstr));
2240 if (computeBoolVal) {
2241 InstrTreeNode* constNode = subtreeRoot->rightChild();
2243 assert(constNode && constNode->getNodeType() == InstrTreeNode::NTConstNode);
2244 Constant *constVal = cast<Constant>(constNode->getValue());
2247 if ((constVal->getType()->isInteger()
2248 || isa<PointerType>(constVal->getType()))
2249 && target.getInstrInfo().ConvertConstantToIntType(target,
2250 constVal, constVal->getType(), isValidConst) == 0
2253 // That constant is an integer zero after all...
2254 // Use a MOVR[op] to compute the boolean result
2255 // Unconditionally set register to 0
2256 mvec.push_back(BuildMI(V9::SETHI, 2).addZImm(0)
2257 .addRegDef(setCCInstr));
2259 // Now conditionally move 1 into the register.
2260 // Mark the register as a use (as well as a def) because the old
2261 // value will be retained if the condition is false.
2262 MachineOpCode movOpCode = ChooseMovpregiForSetCC(subtreeRoot);
2263 mvec.push_back(BuildMI(movOpCode, 3)
2264 .addReg(subtreeRoot->leftChild()->getValue())
2266 .addZImm(1).addReg(setCCInstr, MachineOperand::UseAndDef));
2271 // ELSE FALL THROUGH
2274 case 42: // bool: SetCC(reg, reg):
2276 // This generates a SUBCC instruction, putting the difference in a
2277 // result reg. if needed, and/or setting a condition code if needed.
2279 Instruction* setCCInstr = subtreeRoot->getInstruction();
2280 Value* leftVal = subtreeRoot->leftChild()->getValue();
2281 Value* rightVal = subtreeRoot->rightChild()->getValue();
2282 const Type* opType = leftVal->getType();
2283 bool isFPCompare = opType->isFloatingPoint();
2285 // If the boolean result of the SetCC is used outside the current basic
2286 // block (so it must be computed as a boolreg) or is used by anything
2287 // other than a branch, the boolean must be computed and stored
2288 // in a result register. We will use a conditional move to do this.
2290 bool computeBoolVal = (subtreeRoot->parent() == NULL ||
2291 ! AllUsesAreBranches(setCCInstr));
2293 // A TmpInstruction is created to represent the CC "result".
2294 // Unlike other instances of TmpInstruction, this one is used
2295 // by machine code of multiple LLVM instructions, viz.,
2296 // the SetCC and the branch. Make sure to get the same one!
2297 // Note that we do this even for FP CC registers even though they
2298 // are explicit operands, because the type of the operand
2299 // needs to be a floating point condition code, not an integer
2300 // condition code. Think of this as casting the bool result to
2301 // a FP condition code register.
2302 // Later, we mark the 4th operand as being a CC register, and as a def.
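// For example (illustrative), `%b = setle int %x, %y' followed by a branch
// on %b in the same tree becomes roughly
//     subcc %x, %y, %g0      ! defines the shared CC temporary
//     ble %icc, L1 ; nop     ! the branch case re-uses that same temporary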
2304 TmpInstruction* tmpForCC = GetTmpForCC(setCCInstr,
2305 setCCInstr->getParent()->getParent(),
2307 leftVal->getType(), MachineCodeForInstruction::get(setCCInstr));
2309 // If the operands are signed values smaller than 4 bytes, then they
2310 // must be sign-extended in order to do a valid 32-bit comparison
2311 // and get the right result in the 32-bit CC register (%icc).
2313 Value* leftOpToUse = leftVal;
2314 Value* rightOpToUse = rightVal;
2315 if (opType->isIntegral() && opType->isSigned()) {
2316 unsigned opSize = target.getTargetData().getTypeSize(opType);
2318 MachineCodeForInstruction& mcfi =
2319 MachineCodeForInstruction::get(setCCInstr);
2321 // create temporary virtual regs. to hold the sign-extensions
2322 leftOpToUse = new TmpInstruction(mcfi, leftVal);
2323 rightOpToUse = new TmpInstruction(mcfi, rightVal);
2325 // sign-extend each operand and put the result in the temporary reg.
2326 target.getInstrInfo().CreateSignExtensionInstructions
2327 (target, setCCInstr->getParent()->getParent(),
2328 leftVal, leftOpToUse, 8*opSize, mvec, mcfi);
2329 target.getInstrInfo().CreateSignExtensionInstructions
2330 (target, setCCInstr->getParent()->getParent(),
2331 rightVal, rightOpToUse, 8*opSize, mvec, mcfi);
2335 if (! isFPCompare) {
2336 // Integer condition: set CC and discard result.
2337 mvec.push_back(BuildMI(V9::SUBccr, 4)
2338 .addReg(leftOpToUse)
2339 .addReg(rightOpToUse)
2340 .addMReg(target.getRegInfo()
2341 .getZeroRegNum(), MachineOperand::Def)
2342 .addCCReg(tmpForCC, MachineOperand::Def));
2344 // FP condition: dest of FCMP should be some FCCn register
2345 mvec.push_back(BuildMI(ChooseFcmpInstruction(subtreeRoot), 3)
2346 .addCCReg(tmpForCC, MachineOperand::Def)
2347 .addReg(leftOpToUse)
2348 .addReg(rightOpToUse));
2351 if (computeBoolVal) {
2352 MachineOpCode movOpCode = (isFPCompare
2353 ? ChooseMovFpcciInstruction(subtreeRoot)
2354 : ChooseMovpcciForSetCC(subtreeRoot));
2356 // Unconditionally set register to 0
2357 M = BuildMI(V9::SETHI, 2).addZImm(0).addRegDef(setCCInstr);
2360 // Now conditionally move 1 into the register.
2361 // Mark the register as a use (as well as a def) because the old
2362 // value will be retained if the condition is false.
2363 M = (BuildMI(movOpCode, 3).addCCReg(tmpForCC).addZImm(1)
2364 .addReg(setCCInstr, MachineOperand::UseAndDef));
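// Illustrative result for an integer setle whose boolean value is needed:
//     subcc %a, %b, %g0 ; sethi 0, dest ; movle %icc, 1, dest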
2370 case 51: // reg: Load(reg)
2371 case 52: // reg: Load(ptrreg)
2372 SetOperandsForMemInstr(ChooseLoadInstruction(
2373 subtreeRoot->getValue()->getType()),
2374 mvec, subtreeRoot, target);
2377 case 55: // reg: GetElemPtr(reg)
2378 case 56: // reg: GetElemPtrIdx(reg,reg)
2379 // If the GetElemPtr was folded into the user (parent), it will be
2380 // caught above. For other cases, we have to compute the address.
2381 SetOperandsForMemInstr(V9::ADDr, mvec, subtreeRoot, target);
2384 case 57: // reg: Alloca: Implement as 1 instruction:
2385 { // add %fp, offsetFromFP -> result
2386 AllocationInst* instr =
2387 cast<AllocationInst>(subtreeRoot->getInstruction());
2389 unsigned tsize = target.getTargetData().getTypeSize(instr->getAllocatedType());
2391 CreateCodeForFixedSizeAlloca(target, instr, tsize, 1, mvec);
2395 case 58: // reg: Alloca(reg): Implement as 3 instructions:
2396 // mul num, typeSz -> tmp
2397 // sub %sp, tmp -> %sp
2398 { // add %sp, frameSizeBelowDynamicArea -> result
2399 AllocationInst* instr =
2400 cast<AllocationInst>(subtreeRoot->getInstruction());
2401 const Type* eltType = instr->getAllocatedType();
2403 // If #elements is constant, use simpler code for fixed-size allocas
2404 int tsize = (int) target.getTargetData().getTypeSize(eltType);
2405 Value* numElementsVal = NULL;
2406 bool isArray = instr->isArrayAllocation();
2408 if (!isArray || isa<Constant>(numElementsVal = instr->getArraySize())) {
2409 // total size is constant: generate code for fixed-size alloca
2410 unsigned numElements = isArray?
2411 cast<ConstantUInt>(numElementsVal)->getValue() : 1;
2412 CreateCodeForFixedSizeAlloca(target, instr, tsize,
2415 // total size is not constant.
2416 CreateCodeForVariableSizeAlloca(target, instr, tsize,
2417 numElementsVal, mvec);
2422 case 61: // reg: Call
2423 { // Generate a direct (CALL) or indirect (JMPL) call.
2424 // Mark the return-address register, the indirection
2425 // register (for indirect calls), the operands of the Call,
2426 // and the return value (if any) as implicit operands
2427 // of the machine instruction.
2429 // If this is a varargs function, floating point arguments
2430 // have to be passed in integer registers, so insert
2431 // copy-float-to-int instructions for each float operand.
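// Overall shape of the emitted sequence (sketch): copies of each argument
// into its outgoing register or stack slot, then
//     call <func>   (or  jmpl <reg>, 0, retAddrReg  for indirect calls)
//     nop           ! delay slot
// and finally a copy of the return value (if any) out of %o0/%f0 into the
// virtual register holding the call's result.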
2433 CallInst *callInstr = cast<CallInst>(subtreeRoot->getInstruction());
2434 Value *callee = callInstr->getCalledValue();
2435 Function* calledFunc = dyn_cast<Function>(callee);
2437 // Check if this is an intrinsic function that needs a special code
2438 // sequence (e.g., va_start). Indirect calls cannot be special.
2440 bool specialIntrinsic = false;
2442 if (calledFunc && (iid=(Intrinsic::ID)calledFunc->getIntrinsicID()))
2443 specialIntrinsic = CodeGenIntrinsic(iid, *callInstr, target, mvec);
2445 // If not, generate the normal call sequence for the function.
2446 // This can also handle any intrinsics that are just function calls.
2448 if (! specialIntrinsic) {
2449 Function* currentFunc = callInstr->getParent()->getParent();
2450 MachineFunction& MF = MachineFunction::get(currentFunc);
2451 MachineCodeForInstruction& mcfi =
2452 MachineCodeForInstruction::get(callInstr);
2453 const SparcRegInfo& regInfo =
2454 (SparcRegInfo&) target.getRegInfo();
2455 const TargetFrameInfo& frameInfo = target.getFrameInfo();
2457 // Create hidden virtual register for return address with type void*
2458 TmpInstruction* retAddrReg =
2459 new TmpInstruction(mcfi, PointerType::get(Type::VoidTy), callInstr);
2461 // Generate the machine instruction and its operands.
2462 // Use CALL for direct function calls; this optimistically assumes
2463 // the PC-relative address fits in the CALL address field (22 bits).
2464 // Use JMPL for indirect calls.
2465 // This will be added to mvec later, after operand copies.
2467 MachineInstr* callMI;
2468 if (calledFunc) // direct function call
2469 callMI = BuildMI(V9::CALL, 1).addPCDisp(callee);
2470 else // indirect function call
2471 callMI = (BuildMI(V9::JMPLCALLi,3).addReg(callee)
2472 .addSImm((int64_t)0).addRegDef(retAddrReg));
2474 const FunctionType* funcType =
2475 cast<FunctionType>(cast<PointerType>(callee->getType())
2476 ->getElementType());
2477 bool isVarArgs = funcType->isVarArg();
2478 bool noPrototype = isVarArgs && funcType->getNumParams() == 0;
2480 // Use a descriptor to pass information about call arguments
2481 // to the register allocator. This descriptor will be "owned"
2482 // and freed automatically when the MachineCodeForInstruction
2483 // object for the callInstr goes away.
2484 CallArgsDescriptor* argDesc =
2485 new CallArgsDescriptor(callInstr, retAddrReg,isVarArgs,noPrototype);
2486 assert(callInstr->getOperand(0) == callee
2487 && "This is assumed in the loop below!");
2489 // Insert sign-extension instructions for small signed values,
2490 // if this is an unknown function (i.e., called via a funcptr)
2491 // or an external one (i.e., one that may not be compiled by llc).
2493 if (calledFunc == NULL || calledFunc->isExternal()) {
2494 for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i) {
2495 Value* argVal = callInstr->getOperand(i);
2496 const Type* argType = argVal->getType();
2497 if (argType->isIntegral() && argType->isSigned()) {
2498 unsigned argSize = target.getTargetData().getTypeSize(argType);
2500 // create a temporary virtual reg. to hold the sign-extension
2501 TmpInstruction* argExtend = new TmpInstruction(mcfi, argVal);
2503 // sign-extend argVal and put the result in the temporary reg.
2504 target.getInstrInfo().CreateSignExtensionInstructions
2505 (target, currentFunc, argVal, argExtend,
2506 8*argSize, mvec, mcfi);
2508 // replace argVal with argExtend in CallArgsDescriptor
2509 argDesc->getArgInfo(i-1).replaceArgVal(argExtend);
2515 // Insert copy instructions to get all the arguments into
2516 // all the places that they need to be.
2518 for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i) {
2520 CallArgInfo& argInfo = argDesc->getArgInfo(argNo);
2521 Value* argVal = argInfo.getArgVal(); // don't use callInstr arg here
2522 const Type* argType = argVal->getType();
2523 unsigned regType = regInfo.getRegTypeForDataType(argType);
2524 unsigned argSize = target.getTargetData().getTypeSize(argType);
2525 int regNumForArg = TargetRegInfo::getInvalidRegNum();
2526 unsigned regClassIDOfArgReg;
2528 // Check for FP arguments to varargs functions.
2529 // Any such argument in the first $K$ args must be passed in an
2530 // integer register. If there is no prototype, it must also
2531 // be passed in an FP register.
2532 // K = #integer argument registers.
2533 bool isFPArg = argVal->getType()->isFloatingPoint();
2534 if (isVarArgs && isFPArg) {
2537 // It is a function with no prototype: pass value
2538 // as an FP value as well as a varargs value. The FP value
2539 // may go in a register or on the stack. The copy instruction
2540 // to the outgoing reg/stack is created by the normal argument
2541 // handling code since this is the "normal" passing mode.
2543 regNumForArg = regInfo.regNumForFPArg(regType,
2544 false, false, argNo,
2545 regClassIDOfArgReg);
2546 if (regNumForArg == regInfo.getInvalidRegNum())
2547 argInfo.setUseStackSlot();
2549 argInfo.setUseFPArgReg();
2552 // If this arg. is in the first $K$ regs, add special copy-
2553 // float-to-int instructions to pass the value as an int.
2554 // To check if it is in the first $K$, get the register
2555 // number for the arg #i. These copy instructions are
2556 // generated here because they are extra cases and not needed
2557 // for the normal argument handling (some code reuse is
2558 // possible though -- later).
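// For example (illustrative), a double argument that must go in an integer
// arg register of a varargs callee is copied via the stack as:
//     std  %fN, [%fp + tmpOffset]     ! store the FP value to a stack temp
//     ldx  [%fp + tmpOffset], %oK     ! reload it into the integer arg reg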
2560 int copyRegNum = regInfo.regNumForIntArg(false, false, argNo,
2561 regClassIDOfArgReg);
2562 if (copyRegNum != regInfo.getInvalidRegNum()) {
2563 // Create a virtual register to represent copyReg. Mark
2564 // this vreg as being an implicit operand of the call MI
2565 const Type* loadTy = (argType == Type::FloatTy
2566 ? Type::IntTy : Type::LongTy);
2567 TmpInstruction* argVReg = new TmpInstruction(mcfi, loadTy,
2570 callMI->addImplicitRef(argVReg);
2572 // Get a temp stack location to use to copy
2573 // float-to-int via the stack.
2575 // FIXME: For now, we allocate permanent space because
2576 // the stack frame manager does not allow locals to be
2577 // allocated (e.g., for alloca) after a temp is allocated.
2580 // int tmpOffset = MF.getInfo()->pushTempValue(argSize);
2581 int tmpOffset = MF.getInfo()->allocateLocalVar(argVReg);
2583 // Generate the store from FP reg to stack
2584 unsigned StoreOpcode = ChooseStoreInstruction(argType);
2585 M = BuildMI(convertOpcodeFromRegToImm(StoreOpcode), 3)
2586 .addReg(argVal).addMReg(regInfo.getFramePointer())
2587 .addSImm(tmpOffset);
2590 // Generate the load from stack to int arg reg
2591 unsigned LoadOpcode = ChooseLoadInstruction(loadTy);
2592 M = BuildMI(convertOpcodeFromRegToImm(LoadOpcode), 3)
2593 .addMReg(regInfo.getFramePointer()).addSImm(tmpOffset)
2594 .addReg(argVReg, MachineOperand::Def);
2596 // Mark operand with register it should be assigned
2597 // both for copy and for the callMI
2598 M->SetRegForOperand(M->getNumOperands()-1, copyRegNum);
2599 callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1, copyRegNum);
2603 // Add info about the argument to the CallArgsDescriptor
2604 argInfo.setUseIntArgReg();
2605 argInfo.setArgCopy(copyRegNum);
2607 // Cannot fit in first $K$ regs so pass arg on stack
2608 argInfo.setUseStackSlot();
2610 } else if (isFPArg) {
2611 // Get the outgoing arg reg to see if there is one.
2612 regNumForArg = regInfo.regNumForFPArg(regType, false, false,
2613 argNo, regClassIDOfArgReg);
2614 if (regNumForArg == regInfo.getInvalidRegNum())
2615 argInfo.setUseStackSlot();
2617 argInfo.setUseFPArgReg();
2618 regNumForArg = regInfo.getUnifiedRegNum(regClassIDOfArgReg, regNumForArg);
2622 // Get the outgoing arg reg to see if there is one.
2623 regNumForArg = regInfo.regNumForIntArg(false,false,
2624 argNo, regClassIDOfArgReg);
2625 if (regNumForArg == regInfo.getInvalidRegNum())
2626 argInfo.setUseStackSlot();
2628 argInfo.setUseIntArgReg();
2629 regNumForArg = regInfo.getUnifiedRegNum(regClassIDOfArgReg, regNumForArg);
2635 // Now insert copy instructions to stack slot or arg. register
2637 if (argInfo.usesStackSlot()) {
2638 // Get the stack offset for this argument slot.
2639 // FP args on stack are right justified so adjust offset!
2640 // int arguments are also right justified but they are
2641 // always loaded as a full double-word so the offset does
2642 // not need to be adjusted.
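// E.g., a 4-byte float in an 8-byte argument slot is stored at
// (slot offset + 4), i.e., in the least-significant half of the
// doubleword on this big-endian target.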
2643 int argOffset = frameInfo.getOutgoingArgOffset(MF, argNo);
2644 if (argType->isFloatingPoint()) {
2645 unsigned slotSize = frameInfo.getSizeOfEachArgOnStack();
2646 assert(argSize <= slotSize && "Insufficient slot size!");
2647 argOffset += slotSize - argSize;
2650 // Now generate instruction to copy argument to stack
2651 MachineOpCode storeOpCode =
2652 (argType->isFloatingPoint()
2653 ? ((argSize == 4)? V9::STFi : V9::STDFi) : V9::STXi);
2655 M = BuildMI(storeOpCode, 3).addReg(argVal)
2656 .addMReg(regInfo.getStackPointer()).addSImm(argOffset);
2659 else if (regNumForArg != regInfo.getInvalidRegNum()) {
2661 // Create a virtual register to represent the arg reg. Mark
2662 // this vreg as being an implicit operand of the call MI.
2663 TmpInstruction* argVReg =
2664 new TmpInstruction(mcfi, argVal, NULL, "argReg");
2666 callMI->addImplicitRef(argVReg);
2668 // Generate the reg-to-reg copy into the outgoing arg reg.
2669 // -- For FP values, create a FMOVS or FMOVD instruction
2670 // -- For non-FP values, create an add-with-0 instruction
2671 if (argType->isFloatingPoint())
2672 M=(BuildMI(argType==Type::FloatTy? V9::FMOVS :V9::FMOVD,2)
2673 .addReg(argVal).addReg(argVReg, MachineOperand::Def));
2675 M = (BuildMI(ChooseAddInstructionByType(argType), 3)
2676 .addReg(argVal).addSImm((int64_t) 0)
2677 .addReg(argVReg, MachineOperand::Def));
2679 // Mark the operand with the register it should be assigned
2680 M->SetRegForOperand(M->getNumOperands()-1, regNumForArg);
2681 callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1, regNumForArg);
2687 assert(argInfo.getArgCopy() != regInfo.getInvalidRegNum() &&
2688 "Arg. not in stack slot, primary or secondary register?");
2691 // add call instruction and delay slot before copying return value
2692 mvec.push_back(callMI);
2693 mvec.push_back(BuildMI(V9::NOP, 0));
2695 // Add the return value as an implicit ref. The call operands
2696 // were added above. Also, add code to copy out the return value.
2697 // This is always register-to-register for int or FP return values.
2699 if (callInstr->getType() != Type::VoidTy) {
2700 // Get the return value reg.
2701 const Type* retType = callInstr->getType();
2703 int regNum = (retType->isFloatingPoint()
2704 ? (unsigned) SparcFloatRegClass::f0
2705 : (unsigned) SparcIntRegClass::o0);
2706 unsigned regClassID = regInfo.getRegClassIDOfType(retType);
2707 regNum = regInfo.getUnifiedRegNum(regClassID, regNum);
2709 // Create a virtual register to represent it and mark
2710 // this vreg as being an implicit operand of the call MI
2711 TmpInstruction* retVReg =
2712 new TmpInstruction(mcfi, callInstr, NULL, "argReg");
2714 callMI->addImplicitRef(retVReg, /*isDef*/ true);
2716 // Generate the reg-to-reg copy from the return value reg.
2717 // -- For FP values, create a FMOVS or FMOVD instruction
2718 // -- For non-FP values, create an add-with-0 instruction
2719 if (retType->isFloatingPoint())
2720 M = (BuildMI(retType==Type::FloatTy? V9::FMOVS : V9::FMOVD, 2)
2721 .addReg(retVReg).addReg(callInstr, MachineOperand::Def));
2723 M = (BuildMI(ChooseAddInstructionByType(retType), 3)
2724 .addReg(retVReg).addSImm((int64_t) 0)
2725 .addReg(callInstr, MachineOperand::Def));
2727 // Mark the operand with the register it should be assigned
2728 // Also mark the implicit ref of the call defining this operand
2729 M->SetRegForOperand(0, regNum);
2730 callMI->SetRegForImplicitRef(callMI->getNumImplicitRefs()-1,regNum);
2735 // For the CALL instruction, the ret. addr. reg. is also implicit
2736 if (isa<Function>(callee))
2737 callMI->addImplicitRef(retAddrReg, /*isDef*/ true);
2739 MF.getInfo()->popAllTempValues(); // free temps used for this inst
2745 case 62: // reg: Shl(reg, reg)
2747 Value* argVal1 = subtreeRoot->leftChild()->getValue();
2748 Value* argVal2 = subtreeRoot->rightChild()->getValue();
2749 Instruction* shlInstr = subtreeRoot->getInstruction();
2751 const Type* opType = argVal1->getType();
2752 assert((opType->isInteger() || isa<PointerType>(opType)) &&
2753 "Shl unsupported for other types");
2754 unsigned opSize = target.getTargetData().getTypeSize(opType);
2756 CreateShiftInstructions(target, shlInstr->getParent()->getParent(),
2757 (opSize > 4)? V9::SLLXr6:V9::SLLr5,
2758 argVal1, argVal2, 0, shlInstr, mvec,
2759 MachineCodeForInstruction::get(shlInstr));
2763 case 63: // reg: Shr(reg, reg)
2765 const Type* opType = subtreeRoot->leftChild()->getValue()->getType();
2766 assert((opType->isInteger() || isa<PointerType>(opType)) &&
2767 "Shr unsupported for other types");
2768 unsigned opSize = target.getTargetData().getTypeSize(opType);
2769 Add3OperandInstr(opType->isSigned()
2770 ? (opSize > 4? V9::SRAXr6 : V9::SRAr5)
2771 : (opSize > 4? V9::SRLXr6 : V9::SRLr5), subtreeRoot, mvec);
2776 case 64: // reg: Phi(reg,reg)
2777 break; // don't forward the value
2779 case 65: // reg: VANext(reg): the va_next(va_list, type) instruction
2780 { // Increment the va_list pointer register according to the type.
2781 // All LLVM argument types are <= 64 bits, so use one doubleword.
2782 Instruction* vaNextI = subtreeRoot->getInstruction();
2783 assert(target.getTargetData().getTypeSize(vaNextI->getType()) <= 8 &&
2784 "We assumed that all LLVM parameter types <= 8 bytes!");
2785 int argSize = target.getFrameInfo().getSizeOfEachArgOnStack();
2786 mvec.push_back(BuildMI(V9::ADDi, 3).addReg(vaNextI->getOperand(0)).
2787 addSImm(argSize).addRegDef(vaNextI));
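// E.g., with 8-byte argument slots this is simply:
//     add vaListPtr, 8, vaNextI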
2791 case 66: // reg: VAArg (reg): the va_arg instruction
2792 { // Load argument from stack using current va_list pointer value.
2793 // Use 64-bit load for all non-FP args, and LDDF or double for FP.
2794 Instruction* vaArgI = subtreeRoot->getInstruction();
2795 MachineOpCode loadOp = (vaArgI->getType()->isFloatingPoint()
2796 ? (vaArgI->getType() == Type::FloatTy
2797 ? V9::LDFi : V9::LDDFi) : V9::LDXi);
2799 mvec.push_back(BuildMI(loadOp, 3).addReg(vaArgI->getOperand(0)).
2800 addSImm(0).addRegDef(vaArgI));
2804 case 71: // reg: VReg
2805 case 72: // reg: Constant
2806 break; // don't forward the value
2809 assert(0 && "Unrecognized BURG rule");
2814 if (forwardOperandNum >= 0) {
2815 // We did not generate a machine instruction but need to use operand.
2816 // If user is in the same tree, replace Value in its machine operand.
2817 // If not, insert a copy instruction which should get coalesced away
2818 // by register allocation.
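// For example, a no-op cast such as ToPointerTy(reg) generates no code: its
// operand is either substituted directly into the user's machine operands
// (same tree) or reaches the user through a simple copy that register
// allocation should coalesce away (different tree).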
2819 if (subtreeRoot->parent() != NULL)
2820 ForwardOperand(subtreeRoot, subtreeRoot->parent(), forwardOperandNum);
2822 std::vector<MachineInstr*> minstrVec;
2823 Instruction* instr = subtreeRoot->getInstruction();
2824 target.getInstrInfo().
2825 CreateCopyInstructionsByType(target,
2826 instr->getParent()->getParent(),
2827 instr->getOperand(forwardOperandNum),
2829 instr, minstrVec, MachineCodeForInstruction::get(instr));
2830 assert(minstrVec.size() > 0);
2831 mvec.insert(mvec.end(), minstrVec.begin(), minstrVec.end());
2835 if (maskUnsignedResult) {
2836 // If result is unsigned and smaller than int reg size,
2837 // we need to clear high bits of result value.
2838 assert(forwardOperandNum < 0 && "Need mask but no instruction generated");
2839 Instruction* dest = subtreeRoot->getInstruction();
2840 if (dest->getType()->isUnsigned()) {
2841 unsigned destSize=target.getTargetData().getTypeSize(dest->getType());
2842 if (destSize <= 4) {
2843 // Mask high 64 - N bits, where N = 8*destSize.
2845 // Use a TmpInstruction to represent the
2846 // intermediate result before masking. Since those instructions
2847 // have already been generated, go back and substitute tmpI
2848 // for dest in the result position of each one of them.
2850 MachineCodeForInstruction& mcfi = MachineCodeForInstruction::get(dest);
2851 TmpInstruction *tmpI = new TmpInstruction(mcfi, dest->getType(),
2852 dest, NULL, "maskHi");
2853 Value* srlArgToUse = tmpI;
2855 unsigned numSubst = 0;
2856 for (unsigned i=0, N=mvec.size(); i < N; ++i) {
2858 // Make sure we substitute all occurrences of dest in these instrs.
2859 // Otherwise, we will have bogus code.
2860 bool someArgsWereIgnored = false;
2862 // Make sure not to substitute an upwards-exposed use -- that would
2863 // introduce a use of `tmpI' with no preceding def. Therefore,
2864 // substitute a use or def-and-use operand only if a previous def
2865 // operand has already been substituted (i.e., numSubst > 0).
2867 numSubst += mvec[i]->substituteValue(dest, tmpI,
2868 /*defsOnly*/ numSubst == 0,
2869 /*notDefsAndUses*/ numSubst > 0,
2870 someArgsWereIgnored);
2871 assert(!someArgsWereIgnored &&
2872 "Operand `dest' exists but not replaced: probably bogus!");
2874 assert(numSubst > 0 && "Operand `dest' not replaced: probably bogus!");
2876 // Left shift 32-N if size (N) is less than 32 bits.
2877 // Use another tmp. virtual register to represent this result.
2879 srlArgToUse = new TmpInstruction(mcfi, dest->getType(),
2880 tmpI, NULL, "maskHi2");
2881 mvec.push_back(BuildMI(V9::SLLXi6, 3).addReg(tmpI)
2882 .addZImm(8*(4-destSize))
2883 .addReg(srlArgToUse, MachineOperand::Def));
2886 // Logical right shift 32-N to get zero extension in top 64-N bits.
2887 mvec.push_back(BuildMI(V9::SRLi5, 3).addReg(srlArgToUse)
2888 .addZImm(8*(4-destSize))
2889 .addReg(dest, MachineOperand::Def));
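// For example, for an unsigned short result (destSize == 2) this emits,
// roughly:  sllx tmpI, 16, tmpI2 ; srl tmpI2, 16, dest
// leaving only the low 16 bits of the value in `dest' (bits 16-63 cleared).
// For an unsigned int it is just an srl by 0, which clears the top 32 bits.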
2891 } else if (destSize < 8) {
2892 assert(0 && "Unsupported type size: 32 < size < 64 bits");