#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
MipsMCInstLower MCInstLowering(Mang, *MF, *this);
MCInst TmpInst0;
MCInstLowering.Lower(MI, TmpInst0);
+ unsigned Opc = MI->getOpcode();
+
+ // If this is an LW or SW whose memory operand is under-aligned, emit its
+ // unaligned counterpart (ulw/usw) instead.
+ // FIXME: expand other unaligned memory accesses too.
+ if ((Opc == Mips::LW || Opc == Mips::SW) && !MI->memoperands_empty() &&
+ (*MI->memoperands_begin())->getAlignment() < 4) {
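+ // Bracket the pseudo with MACRO/NOMACRO (".set macro"/".set nomacro") so
+ // the assembler is permitted to expand ulw/usw into multiple instructions.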
+ MCInst Directive;
+ Directive.setOpcode(Mips::MACRO);
+ OutStreamer.EmitInstruction(Directive);
+ TmpInst0.setOpcode(Opc == Mips::LW ? Mips::ULW : Mips::USW);
+ OutStreamer.EmitInstruction(TmpInst0);
+ Directive.setOpcode(Mips::NOMACRO);
+ OutStreamer.EmitInstruction(Directive);
+ return;
+ }
+
OutStreamer.EmitInstruction(TmpInst0);
}
setExceptionSelectorRegister(Mips::A1);
}
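+// Report i32 as legal for unaligned loads and stores; the asm printer later
+// rewrites under-aligned LW/SW into the ulw/usw assembler macros.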
+bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
+ // FIXME: allow unaligned memory accesses for other types too.
+ return VT.getSimpleVT().SimpleTy == MVT::i32;
+}
+
MVT::SimpleValueType MipsTargetLowering::getSetCCResultType(EVT VT) const {
return MVT::i32;
}
unsigned NumWords = (Flags.getByValSize() + 3) / 4;
unsigned LastWord = FirstWord + NumWords;
unsigned CurWord;
+ unsigned ByValAlign = Flags.getByValAlign();
// Copy the first 4 words of the byval arg to registers A0 - A3.
for (CurWord = FirstWord; CurWord < std::min(LastWord, O32IntRegsSize);
MVT::i32));
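+ // Each piece is at most one word, so the load alignment is the byval
+ // alignment capped at 4 bytes.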
SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, std::min(ByValAlign,
+ (unsigned)4));
MemOpChains.push_back(LoadVal.getValue(1));
unsigned DstReg = O32IntRegs[CurWord];
RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
SDValue Dst = DAG.getFrameIndex(LastFI, PtrType);
Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
DAG.getConstant(SizeInBytes, MVT::i32),
- /*Align*/4,
+ /*Align*/ByValAlign,
/*isVolatile=*/false, /*AlwaysInline=*/false,
MachinePointerInfo(0), MachinePointerInfo(0));
MemOpChains.push_back(Chain);
public:
explicit MipsTargetLowering(MipsTargetMachine &TM);
+ virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
+
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
}
+// Unaligned memory load and store.
+// These replace LW or SW during MCInst lowering in the asm printer when the
+// memory access is under-aligned.
+def ULW :
+ MipsPseudo<(outs CPURegs:$dst), (ins mem:$addr), "ulw\t$dst, $addr", []>;
+def USW :
+ MipsPseudo<(outs), (ins CPURegs:$dst, mem:$addr), "usw\t$dst, $addr", []>;
+
//===----------------------------------------------------------------------===//
// Instruction definition
//===----------------------------------------------------------------------===//
--- /dev/null
+; RUN: llc -march=mips < %s | FileCheck %s
+
+%struct.S2 = type { %struct.S1, %struct.S1 }
+%struct.S1 = type { i8, i8 }
+
+@s2 = common global %struct.S2 zeroinitializer, align 1
+
+define void @foo1() nounwind {
+entry:
+; CHECK: ulw ${{[0-9]+}}, 2
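+; The second S1 field lies at byte offset 2 of an align-1 global, so the
+; word load for its byval copy must be emitted as ulw.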
+
+ tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
+ ret void
+}
+
+declare void @foo2(%struct.S1* byval)