From: Chad Rosier
Date: Sat, 3 Dec 2011 02:21:57 +0000 (+0000)
Subject: [arm-fast-isel] Unaligned stores of floats require special care.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=9eff1e33f616ad2d0134740ac4595ed2e79e3d74;p=oota-llvm.git

[arm-fast-isel] Unaligned stores of floats require special care.
rdar://10510150

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145742 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index f43f084c03a..63f3d454870 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -181,7 +181,7 @@ class ARMFastISel : public FastISel {
     bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                      bool isZExt, bool allocReg);
-    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
+    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, unsigned Alignment = 0);
     bool ARMComputeAddress(const Value *Obj, Address &Addr);
     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
     bool ARMIsMemCpySmall(uint64_t Len);
@@ -1053,7 +1053,7 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
   return true;
 }
 
-bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
+bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, unsigned Alignment) {
   unsigned StrOpc;
   bool useAM3 = false;
   switch (VT.getSimpleVT().SimpleTy) {
@@ -1102,9 +1102,23 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
     case MVT::f32:
       if (!Subtarget->hasVFP2()) return false;
       StrOpc = ARM::VSTRS;
+      // Unaligned stores need special handling.
+      if (Alignment && Alignment < 4) {
+        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+                                TII.get(ARM::VMOVRS), MoveReg)
+                        .addReg(SrcReg));
+        SrcReg = MoveReg;
+        VT = MVT::i32;
+        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
+      }
       break;
     case MVT::f64:
       if (!Subtarget->hasVFP2()) return false;
+      // FIXME: Unaligned stores need special handling.
+      if (Alignment && Alignment < 8) {
+        return false;
+      }
       StrOpc = ARM::VSTRD;
       break;
   }
@@ -1141,7 +1155,8 @@ bool ARMFastISel::SelectStore(const Instruction *I) {
   if (!ARMComputeAddress(I->getOperand(1), Addr)) return false;
 
-  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
+  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
+    return false;
   return true;
 }
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index 465e85f9a83..f9c1a9d7d35 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -158,3 +158,25 @@ define void @test4() {
 ; ARM: ldr r1, [r1]
 ; ARM: str r0, [r1]
 }
+
+; Check unaligned stores
+%struct.anon = type <{ float }>
+
+@a = common global %struct.anon* null, align 4
+
+define void @unaligned_store(float %x, float %y) nounwind {
+entry:
+; ARM: @unaligned_store
+; ARM: vmov r1, s0
+; ARM: str r1, [r0]
+
+; THUMB: @unaligned_store
+; THUMB: vmov r1, s0
+; THUMB: str r1, [r0]
+
+  %add = fadd float %x, %y
+  %0 = load %struct.anon** @a, align 4
+  %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
+  store float %add, float* %x1, align 1
+  ret void
+}
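
For context, the new test's IR corresponds to storing a float into a field of a packed struct. A C source roughly along these lines (illustrative only, not part of the commit; names are hypothetical) would produce the align-1 float store that this patch teaches fast-isel to handle:

    /* Hypothetical C source that yields "store float ..., align 1",
       matching the new unaligned_store test case. */
    struct anon {
      float x;
    } __attribute__((packed));   /* packed => the float field is 1-byte aligned */

    struct anon *a;

    void unaligned_store(float x, float y) {
      /* With this patch, fast-isel lowers the unaligned f32 store by
         moving the value to a GPR (vmov) and using a plain str,
         instead of VSTRS (which requires 4-byte alignment). */
      a->x = x + y;
    }

As the updated CHECK lines verify, both the ARM and Thumb2 variants now select "vmov r1, s0" followed by "str r1, [r0]" for this case; unaligned f64 stores still bail out of fast-isel, per the FIXME.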