From 2ecc72cc58bc647968dec40888320263645c92d9 Mon Sep 17 00:00:00 2001
From: Evgeny Astigeevich
Date: Mon, 15 Jun 2015 15:48:44 +0000
Subject: [PATCH] On behalf of Alexandros Lamprineas:

LLVM targeting aarch64 doesn't correctly produce aligned accesses for
non-aligned data at -O0/fast-isel (-mno-unaligned-access).
The root cause seems to be fast-isel not producing unaligned accesses
correctly when -mno-unaligned-access is present.
The patch simply aborts fast-isel for loads and stores in that case, so
that selection falls back to the default path.
The regression test is updated to cover this case (-mno-unaligned-access
together with fast-isel).

Differential Revision: http://reviews.llvm.org/D10360

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239732 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AArch64/AArch64FastISel.cpp     | 6 ++++++
 test/CodeGen/AArch64/arm64-strict-align.ll | 1 +
 2 files changed, 7 insertions(+)

diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 9977e2b84a7..d1523e8548e 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1678,6 +1678,9 @@ unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
 
 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
                                    bool WantZExt, MachineMemOperand *MMO) {
+  if(!TLI.allowsMisalignedMemoryAccesses(VT))
+    return 0;
+
   // Simplify this down to something we can handle.
   if (!simplifyAddress(Addr, VT))
     return 0;
@@ -1962,6 +1965,9 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
 
 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                 MachineMemOperand *MMO) {
+  if(!TLI.allowsMisalignedMemoryAccesses(VT))
+    return false;
+
   // Simplify this down to something we can handle.
   if (!simplifyAddress(Addr, VT))
     return false;
diff --git a/test/CodeGen/AArch64/arm64-strict-align.ll b/test/CodeGen/AArch64/arm64-strict-align.ll
index b707527f3c0..109f4115d80 100644
--- a/test/CodeGen/AArch64/arm64-strict-align.ll
+++ b/test/CodeGen/AArch64/arm64-strict-align.ll
@@ -1,6 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-no-strict-align | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-strict-align | FileCheck %s --check-prefix=CHECK-STRICT
+; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-strict-align -fast-isel | FileCheck %s --check-prefix=CHECK-STRICT
 
 define i32 @f0(i32* nocapture %p) nounwind {
 ; CHECK-STRICT: ldrh [[HIGH:w[0-9]+]], [x0, #2]
-- 
2.34.1
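
For context, below is a minimal standalone sketch of the scenario the new RUN
line exercises; it is not part of the patch, and the function name, alignment,
and CHECK pattern are illustrative rather than copied from
arm64-strict-align.ll. With strict alignment (-aarch64-strict-align, the
backend analogue of -mno-unaligned-access), an i32 load that is only 2-byte
aligned has to be lowered with aligned accesses (e.g. two ldrh), and before
this patch -fast-isel would still emit a single unaligned ldr.

; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-strict-align -fast-isel | FileCheck %s

define i32 @load_underaligned(i32* %p) nounwind {
; The i32 load below is only 2-byte aligned, so under strict alignment the
; backend must not use a single unaligned ldr; checking for ldrh verifies an
; aligned lowering is used even when fast-isel is requested.
; CHECK: ldrh
  %v = load i32, i32* %p, align 2
  ret i32 %v
}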