From ec4471006306c1239c4c3a10ba083506e11ab53b Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 2 Sep 2015 15:42:49 +0000
Subject: [PATCH] [x86] fix allowsMisalignedMemoryAccesses() for 8-byte and
 smaller accesses

This is a continuation of the fix from:
http://reviews.llvm.org/D10662

and discussion in:
http://reviews.llvm.org/D12154

Here, we distinguish slow unaligned SSE (128-bit) accesses from slow unaligned
scalar (64-bit and under) accesses. Other lowering (eg, getOptimalMemOpType)
assumes that unaligned scalar accesses are always ok, so this changes
allowsMisalignedMemoryAccesses() to match that behavior.

Differential Revision: http://reviews.llvm.org/D12543

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@246658 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp | 18 +++++++++++++-----
 test/CodeGen/X86/memcpy-2.ll       | 26 +++++++++++---------------
 test/CodeGen/X86/pr11985.ll        | 22 ++++++++--------------
 3 files changed, 32 insertions(+), 34 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index b715cd5931c..fc6a904a593 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1923,13 +1923,21 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                    unsigned,
                                                    bool *Fast) const {
   if (Fast) {
-    if (VT.getSizeInBits() == 256)
-      *Fast = !Subtarget->isUnalignedMem32Slow();
-    else
-      // FIXME: We should always return that 8-byte and under accesses are fast.
-      // That is what other x86 lowering code assumes.
+    switch (VT.getSizeInBits()) {
+    default:
+      // 8-byte and under are always assumed to be fast.
+      *Fast = true;
+      break;
+    case 128:
       *Fast = !Subtarget->isUnalignedMem16Slow();
+      break;
+    case 256:
+      *Fast = !Subtarget->isUnalignedMem32Slow();
+      break;
+    // TODO: What about AVX-512 (512-bit) accesses?
+    }
   }
+  // Misaligned accesses of any size are always allowed.
   return true;
 }
 
diff --git a/test/CodeGen/X86/memcpy-2.ll b/test/CodeGen/X86/memcpy-2.ll
index 1d3033fd77b..7ef61c9a677 100644
--- a/test/CodeGen/X86/memcpy-2.ll
+++ b/test/CodeGen/X86/memcpy-2.ll
@@ -5,15 +5,6 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=X86-64
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s -check-prefix=NHM_64
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "t4" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
-;;;
-;;; Is either of the sequences ideal?
-;;; Is the ideal code being generated for all CPU models?
-
 @.str = internal constant [25 x i8] c"image\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
 @.str2 = internal constant [30 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 4
 
@@ -190,13 +181,18 @@ entry:
 ; NOSSE: movl $2021161080
 ; NOSSE: movl $2021161080
 
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
+;;;
+;;; Is either of the sequences ideal?
+
 ; X86-64-LABEL: t4:
-; X86-64: movabsq $8680820740569200760, %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movw $120
-; X86-64: movl $2021161080
+; X86-64: movabsq $33909456017848440, %rax ## imm = 0x78787878787878
+; X86-64: movq %rax, -10(%rsp)
+; X86-64: movabsq $8680820740569200760, %rax ## imm = 0x7878787878787878
+; X86-64: movq %rax, -16(%rsp)
+; X86-64: movq %rax, -24(%rsp)
+; X86-64: movq %rax, -32(%rsp)
 
 ; NHM_64-LABEL: t4:
 ; NHM_64: movups _.str2+14(%rip), %xmm0
diff --git a/test/CodeGen/X86/pr11985.ll b/test/CodeGen/X86/pr11985.ll
index 1adf6d42347..aae00de112d 100644
--- a/test/CodeGen/X86/pr11985.ll
+++ b/test/CodeGen/X86/pr11985.ll
@@ -1,26 +1,20 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=prescott | FileCheck %s --check-prefix=PRESCOTT
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "foo" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
 ;;;
 ;;; Is either of these sequences ideal?
-;;; Is the ideal code being generated for all CPU models?
 
 define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
 ; PRESCOTT-LABEL: foo:
 ; PRESCOTT:       # BB#0: # %entry
-; PRESCOTT-NEXT:    movw .Ltmp0+20(%rip), %ax
-; PRESCOTT-NEXT:    movw %ax, 20(%rdi)
-; PRESCOTT-NEXT:    movl .Ltmp0+16(%rip), %eax
-; PRESCOTT-NEXT:    movl %eax, 16(%rdi)
-; PRESCOTT-NEXT:    movq .Ltmp0+8(%rip), %rax
-; PRESCOTT-NEXT:    movq %rax, 8(%rdi)
-; PRESCOTT-NEXT:    movq .Ltmp0(%rip), %rax
-; PRESCOTT-NEXT:    movq %rax, (%rdi)
+; PRESCOTT-NEXT:    movq .Ltmp0+14(%rip), %rax
+; PRESCOTT-NEXT:    movq %rax, 14(%rdi)
+; PRESCOTT-NEXT:    movq .Ltmp0+8(%rip), %rax
+; PRESCOTT-NEXT:    movq %rax, 8(%rdi)
+; PRESCOTT-NEXT:    movq .Ltmp0(%rip), %rax
+; PRESCOTT-NEXT:    movq %rax, (%rdi)
 ;
 ; NEHALEM-LABEL: foo:
 ; NEHALEM:       # BB#0: # %entry
-- 
2.34.1
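
Editor's note: as a quick reference for the change above, the following is a small
standalone C++ sketch of the fast-path classification that
allowsMisalignedMemoryAccesses() performs after this patch. It is not the LLVM code
itself: the X86Subtarget queries are modeled as plain booleans, and the
"Prescott-like"/"Nehalem-like" settings in main() are illustrative assumptions based
on the CPUs used in the tests above.

    #include <iostream>

    // Stand-in for the subtarget feature queries used in the patch; in LLVM these
    // would be X86Subtarget::isUnalignedMem16Slow() / isUnalignedMem32Slow().
    struct FakeSubtarget {
      bool UnalignedMem16Slow; // slow unaligned 128-bit (SSE) accesses
      bool UnalignedMem32Slow; // slow unaligned 256-bit (AVX) accesses
    };

    // Mirrors the decision the patch introduces: report whether a misaligned access
    // of the given bit width is "fast". The access itself is always allowed (the
    // real hook returns true unconditionally); only the *Fast hint is computed here.
    static bool isMisalignedAccessFast(unsigned SizeInBits, const FakeSubtarget &ST) {
      switch (SizeInBits) {
      default:
        // 8-byte and under: always treated as fast, matching what the rest of the
        // x86 lowering (e.g. getOptimalMemOpType) already assumes.
        return true;
      case 128:
        return !ST.UnalignedMem16Slow;
      case 256:
        return !ST.UnalignedMem32Slow;
      }
    }

    int main() {
      // Illustrative values only: Prescott-like CPUs are assumed slow on unaligned
      // vector accesses, Nehalem-like CPUs fast on unaligned 128-bit accesses.
      FakeSubtarget Prescott{/*UnalignedMem16Slow=*/true, /*UnalignedMem32Slow=*/true};
      FakeSubtarget Nehalem{/*UnalignedMem16Slow=*/false, /*UnalignedMem32Slow=*/true};

      std::cout << isMisalignedAccessFast(64, Prescott) << '\n';  // 1: scalar is always fast
      std::cout << isMisalignedAccessFast(128, Prescott) << '\n'; // 0: slow unaligned SSE
      std::cout << isMisalignedAccessFast(128, Nehalem) << '\n';  // 1: fast unaligned memory
    }

Compiled with any C++11 compiler this prints 1, 0, 1, which is the scalar-vs-SSE
distinction the patch draws: 64-bit and smaller misaligned accesses are reported
fast regardless of the subtarget, while 128-bit and 256-bit accesses consult the
corresponding "slow unaligned" feature bits.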