projects
/
oota-llvm.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
[x86] invert logic for attribute 'FeatureFastUAMem'
[oota-llvm.git]
/
lib
/
Target
/
X86
/
X86ISelLowering.cpp
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9f1bf6ddeb076c5309636c0cb73de9ae2b7c0237..430ee22916c01f8f3a6d8b47810a2aa670269adc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1876,10 +1876,11 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
   if ((!IsMemset || ZeroMemset) &&
       !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
     if (Size >= 16 &&
-        (Subtarget->isUnalignedMemAccessFast() ||
+        (!Subtarget->isUnalignedMemUnder32Slow() ||
          ((DstAlign == 0 || DstAlign >= 16) &&
           (SrcAlign == 0 || SrcAlign >= 16)))) {
       if (Size >= 32) {
+        // FIXME: Check if unaligned 32-byte accesses are slow.
         if (Subtarget->hasInt256())
           return MVT::v8i32;
         if (Subtarget->hasFp256())
@@ -1897,6 +1898,9 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
       return MVT::f64;
     }
   }
+  // This is a compromise. If we reach here, unaligned accesses may be slow on
+  // this target. However, creating smaller, aligned accesses could be even
+  // slower and would certainly be a lot more code.
   if (Subtarget->is64Bit() && Size >= 8)
     return MVT::i64;
   return MVT::i32;
@@ -1916,12 +1920,10 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                   unsigned,
                                                   bool *Fast) const {
   if (Fast) {
-    // FIXME: We should be checking 128-bit accesses separately from smaller
-    // accesses.
     if (VT.getSizeInBits() == 256)
       *Fast = !Subtarget->isUnalignedMem32Slow();
     else
-      *Fast = Subtarget->isUnalignedMemAccessFast();
+      *Fast = !Subtarget->isUnalignedMemUnder32Slow();
   }
   return true;
 }