MVT VT = Value.getSimpleValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ unsigned AS = ST->getAddressSpace();
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node),
                     DAG, TLI, this);
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode())
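// Editor's sketch (not part of the patch): the hook these store hunks now
// pass the address space to. The signature below is an assumption about the
// post-patch TargetLowering declaration; the defaults keep old callers
// compiling while new callers supply a real address space.
virtual bool allowsUnalignedMemoryAccesses(EVT VT,
                                           unsigned AddrSpace = 0,
                                           bool *Fast = 0) const {
  // Conservative default: unaligned accesses are unsupported unless a
  // target overrides this for a given value type and address space.
  return false;
}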
switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
StVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
+ unsigned AS = ST->getAddressSpace();
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode())
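// Editor's sketch (not part of the patch): why the new parameter matters.
// A hypothetical target override, with invented address-space numbering,
// that forbids unaligned access in one memory region while allowing it
// elsewhere -- the per-address-space distinction this patch enables.
bool MyTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                     unsigned AddrSpace,
                                                     bool *Fast) const {
  if (AddrSpace == 3) // hypothetical local/scratch region: strict alignment
    return false;
  if (VT == MVT::i32 || VT == MVT::i64) {
    if (Fast)
      *Fast = true; // assume unaligned 32/64-bit access is cheap elsewhere
    return true;
  }
  return false;
}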
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
+ unsigned AS = LD->getAddressSpace();
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT(), AS)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
  TLI.getDataLayout()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment) {
  ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
}
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(RVal, DAG);
if (Res.getNode()) {
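// Editor's sketch (not part of the patch): the caller-side pattern every
// legalization hunk above repeats. MemSDNode::getAddressSpace() reads the
// address space from the node's MachinePointerInfo; the helper name is
// hypothetical.
static bool accessNeedsExpansion(const TargetLowering &TLI,
                                 const MemSDNode *N) {
  unsigned AS = N->getAddressSpace();
  return !TLI.allowsUnalignedMemoryAccesses(N->getMemoryVT(), AS);
}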
} else {
// If this is an unaligned load and the target doesn't support
// it, expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ EVT MemVT = LD->getMemoryVT();
+ unsigned AS = LD->getAddressSpace();
+ if (!TLI.allowsUnalignedMemoryAccesses(MemVT, AS)) {
Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
  TLI.getDataLayout()->getABITypeAlignment(Ty);
DAG.getMachineFunction());
if (VT == MVT::Other) {
- if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
- TLI.allowsUnalignedMemoryAccesses(VT)) {
+ unsigned AS = 0;
+ if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+ TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
VT = TLI.getPointerTy();
} else {
switch (DstAlign & 7) {
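// Editor's sketch (not part of the patch): in this memcpy/memset-lowering
// path the address space is pinned to 0, since only the default address
// space was modeled here at the time. The switch truncated above picks a
// fallback type from the low bits of DstAlign; assuming the usual ladder:
static MVT typeForDstAlign(unsigned DstAlign) {
  switch (DstAlign & 7) {
  case 0:  return MVT::i64; // 8-byte aligned (or better)
  case 4:  return MVT::i32;
  case 2:  return MVT::i16;
  default: return MVT::i8;  // 1-byte or odd alignment
  }
}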
// FIXME: Only does this for 64-bit or more since we don't have proper
// cost model for unaligned load / store.
bool Fast;
+ unsigned AS = 0;
if (NumMemOps && AllowOverlap &&
VTSize >= 8 && NewVTSize < Size &&
- TLI.allowsUnalignedMemoryAccesses(VT, 0, &Fast) && Fast)
+ TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
VTSize = Size;
else {
VT = NewVT;
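// Editor's sketch (not part of the patch): how the 'Fast' out-parameter is
// probed. Only when the unaligned access is both allowed and reported fast
// does the lowering emit one wide overlapping access instead of several
// narrow ones. Values here are illustrative.
bool Fast = false;
unsigned AS = 0; // this path still models only the default address space
if (TLI.allowsUnalignedMemoryAccesses(MVT::i64, AS, &Fast) && Fast) {
  // prefer a single unaligned i64 access that overlaps the previous one
}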
// bloat the code.
const TargetLowering *TLI = TM.getTargetLowering();
if (ActuallyDoIt && CSize->getZExtValue() > 4) {
+ unsigned DstAS = LHS->getType()->getPointerAddressSpace();
+ unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
- if (!TLI->isTypeLegal(LoadVT) ||!TLI->allowsUnalignedMemoryAccesses(LoadVT))
+ if (!TLI->isTypeLegal(LoadVT) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS))
ActuallyDoIt = false;
}
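// Editor's sketch (not part of the patch): the memcmp hunk checks both
// operands because the two pointers may live in different address spaces,
// and either one could forbid unaligned loads of LoadVT. Recovering the
// address space from an IR pointer:
static unsigned addrSpaceOf(const llvm::Value *Ptr) {
  // Type::getPointerAddressSpace() asserts the type is a pointer type.
  return Ptr->getType()->getPointerAddressSpace();
}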