#include "llvm/Transforms/Scalar.h"
using namespace llvm;
-static cl::opt<bool>
-EnableGlobalMerge("global-merge", cl::Hidden,
- cl::desc("Enable global merge pass"),
- cl::init(true));
-
static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
cl::desc("Inhibit optimization of S->D register accesses on A15"),
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
+ Subtarget(TT, CPU, FS, Options),
JITInfo(),
InstrItins(Subtarget.getInstrItineraryData()) {
- // Default to soft float ABI
+
+ // Default to triple-appropriate float ABI
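+ // (e.g., assuming the usual triples, a "-gnueabihf" environment selects
+ // FloatABI::Hard, while plain "-gnueabi" stays FloatABI::Soft).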
if (Options.FloatABIType == FloatABI::Default)
- this->Options.FloatABIType = FloatABI::Soft;
+ this->Options.FloatABIType =
+ Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;
}
void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
 // Add the target-independent BasicTTI pass first, then our ARM pass. This
 // allows the ARM pass to delegate to the target-independent layer when
 // appropriate.
- PM.add(createBasicTargetTransformInfoPass(getTargetLowering()));
+ PM.add(createBasicTargetTransformInfoPass(this));
PM.add(createARMTargetTransformInfoPass(this));
}
void ARMTargetMachine::anchor() { }
+static std::string computeDataLayout(ARMSubtarget &ST) {
+ // Little endian.
+ std::string Ret = "e";
+
+ Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
+
+ // Pointers are 32 bits and aligned to 32 bits.
+ Ret += "-p:32:32";
+
+ // On Thumb, i16, i8 and i1 have natural alignment requirements, but we try
+ // to align them to 32 bits.
+ if (ST.isThumb())
+ Ret += "-i1:8:32-i8:8:32-i16:16:32";
+
+ // ABIs other than APCS have 64 bit integers with natural alignment.
+ if (!ST.isAPCS_ABI())
+ Ret += "-i64:64";
+
+ // We have 64 bit floats. The APCS ABI only requires them to be 32 bit
+ // aligned; other ABIs require 64 bit alignment. We always try to align to 64 bits.
+ if (ST.isAPCS_ABI())
+ Ret += "-f64:32:64";
+
+ // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits; other
+ // ABIs align them to 64. We always try to give them natural alignment.
+ if (ST.isAPCS_ABI())
+ Ret += "-v64:32:64-v128:32:128";
+ else
+ Ret += "-v128:64:128";
+
+ // On Thumb and APCS, only try to align aggregates to 32 bits (the default is
+ // 64 bits).
+ if (ST.isThumb() || ST.isAPCS_ABI())
+ Ret += "-a:0:32";
+
+ // Integer registers are 32 bits.
+ Ret += "-n32";
+
+ // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
+ // aligned everywhere else.
+ if (ST.isTargetNaCl())
+ Ret += "-S128";
+ else if (ST.isAAPCS_ABI())
+ Ret += "-S64";
+ else
+ Ret += "-S32";
+
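+ // As a rough sketch of the assembled string (assuming getManglingComponent
+ // yields "-m:e" for ELF triples), a little-endian Thumb AAPCS ELF target
+ // would end up with:
+ //   e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64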
+ return Ret;
+}
+
ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
InstrInfo(Subtarget),
- DL(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:64-i64:32:64-"
- "v128:32:128-v64:32:64-n32-S32") :
- Subtarget.isAAPCS_ABI() ?
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "v128:64:128-v64:64:64-n32-S64") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "v128:64:128-v64:64:64-n32-S32")),
+ DL(computeDataLayout(Subtarget)),
TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget) {
+ initAsmInfo();
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
InstrInfo(Subtarget.hasThumb2()
? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
: ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
- DL(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:64-i64:32:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:32:128-v64:32:64-a:0:32-n32-S32") :
- Subtarget.isAAPCS_ABI() ?
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:64:128-v64:64:64-a:0:32-n32-S64") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:64:128-v64:64:64-a:0:32-n32-S32")),
+ DL(computeDataLayout(Subtarget)),
TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget.hasThumb2()
? new ARMFrameLowering(Subtarget)
: (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
+ initAsmInfo();
}
namespace {
}
bool ARMPassConfig::addPreISel() {
- if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
- addPass(createGlobalMergePass(TM->getTargetLowering()));
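+ // GlobalMerge pools small internal globals behind a single merged global
+ // (typically named something like _MergedGlobals) so nearby accesses can
+ // share one base-address computation. Illustrative IR only, not from this
+ // patch:
+ //   @x = internal global i32 0, @y = internal global i32 0
+ //     => @_MergedGlobals = internal global { i32, i32 } zeroinitializer,
+ //        with uses of @x/@y rewritten to GEPs into the merged global.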
+ if (TM->getOptLevel() != CodeGenOpt::None)
+ addPass(createGlobalMergePass(TM));
return false;
}
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
addPass(createARMLoadStoreOptimizationPass(true));
- if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isLikeA9())
+ if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
addPass(createMLxExpansionPass());
// Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
// enabled when NEON is available.
addPass(createARMLoadStoreOptimizationPass());
printAndVerify("After ARM load / store optimizer");
}
- if ((DisableA15SDOptimization || !getARMSubtarget().isCortexA15()) &&
- getARMSubtarget().hasNEON())
+ if (getARMSubtarget().hasNEON())
addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
}
addPass(createARMExpandPseudoPass());
if (getOptLevel() != CodeGenOpt::None) {
- if (!getARMSubtarget().isThumb1Only())
+ if (!getARMSubtarget().isThumb1Only()) {
+ // In v8, IfConversion depends on Thumb instruction widths, so run
+ // Thumb2SizeReduction first when IT blocks are restricted.
+ if (getARMSubtarget().restrictIT() &&
+ !getARMSubtarget().prefers32BitThumb())
+ addPass(createThumb2SizeReductionPass());
addPass(&IfConverterID);
+ }
}
if (getARMSubtarget().isThumb2())
addPass(createThumb2ITBlockPass());