From 5cad12d12a823d258fc60e3975ffa142d0e190ef Mon Sep 17 00:00:00 2001
From: Hal Finkel <hfinkel@anl.gov>
Date: Sat, 3 Aug 2013 12:25:10 +0000
Subject: [PATCH] Fix PPC64 64-bit GPR inline asm constraint matching

Internally, the PowerPC backend names the 32-bit GPRs R[0-9]+, and names
the 64-bit parent GPRs X[0-9]+. When matching inline assembly constraints
with explicit register names, on PPC64 when an i64 MVT has been requested,
we need to follow gcc's convention of using r[0-9]+ to refer to the 64-bit
(parent) registers.

At some point, we'll probably want to arrange things so that the generic
code in TargetLowering uses the AsmName fields declared in *RegisterInfo.td
in order to match these inline asm register constraints. If we do that,
this change can be reverted.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187693 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/PowerPC/PPCISelLowering.cpp    | 19 ++++-
 test/CodeGen/PowerPC/inlineasm-i64-reg.ll | 65 +++++++++++++++++++++++
 2 files changed, 83 insertions(+), 1 deletion(-)
 create mode 100644 test/CodeGen/PowerPC/inlineasm-i64-reg.ll

diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index aadab824ba5..664dd12573a 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7632,7 +7632,24 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
     }
   }
 
-  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+  std::pair<unsigned, const TargetRegisterClass*> R =
+    TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+
+  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
+  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
+  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
+  // register.
+  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
+  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
+  if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() &&
+      PPC::GPRCRegClass.contains(R.first)) {
+    const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+    return std::make_pair(TRI->getMatchingSuperReg(R.first,
+                            PPC::sub_32, &PPC::G8RCRegClass),
+                          &PPC::G8RCRegClass);
+  }
+
+  return R;
 }

diff --git a/test/CodeGen/PowerPC/inlineasm-i64-reg.ll b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
new file mode 100644
index 00000000000..fa9aa45b945
--- /dev/null
+++ b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
@@ -0,0 +1,65 @@
+; RUN: llc -mtriple=powerpc64-bgq-linux -mcpu=a2 < %s | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+%struct.BG_CoordinateMapping_t = type { [4 x i8] }
+
+; Function Attrs: alwaysinline inlinehint nounwind
+define zeroext i32 @Kernel_RanksToCoords(i64 %mapsize, %struct.BG_CoordinateMapping_t* %map, i64* %numentries) #0 {
+entry:
+  %mapsize.addr = alloca i64, align 8
+  %map.addr = alloca %struct.BG_CoordinateMapping_t*, align 8
+  %numentries.addr = alloca i64*, align 8
+  %r0 = alloca i64, align 8
+  %r3 = alloca i64, align 8
+  %r4 = alloca i64, align 8
+  %r5 = alloca i64, align 8
+  %tmp = alloca i64, align 8
+  store i64 %mapsize, i64* %mapsize.addr, align 8
+  store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
+  store i64* %numentries, i64** %numentries.addr, align 8
+  store i64 1055, i64* %r0, align 8
+  %0 = load i64* %mapsize.addr, align 8
+  store i64 %0, i64* %r3, align 8
+  %1 = load %struct.BG_CoordinateMapping_t** %map.addr, align 8
+  %2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
+  store i64 %2, i64* %r4, align 8
+  %3 = load i64** %numentries.addr, align 8
+  %4 = ptrtoint i64* %3 to i64
+  store i64 %4, i64* %r5, align 8
+  %5 = load i64* %r0, align 8
+  %6 = load i64* %r3, align 8
+  %7 = load i64* %r4, align 8
+  %8 = load i64* %r5, align 8
+  %9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0
+
+; CHECK-LABEL: @Kernel_RanksToCoords
+
+; These need to be 64-bit loads, not 32-bit loads (not lwz).
+; CHECK-NOT: lwz
+
+; CHECK: #APP
+; CHECK: sc
+; CHECK: #NO_APP
+
+; CHECK: blr
+
+  %asmresult = extractvalue { i64, i64, i64, i64 } %9, 0
+  %asmresult1 = extractvalue { i64, i64, i64, i64 } %9, 1
+  %asmresult2 = extractvalue { i64, i64, i64, i64 } %9, 2
+  %asmresult3 = extractvalue { i64, i64, i64, i64 } %9, 3
+  store i64 %asmresult, i64* %r0, align 8
+  store i64 %asmresult1, i64* %r3, align 8
+  store i64 %asmresult2, i64* %r4, align 8
+  store i64 %asmresult3, i64* %r5, align 8
+  %10 = load i64* %r3, align 8
+  store i64 %10, i64* %tmp
+  %11 = load i64* %tmp
+  %conv = trunc i64 %11 to i32
+  ret i32 %conv
+}
+
+attributes #0 = { alwaysinline inlinehint nounwind }
+attributes #1 = { nounwind }
+
+!0 = metadata !{i32 -2146895770}
-- 
2.34.1
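
For illustration only, not part of the commit: the C-level pattern that
exercises this code path is GCC-style inline asm with values pinned to
explicitly named registers. The sketch below is hypothetical (the function
name and clobber list are merely plausible, modeled on the test case above);
with this fix, the i64 operands bound to "r0"/"r3" are matched to the 64-bit
X0/X3 registers on PPC64 rather than the 32-bit R0/R3.

#include <stdint.h>

/* Hypothetical sketch, not from the patch: i64 values bound to explicitly
   named GPRs via local register variables.  Under the gcc convention this
   commit implements, "r0" and "r3" must select the 64-bit parent registers
   (X0/X3) when the requested operand type is i64. */
static inline int64_t bgq_syscall1(int64_t number, int64_t arg)
{
  register int64_t r0 __asm__("r0") = number;  /* syscall number in r0 */
  register int64_t r3 __asm__("r3") = arg;     /* first argument in r3 */

  /* "sc" clobbers the remaining volatile GPRs and cr0, as in the test. */
  __asm__ volatile("sc"
                   : "+r"(r0), "+r"(r3)
                   :
                   : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
                     "r12", "cr0", "memory");
  return r3;  /* kernel result comes back in r3 */
}

Without the upgrade to the parent register class, such i64 operands would be
assigned 32-bit GPRs and reloaded with lwz, silently truncating them; that is
exactly what the CHECK-NOT: lwz line in the test above guards against.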