}
void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS) {
- printInstruction(MI, OS);
+ if (printAliasInstr(MI, OS))
+ printInstruction(MI, OS);
// If verbose assembly is enabled, we can print some informative comments.
if (CommentStream)
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
-def : InstAlias<"faddp", (ADD_FPrST0 ST1)>;
+def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
def : InstAlias<"fsubp", (SUBR_FPrST0 ST1)>;
def : InstAlias<"fsubrp", (SUB_FPrST0 ST1)>;
def : InstAlias<"fmulp", (MUL_FPrST0 ST1)>;
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
-multiclass FpUnaryAlias<string Mnemonic, Instruction Inst> {
- def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"), (Inst RST:$op)>;
- def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"), (Inst ST0)>;
+multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
+ def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"),
+ (Inst RST:$op), EmitAlias>;
+ def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"),
+ (Inst ST0), EmitAlias>;
}
defm : FpUnaryAlias<"fadd", ADD_FST0r>;
-defm : FpUnaryAlias<"faddp", ADD_FPrST0>;
+defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
defm : FpUnaryAlias<"fsub", SUB_FST0r>;
defm : FpUnaryAlias<"fsubp", SUBR_FPrST0>;
defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
defm : FpUnaryAlias<"fdivp", DIVR_FPrST0>;
defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
defm : FpUnaryAlias<"fdivrp", DIV_FPrST0>;
-defm : FpUnaryAlias<"fcomi", COM_FIr>;
-defm : FpUnaryAlias<"fucomi", UCOM_FIr>;
+defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
+defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
defm : FpUnaryAlias<"fcompi", COM_FIPr>;
defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
-def : InstAlias<"faddp %st(0), $op", (ADD_FPrST0 RST:$op)>;
+def : InstAlias<"faddp %st(0), $op", (ADD_FPrST0 RST:$op), 0>;
def : InstAlias<"fmulp %st(0), $op", (MUL_FPrST0 RST:$op)>;
def : InstAlias<"fsubrp %st(0), $op", (SUB_FPrST0 RST:$op)>;
def : InstAlias<"fdivp %st(0), $op", (DIVR_FPrST0 RST:$op)>;
// Match 'movq GR64, MMX' as an alias for movd.
def : InstAlias<"movq $src, $dst",
- (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0b0>;
+ (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq $src, $dst",
- (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0b0>;
+ (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsd with no operands (as opposed to the SSE scalar move of a double) is an
// alias for movsl. (as in rep; movsd)
; RUN: llc < %s -march=x86-64 > %t
; RUN: grep movb %t | count 2
-; RUN: grep {movzb\[wl\]} %t
+; RUN: grep {movzx} %t
define void @handle_vector_size_attribute() nounwind {
-; RUN: llc < %s -march=x86 | grep {movsbl}
+; RUN: llc < %s -march=x86 | grep {movsx}
@X = global i32 0 ; <i32*> [#uses=1]
; CHECK: bar:
; CHECK: fldt 4(%esp)
; CHECK-NEXT: fld %st(0)
-; CHECK-NEXT: fmul %st(1)
-; CHECK-NEXT: fmulp %st(1)
+; CHECK-NEXT: fmul %st(1), %st(0)
+; CHECK-NEXT: fmulp
; CHECK-NEXT: ret
}
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movzbl
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
define i32 @foo(<4 x float> %a, <4 x float> %b) nounwind {
entry:
- tail call i32 @llvm.x86.sse.ucomige.ss( <4 x float> %a, <4 x float> %b ) nounwind readnone
- ret i32 %0
+; CHECK: movzx
+ tail call i32 @llvm.x86.sse.ucomige.ss( <4 x float> %a, <4 x float> %b ) nounwind readnone
+ ret i32 %0
}
declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 > %t1
-; RUN: grep movzwl %t1 | count 2
-; RUN: grep movzbl %t1 | count 2
-; RUN: grep movd %t1 | count 4
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck %s
define <4 x i16> @a(i32* %x1) nounwind {
+; CHECK: movzx
+; CHECK-NEXT: movd
%x2 = load i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
}
define <8 x i16> @b(i32* %x1) nounwind {
+; CHECK: movzx
+; CHECK-NEXT: movd
%x2 = load i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
}
define <8 x i8> @c(i32* %x1) nounwind {
+; CHECK: movzx
+; CHECK-NEXT: movd
%x2 = load i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
}
define <16 x i8> @d(i32* %x1) nounwind {
+; CHECK: movzx
+; CHECK-NEXT: movd
%x2 = load i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
-; RUN: llc < %s -march=x86-64 | grep movzbl | count 2
+; RUN: llc < %s -march=x86-64 | FileCheck %s
-; Use movzbl to avoid partial-register updates.
+; Use movzbl (aliased as movzx) to avoid partial-register updates.
define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
+; CHECK: movzx %dil, %eax
+; CHECK: movzx %al, %eax
%q = trunc i32 %p to i8
%r = udiv i8 %q, %x
%s = zext i8 %r to i32
define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vcomisd
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vcomisd
; CHECK: setae
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vcomisd
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vcomisd
; CHECK: setbe
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vcomisd
; CHECK: setne
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vucomisd
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vucomisd
; CHECK: setae
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vucomisd
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vucomisd
; CHECK: setbe
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vucomisd
; CHECK: setne
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse41_ptestnzc(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vptest
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse41.ptestnzc(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse41_ptestz(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vptest
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse41.ptestz(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vcomiss
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vcomiss
; CHECK: setae
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.comige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vcomiss
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.comigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vcomiss
; CHECK: setbe
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.comile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vcomiss
; CHECK: setne
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.comineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vucomiss
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vucomiss
; CHECK: setae
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.ucomige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vucomiss
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.ucomigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vucomiss
; CHECK: setbe
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.ucomile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vucomiss
; CHECK: setne
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.sse.ucomineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK: vptest
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK: vptest
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vtestpd
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK: vtestpd
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vtestps
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK: vtestps
; CHECK: seta
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK: vtestpd
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK: vtestpd
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vtestps
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK: vtestps
; CHECK: sete
- ; CHECK: movzbl
+ ; CHECK: movzx
%res = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
; RUN: llc < %s -march=x86-64 | FileCheck %s
; CHECK: @bar1
-; CHECK: movzbl
+; CHECK: movzx
; CHECK: callq
define void @bar1(i1 zeroext %v1) nounwind ssp {
entry:
}
; CHECK: @bar2
-; CHECK-NOT: movzbl
+; CHECK-NOT: movzx
; CHECK: callq
define void @bar2(i8 zeroext %v1) nounwind ssp {
entry:
; CHECK: @bar3
; CHECK: callq
-; CHECK-NOT: movzbl
+; CHECK-NOT: movzx
; CHECK-NOT: and
; CHECK: ret
define zeroext i1 @bar3() nounwind ssp {
entry:
; CHECK: test5:
; CHECK: setg %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: orl $-2, %eax
; CHECK: ret
entry:
; CHECK: test6:
; CHECK: setl %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: leal 4(%rax,%rax,8), %eax
; CHECK: ret
%0 = load i32* %P, align 4 ; <i32> [#uses=1]
; CHECK: test3:
; CHECK: testq %rdi, %rdi
; CHECK: sete %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: ret
}
; CHECK: test4:
; CHECK: testq %rdi, %rdi
; CHECK: setle %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: ret
}
; X32: ret
; X64: test1:
-; X64: movslq %e[[A0:di|cx]], %rax
+; X64: movsx %e[[A0:di|cx]], %rax
; X64: movl (%r[[A1:si|dx]],%rax,4), %eax
; X64: ret
%v11 = add i64 %B, %v10
ret i64 %v11
; X64: test5:
-; X64: movslq %e[[A1]], %rax
+; X64: movsx %e[[A1]], %rax
; X64-NEXT: movq (%r[[A0]],%rax), %rax
; X64-NEXT: addq %{{rdx|r8}}, %rax
; X64-NEXT: ret
-; RUN: llc < %s -march=x86 -mcpu=i386 | grep {fucompi.*st.\[12\]}
+; RUN: llc < %s -march=x86 -mcpu=i386 | FileCheck %s
; PR1012
define float @foo(float* %col.2.0) {
- %tmp = load float* %col.2.0 ; <float> [#uses=3]
- %tmp16 = fcmp olt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
- %tmp20 = fsub float -0.000000e+00, %tmp ; <float> [#uses=1]
- %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp ; <float> [#uses=1]
- ret float %iftmp.2.0
+; CHECK: fucompi
+ %tmp = load float* %col.2.0
+ %tmp16 = fcmp olt float %tmp, 0.000000e+00
+ %tmp20 = fsub float -0.000000e+00, %tmp
+ %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
+ ret float %iftmp.2.0
}
-
-; RUN: llc < %s -march=x86 | grep {movzbl %\[abcd\]h,} | count 7
+; RUN: llc < %s -march=x86 | grep {movzx %\[abcd\]h,} | count 7
; Use h-register extract and zero-extend.
; WIN64: movzbl %ch, %eax
; X86-32: qux64:
-; X86-32: movzbl %ah, %eax
+; X86-32: movzx %ah, %eax
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
ret i64 %t1
; WIN64: movzbl %ch, %eax
; X86-32: qux32:
-; X86-32: movzbl %ah, %eax
+; X86-32: movzx %ah, %eax
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
ret i32 %t1
; WIN64: movzbl %ch, %eax
; X86-32: qux16:
-; X86-32: movzbl %ah, %eax
+; X86-32: movzx %ah, %eax
%t0 = lshr i16 %x, 8
ret i16 %t0
}
; RUN: llc < %s -march=x86 > %t
-; RUN: grep {movzbl %\[abcd\]h,} %t | count 1
+; RUN: grep {movzx %\[abcd\]h,} %t | count 1
; RUN: grep {shll \$3,} %t | count 1
; Use an h register, but don't omit the explicit shift for
; PR2094
-; RUN: llc < %s -march=x86-64 | grep movslq
+; RUN: llc < %s -march=x86-64 | grep movsx
; RUN: llc < %s -march=x86-64 | grep addps
; RUN: llc < %s -march=x86-64 | grep paddd
; RUN: llc < %s -march=x86-64 | not grep movq
; RUN: llc < %s -march=x86-64 > %t
; RUN: grep and %t | count 6
-; RUN: grep movzb %t | count 6
+; RUN: grep movzx %t | count 6
; RUN: grep sar %t | count 12
; Don't optimize away zext-inreg and sext-inreg on the loop induction
-; RUN: llc < %s -march=x86 -disable-cgp-branch-opts | grep movzbl
+; RUN: llc < %s -march=x86 -disable-cgp-branch-opts | FileCheck %s
; PR3366
+; CHECK: movzx
define void @_ada_c34002a() nounwind {
entry:
%0 = load i8* null, align 1
; ISel doesn't yet know how to eliminate this extra zero-extend. But until
; it knows how to do so safely, it shouldn't eliminate it.
; CHECK: movzbl (%rdi), %eax
-; CHECK: movzwl %ax, %eax
+; CHECK: movzx %ax, %eax
define i64 @_ZL5matchPKtPKhiR9MatchData(i8* %tmp13) nounwind {
entry:
entry:
; CHECK: foo:
; CHECK: movzwl 4(%esp), %eax
-; CHECK: xorl $21998, %eax
-; CHECK: movswl %ax, %eax
+; CHECK: xorl $21998, %eax
+; CHECK: movsx %ax, %eax
%0 = xor i16 %x, 21998
ret i16 %0
}
ret i32 0
; CHECK: test2:
; CHECK: movnew
-; CHECK: movswl
+; CHECK: movsx
}
declare i1 @return_false()
entry:
; CHECK: t1:
; CHECK: seta %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: shll $5, %eax
%0 = icmp ugt i16 %x, 26 ; <i1> [#uses=1]
%iftmp.1.0 = select i1 %0, i16 32, i16 0 ; <i16> [#uses=1]
-; RUN: llc < %s -march=x86 | grep movzbl | count 1
+; RUN: llc < %s -march=x86 | grep movzx | count 1
; rdar://6699246
define signext i8 @t1(i8* %A) nounwind readnone ssp {
define i64 @t(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
; CHECK: t:
-; CHECK: movslq %e{{.*}}, %rax
+; CHECK: movsx %e{{.*}}, %rax
; CHECK: movq %rax
; CHECK: movl %eax
%C = add i64 %A, %B
; RUN: llc < %s -march=x86 | grep {movl 8(.esp), %eax}
; RUN: llc < %s -march=x86 | grep {shrl .eax}
-; RUN: llc < %s -march=x86 | grep {movswl .ax, .eax}
+; RUN: llc < %s -march=x86 | grep {movsx .ax, .eax}
define i32 @test1(i64 %a) nounwind {
%tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1]
; CHECK: a:
; CHECK: mull
; CHECK: seto %al
-; CHECK: movzbl %al, %eax
+; CHECK: movzx %al, %eax
; CHECK: ret
}
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
entry:
; CHECK: shift3a:
-; CHECK: movzwl
+; CHECK: movzx
; CHECK: psllw
%shamt = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
%shl = shl <8 x i16> %val, %shamt
; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s
-; CHECK: movswl
-; CHECK: movswl
+; CHECK: movsx
+; CHECK: movsx
; sign extension v2i32 to v2i16
-; RUN: llc < %s | grep movswl
+; RUN: llc < %s | grep movsx
target datalayout = "e-p:64:64"
target triple = "x86_64-apple-darwin8"
// CHECK: encoding: [0xd5,0x01]
aad $1
-// CHECK: aad $10
+// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad $0xA
-// CHECK: aad $10
+// CHECK: aad
// CHECK: encoding: [0xd5,0x0a]
aad
// CHECK: encoding: [0xd4,0x02]
aam $2
-// CHECK: aam $10
+// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam $0xA
-// CHECK: aam $10
+// CHECK: aam
// CHECK: encoding: [0xd4,0x0a]
aam
// CHECK: encoding: [0xdf,0xf2]
fcompi %st(2)
-// CHECK: fcompi %st(1)
+// CHECK: fcompi
// CHECK: encoding: [0xdf,0xf1]
fcompi
// CHECK: encoding: [0xdf,0xea]
fucompi %st(2)
-// CHECK: fucompi %st(1)
+// CHECK: fucompi
// CHECK: encoding: [0xdf,0xe9]
fucompi
movsw %ds:(%esi), %es:(%edi)
movsw (%esi), %es:(%edi)
-// CHECK: movsl # encoding: [0xa5]
-// CHECK: movsl
-// CHECK: movsl
+// CHECK: movsd # encoding: [0xa5]
+// CHECK: movsd
+// CHECK: movsd
movsl
movsl %ds:(%esi), %es:(%edi)
movsl (%esi), %es:(%edi)
// rdar://8470918
smovb // CHECK: movsb
smovw // CHECK: movsw
-smovl // CHECK: movsl
+smovl // CHECK: movsd
smovq // CHECK: movsq
// rdar://8456361
// CHECK: rep
-// CHECK: movsl
+// CHECK: movsd
rep movsd
// CHECK: rep
// rdar://8407928
// CHECK: inb $127, %al
-// CHECK: inw %dx, %ax
+// CHECK: inw %dx
// CHECK: outb %al, $127
-// CHECK: outw %ax, %dx
-// CHECK: inl %dx, %eax
+// CHECK: outw %dx
+// CHECK: inl %dx
inb $0x7f
inw %dx
outb $0x7f
// PR8114
-// CHECK: outb %al, %dx
-// CHECK: outb %al, %dx
-// CHECK: outw %ax, %dx
-// CHECK: outw %ax, %dx
-// CHECK: outl %eax, %dx
-// CHECK: outl %eax, %dx
+// CHECK: outb %dx
+// CHECK: outb %dx
+// CHECK: outw %dx
+// CHECK: outw %dx
+// CHECK: outl %dx
+// CHECK: outl %dx
out %al, (%dx)
outb %al, (%dx)
out %eax, (%dx)
outl %eax, (%dx)
-// CHECK: inb %dx, %al
-// CHECK: inb %dx, %al
-// CHECK: inw %dx, %ax
-// CHECK: inw %dx, %ax
-// CHECK: inl %dx, %eax
-// CHECK: inl %dx, %eax
+// CHECK: inb %dx
+// CHECK: inb %dx
+// CHECK: inw %dx
+// CHECK: inw %dx
+// CHECK: inl %dx
+// CHECK: inl %dx
in (%dx), %al
inb (%dx), %al
// rdar://8431422
-// CHECK: fxch %st(1)
-// CHECK: fucom %st(1)
-// CHECK: fucomp %st(1)
-// CHECK: faddp %st(1)
+// CHECK: fxch
+// CHECK: fucom
+// CHECK: fucomp
+// CHECK: faddp
// CHECK: faddp %st(0)
-// CHECK: fsubp %st(1)
-// CHECK: fsubrp %st(1)
-// CHECK: fmulp %st(1)
-// CHECK: fdivp %st(1)
-// CHECK: fdivrp %st(1)
+// CHECK: fsubp
+// CHECK: fsubrp
+// CHECK: fmulp
+// CHECK: fdivp
+// CHECK: fdivrp
fxch
fucom
fdivp
fdivrp
-// CHECK: fcomi %st(1)
+// CHECK: fcomi
// CHECK: fcomi %st(2)
-// CHECK: fucomi %st(1)
-// CHECK: fucomi %st(2)
-// CHECK: fucomi %st(2)
+// CHECK: fucomi
+// CHECK: fucomi %st(2)
+// CHECK: fucomi %st(2)
fcomi
fcomi %st(2)
// CHECK: encoding: [0x48,0xa5]
movsl
-// CHECK: movsl
+// CHECK: movsd
// CHECK: encoding: [0xa5]
stosq
// CHECK: encoding: [0x48,0x0f,0xba,0xe2,0x01]
//rdar://8017633
-// CHECK: movzbl %al, %esi
+// CHECK: movzx %al, %esi
// CHECK: encoding: [0x0f,0xb6,0xf0]
movzx %al, %esi
-// CHECK: movzbq %al, %rsi
+// CHECK: movzx %al, %rsi
// CHECK: encoding: [0x48,0x0f,0xb6,0xf0]
movzx %al, %rsi
-// CHECK: movsbw %al, %ax
+// CHECK: movsx %al, %ax
// CHECK: encoding: [0x66,0x0f,0xbe,0xc0]
-movsx %al, %ax
+ movsx %al, %ax
-// CHECK: movsbl %al, %eax
+// CHECK: movsx %al, %eax
// CHECK: encoding: [0x0f,0xbe,0xc0]
-movsx %al, %eax
+ movsx %al, %eax
-// CHECK: movswl %ax, %eax
+// CHECK: movsx %ax, %eax
// CHECK: encoding: [0x0f,0xbf,0xc0]
-movsx %ax, %eax
+ movsx %ax, %eax
-// CHECK: movsbq %bl, %rax
+// CHECK: movsx %bl, %rax
// CHECK: encoding: [0x48,0x0f,0xbe,0xc3]
-movsx %bl, %rax
+ movsx %bl, %rax
-// CHECK: movswq %cx, %rax
+// CHECK: movsx %cx, %rax
// CHECK: encoding: [0x48,0x0f,0xbf,0xc1]
-movsx %cx, %rax
+ movsx %cx, %rax
-// CHECK: movslq %edi, %rax
+// CHECK: movsx %edi, %rax
// CHECK: encoding: [0x48,0x63,0xc7]
-movsx %edi, %rax
+ movsx %edi, %rax
-// CHECK: movzbw %al, %ax
+// CHECK: movzx %al, %ax
// CHECK: encoding: [0x66,0x0f,0xb6,0xc0]
-movzx %al, %ax
+ movzx %al, %ax
-// CHECK: movzbl %al, %eax
+// CHECK: movzx %al, %eax
// CHECK: encoding: [0x0f,0xb6,0xc0]
-movzx %al, %eax
+ movzx %al, %eax
-// CHECK: movzwl %ax, %eax
+// CHECK: movzx %ax, %eax
// CHECK: encoding: [0x0f,0xb7,0xc0]
-movzx %ax, %eax
+ movzx %ax, %eax
-// CHECK: movzbq %bl, %rax
+// CHECK: movzx %bl, %rax
// CHECK: encoding: [0x48,0x0f,0xb6,0xc3]
-movzx %bl, %rax
+ movzx %bl, %rax
-// CHECK: movzwq %cx, %rax
+// CHECK: movzx %cx, %rax
// CHECK: encoding: [0x48,0x0f,0xb7,0xc1]
-movzx %cx, %rax
+ movzx %cx, %rax
// CHECK: movsbw (%rax), %ax
// CHECK: encoding: [0x66,0x0f,0xbe,0x00]
-movsx (%rax), %ax
+ movsx (%rax), %ax
// CHECK: movzbw (%rax), %ax
// CHECK: encoding: [0x66,0x0f,0xb6,0x00]
-movzx (%rax), %ax
+ movzx (%rax), %ax
// rdar://7873482
rep movsl
// CHECK: rep
// CHECK: encoding: [0xf3]
-// CHECK: movsl
+// CHECK: movsd
// CHECK: encoding: [0xa5]
movsw %ds:(%rsi), %es:(%rdi)
movsw (%rsi), %es:(%rdi)
-// CHECK: movsl # encoding: [0xa5]
-// CHECK: movsl
-// CHECK: movsl
+// CHECK: movsd # encoding: [0xa5]
+// CHECK: movsd
+// CHECK: movsd
movsl
movsl %ds:(%rsi), %es:(%rdi)
movsl (%rsi), %es:(%rdi)