const MCOperand &Op2 = MI->getOperand(2);
const MCOperand &Op3 = MI->getOperand(3);
+ bool IsSigned = (Opcode == ARM64::SBFMXri || Opcode == ARM64::SBFMWri);
+ bool Is64Bit = (Opcode == ARM64::SBFMXri || Opcode == ARM64::UBFMXri);
if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
- bool IsSigned = (Opcode == ARM64::SBFMXri || Opcode == ARM64::SBFMWri);
const char *AsmMnemonic = nullptr;
switch (Op3.getImm()) {
default:
break;
case 7:
- AsmMnemonic = IsSigned ? "sxtb" : "uxtb";
+ if (IsSigned)
+ AsmMnemonic = "sxtb";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxtb";
break;
case 15:
- AsmMnemonic = IsSigned ? "sxth" : "uxth";
+ if (IsSigned)
+ AsmMnemonic = "sxth";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxth";
break;
case 31:
- // *xtw is only valid for 64-bit operations.
- if (Opcode == ARM64::SBFMXri || Opcode == ARM64::UBFMXri)
- AsmMnemonic = IsSigned ? "sxtw" : "uxtw";
+ // *xtw is only valid for signed 64-bit operations.
+ if (Is64Bit && IsSigned)
+ AsmMnemonic = "sxtw";
break;
}
return;
}
}
+
+ // SBFIZ/UBFIZ aliases
+ if (Op2.getImm() > Op3.getImm()) {
+ O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << (Is64Bit ? 64 : 32) - Op2.getImm() << ", #" << Op3.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
+ }
+
+  // Otherwise SBFX/UBFX is the preferred form
+ O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << Op2.getImm() << ", #" << Op3.getImm() - Op2.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
}
// Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
%uxt64 = zext i32 %var to i64
store volatile i64 %uxt64, i64* @var64
; CHECK-AARCH64: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #0, #32
-; CHECK-ARM64: uxtw {{x[0-9]+}}, {{w[0-9]+}}
+; CHECK-ARM64: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #32
ret void
}
%trunc_i1 = trunc i64 %in to i1
%sext_i1 = sext i1 %trunc_i1 to i64
store volatile i64 %sext_i1, i64* @var64
-; CHECK-AARCH64: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
-; CHECK-ARM64: sbfm {{x[0-9]+}}, {{x[0-9]+}}, #0, #0
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
%trunc_i8 = trunc i64 %in to i8
%sext_i8 = sext i8 %trunc_i8 to i64
; Different registers are of course, possible, though suboptimal. This is
; making sure that a 64-bit "(sext_inreg (anyext GPR32), i1)" uses the 64-bit
; sbfx rather than just 32-bits.
-; CHECK-AARCH64: sbfx x0, x0, #0, #1
-; CHECK-ARM64: sbfm x0, x0, #0, #0
+; CHECK: sbfx x0, x0, #0, #1
ret i64 %ext
}
define i32 @test_ubfx32(i32* %addr) {
; CHECK-LABEL: test_ubfx32:
-; CHECK-AARCH64: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
-; CHECK-ARM64: ubfm {{w[0-9]+}}, {{w[0-9]+}}, #23, #25
+; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
%fields = load i32* %addr
%shifted = lshr i32 %fields, 23
define i64 @test_ubfx64(i64* %addr) {
; CHECK-LABEL: test_ubfx64:
-; CHECK-AARCH64: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
-; CHECK-ARM64: ubfm {{x[0-9]+}}, {{x[0-9]+}}, #25, #34
+; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
%fields = load i64* %addr
%shifted = lshr i64 %fields, 25
%masked = and i64 %shifted, 1023
define i32 @test_sbfx32(i32* %addr) {
; CHECK-LABEL: test_sbfx32:
-; CHECK-AARCH64: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
-; CHECK-ARM64: sbfm {{w[0-9]+}}, {{w[0-9]+}}, #6, #8
+; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
%fields = load i32* %addr
%shifted = shl i32 %fields, 23
define i64 @test_sbfx64(i64* %addr) {
; CHECK-LABEL: test_sbfx64:
-; CHECK-AARCH64: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
-; CHECK-ARM64: sbfm {{x[0-9]+}}, {{x[0-9]+}}, #0, #62
+; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
%fields = load i64* %addr
%shifted = shl i64 %fields, 1
%val = load i1* @var
%ret = sext i1 %val to i32
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
-; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfm w[0-9]+, w[0-9]+, #0, #0}}
+; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfx w[0-9]+, w[0-9]+, #0, #1}}
ret i32 %ret
; CHECK: ret
%val = load i1* @var
%ret = sext i1 %val to i64
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
-; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfm x[0-9]+, x[0-9]+, #0, #0}}
+; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1}}
ret i64 %ret
; CHECK: ret
%ext_int = zext i32 %int to i64
store volatile i64 %ext_int, i64* @var64
-; CHECK: uxtw [[EXT:x[0-9]+]], w3
+; CHECK: ubfx [[EXT:x[0-9]+]], x3, #0, #32
; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
ret void
define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind optsize ssp {
; CHECK-LABEL: foo:
-; CHECK: ubfm
+; CHECK: ubfx
; CHECK-NOT: and
; CHECK: ret
define i32 @baz(i64 %cav1.coerce) nounwind {
; CHECK-LABEL: baz:
-; CHECK: sbfm w0, w0, #0, #3
+; CHECK: sbfx w0, w0, #0, #4
%tmp = trunc i64 %cav1.coerce to i32
%tmp1 = shl i32 %tmp, 28
%bf.val.sext = ashr exact i32 %tmp1, 28
define i32 @bar(i64 %cav1.coerce) nounwind {
; CHECK-LABEL: bar:
-; CHECK: sbfm w0, w0, #4, #9
+; CHECK: sbfx w0, w0, #4, #6
%tmp = trunc i64 %cav1.coerce to i32
%cav1.sroa.0.1.insert = shl i32 %tmp, 22
%tmp1 = ashr i32 %cav1.sroa.0.1.insert, 26
define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind optsize ssp {
; CHECK-LABEL: fct1:
-; CHECK: ubfm
+; CHECK: ubfx
; CHECK-NOT: and
; CHECK: ret
define i64 @fct2(i64 %cav1.coerce) nounwind {
; CHECK-LABEL: fct2:
-; CHECK: sbfm x0, x0, #0, #35
+; CHECK: sbfx x0, x0, #0, #36
%tmp = shl i64 %cav1.coerce, 28
%bf.val.sext = ashr exact i64 %tmp, 28
ret i64 %bf.val.sext
define i64 @fct3(i64 %cav1.coerce) nounwind {
; CHECK-LABEL: fct3:
-; CHECK: sbfm x0, x0, #4, #41
+; CHECK: sbfx x0, x0, #4, #38
%cav1.sroa.0.1.insert = shl i64 %cav1.coerce, 22
%tmp1 = ashr i64 %cav1.sroa.0.1.insert, 26
ret i64 %tmp1
define zeroext i1 @fct12bis(i32 %tmp2) unnamed_addr nounwind ssp align 2 {
; CHECK-LABEL: fct12bis:
; CHECK-NOT: and
-; CHECK: ubfm w0, w0, #11, #11
+; CHECK: ubfx w0, w0, #11, #1
%and.i.i = and i32 %tmp2, 2048
%tobool.i.i = icmp ne i32 %and.i.i, 0
ret i1 %tobool.i.i
; CHECK: ldr [[REG1:w[0-9]+]],
; CHECK-NEXT: bfm [[REG1]], w1, #16, #18
; lsr is an alias of ubfm
-; CHECK-NEXT: ubfm [[REG2:w[0-9]+]], [[REG1]], #2, #29
+; CHECK-NEXT: ubfx [[REG2:w[0-9]+]], [[REG1]], #2, #28
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
%0 = load i32* %y, align 8
; CHECK: ldr [[REG1:x[0-9]+]],
; CHECK-NEXT: bfm [[REG1]], x1, #16, #18
; lsr is an alias of ubfm
-; CHECK-NEXT: ubfm [[REG2:x[0-9]+]], [[REG1]], #2, #61
+; CHECK-NEXT: ubfx [[REG2:x[0-9]+]], [[REG1]], #2, #60
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
%0 = load i64* %y, align 8
; CHECK: and [[REG2:w[0-9]+]], [[REG1]], [[REGCST]]
; CHECK-NEXT: bfm [[REG2]], w1, #16, #18
; lsr is an alias of ubfm
-; CHECK-NEXT: ubfm [[REG3:w[0-9]+]], [[REG2]], #2, #29
+; CHECK-NEXT: ubfx [[REG3:w[0-9]+]], [[REG2]], #2, #28
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
%0 = load i32* %y, align 8
; CHECK: and [[REG2:x[0-9]+]], [[REG1]], x[[REGCST]]
; CHECK-NEXT: bfm [[REG2]], x1, #16, #18
; lsr is an alias of ubfm
-; CHECK-NEXT: ubfm [[REG3:x[0-9]+]], [[REG2]], #2, #61
+; CHECK-NEXT: ubfx [[REG3:x[0-9]+]], [[REG2]], #2, #60
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
%0 = load i64* %y, align 8
define i64 @fct18(i32 %xor72) nounwind ssp {
; CHECK-LABEL: fct18:
-; CHECK: ubfm x0, x0, #9, #16
+; CHECK: ubfx x0, x0, #9, #8
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%result = and i64 %conv82, 255
; OPT-LABEL: if.end
if.end: ; preds = %entry
; OPT: lshr
-; CHECK: ubfm [[REG1:x[0-9]+]], [[REG2:x[0-9]+]], #32, #47
+; CHECK: ubfx [[REG1:x[0-9]+]], [[REG2:x[0-9]+]], #32, #16
%x.sroa.3.0.extract.trunc = trunc i64 %x.sroa.3.0.extract.shift to i16
%tobool6 = icmp eq i16 %x.sroa.3.0.extract.trunc, 0
; CHECK: cbz
if.end13: ; preds = %if.end
; OPT: lshr
; OPT: trunc
-; CHECK: ubfm [[REG3:x[0-9]+]], [[REG4:x[0-9]+]], #16, #31
+; CHECK: ubfx [[REG3:x[0-9]+]], [[REG4:x[0-9]+]], #16, #16
%tobool16 = icmp eq i16 %x.sroa.1.0.extract.trunc, 0
; CHECK: cbz
br i1 %tobool16, label %return, label %if.then17
; CHECK: uxth w0, w0
; CHECK: str w0, [sp, #8]
; CHECK: ldr w0, [sp, #8]
-; CHECK: uxtw x3, w0
+; CHECK: ubfx x3, w0, #0, #32
; CHECK: str x3, [sp]
; CHECK: ldr x0, [sp], #16
; CHECK: ret
define i32 @sext_i1_i32(i1 signext %a) nounwind ssp {
entry:
; CHECK: sext_i1_i32
-; CHECK: sbfm w0, w0, #0, #0
+; CHECK: sbfx w0, w0, #0, #1
%conv = sext i1 %a to i32
ret i32 %conv
}
define signext i16 @sext_i1_i16(i1 %a) nounwind ssp {
entry:
; CHECK: sext_i1_i16
-; CHECK: sbfm w0, w0, #0, #0
+; CHECK: sbfx w0, w0, #0, #1
%conv = sext i1 %a to i16
ret i16 %conv
}
define signext i8 @sext_i1_i8(i1 %a) nounwind ssp {
entry:
; CHECK: sext_i1_i8
-; CHECK: sbfm w0, w0, #0, #0
+; CHECK: sbfx w0, w0, #0, #1
%conv = sext i1 %a to i8
ret i8 %conv
}
define float @sitofp_sw_i1(i1 %a) nounwind ssp {
entry:
; CHECK: sitofp_sw_i1
-; CHECK: sbfm w0, w0, #0, #0
+; CHECK: sbfx w0, w0, #0, #1
; CHECK: scvtf s0, w0
%conv = sitofp i1 %a to float
ret float %conv
define i16 @load_halfword(%struct.a* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: load_halfword:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldrh w0, [x0, [[REG]], lsl #1]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
define i32 @load_word(%struct.b* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: load_word:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr w0, [x0, [[REG]], lsl #2]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
define i64 @load_doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: load_doubleword:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr x0, [x0, [[REG]], lsl #3]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
define void @store_halfword(%struct.a* %ctx, i32 %xor72, i16 %val) nounwind {
; CHECK-LABEL: store_halfword:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: strh w2, [x0, [[REG]], lsl #1]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
define void @store_word(%struct.b* %ctx, i32 %xor72, i32 %val) nounwind {
; CHECK-LABEL: store_word:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: str w2, [x0, [[REG]], lsl #2]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
define void @store_doubleword(%struct.c* %ctx, i32 %xor72, i64 %val) nounwind {
; CHECK-LABEL: store_doubleword:
-; CHECK: ubfm [[REG:x[0-9]+]], x1, #9, #16
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: str x2, [x0, [[REG]], lsl #3]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
entry:
; CHECK-LABEL: extendedLeftShiftcharToshortBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #28, #7
+; CHECK: sbfiz w0, [[REG]], #4, #8
%inc = add i8 %a, 1
%conv1 = sext i8 %inc to i32
%shl = shl nsw i32 %conv1, 4
entry:
; CHECK-LABEL: extendedRightShiftcharToshortBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #4, #7
+; CHECK: sbfx w0, [[REG]], #4, #4
%inc = add i8 %a, 1
%conv1 = sext i8 %inc to i32
%shr4 = lshr i32 %conv1, 4
entry:
; CHECK-LABEL: extendedLeftShiftcharToshortBy8:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #24, #7
+; CHECK: sbfiz w0, [[REG]], #8, #8
%inc = add i8 %a, 1
%conv1 = sext i8 %inc to i32
%shl = shl nsw i32 %conv1, 8
entry:
; CHECK-LABEL: extendedLeftShiftcharTointBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #28, #7
+; CHECK: sbfiz w0, [[REG]], #4, #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i32
%shl = shl nsw i32 %conv, 4
entry:
; CHECK-LABEL: extendedRightShiftcharTointBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #4, #7
+; CHECK: sbfx w0, [[REG]], #4, #4
%inc = add i8 %a, 1
%conv = sext i8 %inc to i32
%shr = ashr i32 %conv, 4
entry:
; CHECK-LABEL: extendedLeftShiftcharTointBy8:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #24, #7
+; CHECK: sbfiz w0, [[REG]], #8, #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i32
%shl = shl nsw i32 %conv, 8
entry:
; CHECK-LABEL: extendedLeftShiftcharToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #60, #7
+; CHECK: sbfiz x0, x[[REG]], #4, #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i64
%shl = shl nsw i64 %conv, 4
entry:
; CHECK-LABEL: extendedRightShiftcharToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #4, #7
+; CHECK: sbfx x0, x[[REG]], #4, #4
%inc = add i8 %a, 1
%conv = sext i8 %inc to i64
%shr = ashr i64 %conv, 4
entry:
; CHECK-LABEL: extendedLeftShiftcharToint64By8:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #56, #7
+; CHECK: sbfiz x0, x[[REG]], #8, #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i64
%shl = shl nsw i64 %conv, 8
entry:
; CHECK-LABEL: extendedLeftShiftshortTointBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #28, #15
+; CHECK: sbfiz w0, [[REG]], #4, #16
%inc = add i16 %a, 1
%conv = sext i16 %inc to i32
%shl = shl nsw i32 %conv, 4
entry:
; CHECK-LABEL: extendedRightShiftshortTointBy4:
; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: sbfm w0, [[REG]], #4, #15
+; CHECK: sbfx w0, [[REG]], #4, #12
%inc = add i16 %a, 1
%conv = sext i16 %inc to i32
%shr = ashr i32 %conv, 4
entry:
; CHECK-LABEL: extendedLeftShiftshortToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #60, #15
+; CHECK: sbfiz x0, x[[REG]], #4, #16
%inc = add i16 %a, 1
%conv = sext i16 %inc to i64
%shl = shl nsw i64 %conv, 4
entry:
; CHECK-LABEL: extendedRightShiftshortToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #4, #15
+; CHECK: sbfx x0, x[[REG]], #4, #12
%inc = add i16 %a, 1
%conv = sext i16 %inc to i64
%shr = ashr i64 %conv, 4
entry:
; CHECK-LABEL: extendedLeftShiftshortToint64By16:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #48, #15
+; CHECK: sbfiz x0, x[[REG]], #16, #16
%inc = add i16 %a, 1
%conv = sext i16 %inc to i64
%shl = shl nsw i64 %conv, 16
entry:
; CHECK-LABEL: extendedLeftShiftintToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #60, #31
+; CHECK: sbfiz x0, x[[REG]], #4, #32
%inc = add nsw i32 %a, 1
%conv = sext i32 %inc to i64
%shl = shl nsw i64 %conv, 4
entry:
; CHECK-LABEL: extendedRightShiftintToint64By4:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sbfm x0, x[[REG]], #4, #31
+; CHECK: sbfx x0, x[[REG]], #4, #28
%inc = add nsw i32 %a, 1
%conv = sext i32 %inc to i64
%shr = ashr i64 %conv, 4
ubfx w0, w0, #2, #3
ubfx x0, x0, #2, #3
-; CHECK: bfm w0, w0, #31, #3
-; CHECK: bfm x0, x0, #63, #3
-; CHECK: bfm w0, w0, #0, #1
-; CHECK: bfm x0, x0, #0, #1
-; CHECK: bfm w0, w0, #2, #4
-; CHECK: bfm x0, x0, #2, #4
-; CHECK: sbfm w0, w0, #31, #3
-; CHECK: sbfm x0, x0, #63, #3
-; CHECK: sbfm w0, w0, #2, #4
-; CHECK: sbfm x0, x0, #2, #4
-; CHECK: ubfm w0, w0, #31, #3
-; CHECK: ubfm x0, x0, #63, #3
-; CHECK: ubfm w0, w0, #2, #4
-; CHECK: ubfm x0, x0, #2, #4
+; CHECK: bfm w0, w0, #31, #3
+; CHECK: bfm x0, x0, #63, #3
+; CHECK: bfm w0, w0, #0, #1
+; CHECK: bfm x0, x0, #0, #1
+; CHECK: bfm w0, w0, #2, #4
+; CHECK: bfm x0, x0, #2, #4
+; CHECK: sbfiz w0, w0, #1, #4
+; CHECK: sbfiz x0, x0, #1, #4
+; CHECK: sbfx w0, w0, #2, #3
+; CHECK: sbfx x0, x0, #2, #3
+; CHECK: ubfiz w0, w0, #1, #4
+; CHECK: ubfiz x0, x0, #1, #4
+; CHECK: ubfx w0, w0, #2, #3
+; CHECK: ubfx x0, x0, #2, #3
;-----------------------------------------------------------------------------
; Shift (immediate) aliases
; CHECK: sxtb x1, w2
; CHECK: sxth x1, w2
; CHECK: sxtw x1, w2
-; CHECK: uxtb x1, w2
-; CHECK: uxth x1, w2
-; CHECK: uxtw x1, w2
+; CHECK: ubfx x1, x2, #0, #8
+; CHECK: ubfx x1, x2, #0, #16
+; CHECK: ubfx x1, x2, #0, #32
;-----------------------------------------------------------------------------
; Negate with carry
; CHECK: bfm w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x33]
; CHECK: bfm x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0xb3]
-; CHECK: sbfm w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x13]
-; CHECK: sbfm x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0x93]
-; CHECK: ubfm w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x53]
-; CHECK: ubfm x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0xd3]
-; CHECK: sbfm wzr, w0, #1, #0 ; encoding: [0x1f,0x00,0x01,0x13]
-; CHECK: sbfm xzr, x0, #33, #0 ; encoding: [0x1f,0x00,0x61,0x93]
+; CHECK: sbfx w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x13]
+; CHECK: sbfx x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0x93]
+; CHECK: ubfx w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x53]
+; CHECK: ubfx x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0xd3]
+; CHECK: sbfiz wzr, w0, #31, #1 ; encoding: [0x1f,0x00,0x01,0x13]
+; CHECK: sbfiz xzr, x0, #31, #1 ; encoding: [0x1f,0x00,0x61,0x93]
; CHECK: lsl wzr, w0, #31 ; encoding: [0x1f,0x00,0x01,0x53]
-; CHECK: ubfm xzr, x0, #33, #0 ; encoding: [0x1f,0x00,0x61,0xd3]
+; CHECK: ubfiz xzr, x0, #31, #1 ; encoding: [0x1f,0x00,0x61,0xd3]
;==---------------------------------------------------------------------------==
; 5.4.5 Extract (immediate)
# CHECK: bfm w1, w2, #1, #15
# CHECK: bfm x1, x2, #1, #15
-# CHECK: sbfm w1, w2, #1, #15
-# CHECK: sbfm x1, x2, #1, #15
-# CHECK: ubfm w1, w2, #1, #15
-# CHECK: ubfm x1, x2, #1, #15
+# CHECK: sbfx w1, w2, #1, #15
+# CHECK: sbfx x1, x2, #1, #15
+# CHECK: ubfx w1, w2, #1, #15
+# CHECK: ubfx x1, x2, #1, #15
#==---------------------------------------------------------------------------==
# 5.4.5 Extract (immediate)