// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates. An i16 anyext of an i8 register is
// selected as a 16-bit movzx so the upper byte is defined.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
// Except for i16 -> i32 since isel expect i16 ops to be promoted to i32.
Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
// In 32-bit mode only the ABCD registers have an addressable low-byte
// subreg, so copy into GR16_ABCD before extracting sub_8bit.
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^32-1) ==> movz
Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
// In 64-bit mode every GPR has an addressable low byte, so no ABCD copy
// is needed; extract sub_8bit directly.
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;
GR32_ABCD)),
sub_8bit))>,
Requires<[In32BitMode]>;
// sext_inreg of the low byte of an i16: in 32-bit mode only the ABCD
// registers have an addressable low-byte subreg, so copy into GR16_ABCD
// first, then sign-extend the byte with a 16-bit movsx.
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// sext_inreg of the low byte of an i32 in 64-bit mode: every GR32 has an
// addressable low byte, so extract sub_8bit directly.
// NOTE(review): the original read "(sext_inreg GR64:$src, i32)", which
// mismatches the GR32:$src/sub_8bit/MOVSX32rr8 result operand ($src cannot
// have two register classes); GR32/i8 is the form that matches the result.
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
// sext_inreg of the low byte of an i16 in 64-bit mode: the low byte is
// directly addressable, so no ABCD copy is needed.
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;

// trunc patterns
// i32 -> i16 truncation is just a subregister extraction; no instruction
// is emitted.
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
"movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
// Sign-extend a byte load into a 16-bit register. Assembler-visible "W"
// form: note the empty isel pattern list ([]).
def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                    "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;

// FIXME: Use a pat pattern or define a syntax here.
// isCodeGenOnly sign-extend-from-i8 forms with an empty asm string: these
// carry the isel patterns, while the assembler-visible spellings are the
// *W variants defined above.
let isCodeGenOnly=1 in {
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                   "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
                   "", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
}
// Sign-extend an i8 register into an i32 register (movsbl); selected for
// (sext GR8) via the attached pattern.
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                   "movs{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sext GR8:$src))]>, TB;
"movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
// Zero-extend a byte load into a 16-bit register. Assembler-visible "W"
// form: note the empty isel pattern list ([]).
def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                    "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
// FIXME: Use a pat pattern or define a syntax here.
// isCodeGenOnly zero-extend-from-i8 forms with an empty asm string: these
// carry the isel patterns, while the assembler-visible spellings are the
// *W variants defined above.
let isCodeGenOnly=1 in {
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                   "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
                   "", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
}
// Zero-extend an i8 register into an i32 register (movzbl); selected for
// (zext GR8) via the attached pattern.
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                   "movz{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zext GR8:$src))]>, TB;