def i64immSExt32 : PatLeaf<(i64 imm), [{
// i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// sign extended field.
- return (int64_t)N->getValue() == (int32_t)N->getValue();
+ return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;
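// For example, 0xFFFFFFFF80000000 (-2^31) satisfies i64immSExt32, while
// 0x0000000080000000 (+2^31) does not.
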
def i64immZExt32 : PatLeaf<(i64 imm), [{
// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// zero extended field.
- return (uint64_t)N->getValue() == (uint32_t)N->getValue();
+ return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
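// For example, 0x00000000FFFFFFFF satisfies i64immZExt32 (though not
// i64immSExt32, since bit 31 is set), while 0x0000000100000000 fails both.
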
def i64immSExt8 : PatLeaf<(i64 imm), [{
// i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
// sign extended field.
- return (int64_t)N->getValue() == (int8_t)N->getValue();
+ return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
}]>;
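// For example, -1 (0xFFFFFFFFFFFFFFFF) and 127 satisfy i64immSExt8, while 128
// does not.
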
def i64immFFFFFFFF : PatLeaf<(i64 imm), [{
// i64immFFFFFFFF - True if this is a specific constant we can't write in
// tblgen files.
- return N->getValue() == 0x00000000FFFFFFFFULL;
+ return N->getZExtValue() == 0x00000000FFFFFFFFULL;
}]>;
def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
"call\t{*}$dst", [(X86call GR64:$dst)]>;
def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
- "call\t{*}$dst", []>;
+ "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>;
}
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                  [(brind (loadi64 addr:$dst))]>;
}
+//===----------------------------------------------------------------------===//
+// EH Pseudo Instructions
+//
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1 in {
+def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
+ "ret\t#eh_return, addr: $addr",
+ [(X86ehret GR64:$addr)]>;
+
+}
+
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
"movabs{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, imm:$src)]>;
"movs{lq|xd}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
-def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (zext GR8:$src))]>, TB;
-def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
-def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (zext GR16:$src))]>, TB;
-def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
+// Use movzbl instead of movzbq when the destination is a register; it's
+// equivalent due to implicit zero-extending, and it has a smaller encoding.
+def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
+ "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zext GR8:$src))]>, TB;
+def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
+ "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
+// Use movzwl instead of movzwq when the destination is a register; it's
+// equivalent due to implicit zero-extending, and it has a smaller encoding.
+def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+ "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zext GR16:$src))]>, TB;
+def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+ "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
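+// For example (AT&T syntax), "movzbl %al, %ecx" implicitly clears bits 63:32
+// of %rcx (any write to a 32-bit register does), so it yields the same result
+// as "movzbq %al, %rcx" while avoiding the REX.W prefix.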
+
+// There's no movzlq instruction, but movl can be used for this purpose, using
+// implicit zero-extension. We need this because the seeming alternative for
+// implementing zext from 32 to 64, an EXTRACT_SUBREG/SUBREG_TO_REG pair, isn't
+// safe because both instructions could be optimized away in the
+// register-to-register case, leaving nothing behind to do the zero extension.
+def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zext GR32:$src))]>;
+def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
+ "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+ [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
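+// For example, (zext GR32:%esi) emits "movl %esi, %eax"; the move itself
+// performs the zero extension, so there is no separate operation for the
+// optimizer to delete.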
let neverHasSideEffects = 1 in {
let Defs = [RAX], Uses = [EAX] in
def CDQE : RI<0x98, RawFrm, (outs), (ins),
             "{cltq|cdqe}", []>;     // RAX = signext(EAX)
}
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
-let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
-def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
- "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
- [(set GR64:$dst, 0)]>;
+let Defs = [EFLAGS], AddedComplexity = 1,
+ isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins),
+ "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
+ [(set GR64:$dst, 0)]>;
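+// For example, "xorl %eax, %eax" is a 2-byte encoding that zeroes all of
+// %rax (the 32-bit write clears the upper half), versus 7 bytes for
+// "movq $0, %rax".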
// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
                   ".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
[(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
//===----------------------------------------------------------------------===//
// Atomic Instructions
//
let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
- "lock cmpxchgq $swap,$ptr",
+ "lock\n\tcmpxchgq\t$swap,$ptr",
[(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
-let Constraints = "$val = $dst", Defs = [EFLAGS] in {
+let Constraints = "$val = $dst" in {
+let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
- "lock xadd $val, $ptr",
- [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
+ "lock\n\txadd\t$val, $ptr",
+ [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
- "xchg $val, $ptr",
+ "xchg\t$val, $ptr",
[(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
}
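// For example, a 64-bit atomic fetch-and-add selects to LXADD64 and emits,
// roughly:
//   lock
//   xadd %rsi, (%rdi)
// leaving the previous memory value in the register operand.
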
+// Atomic and, or, xor, nand, min, max, umin, and umax pseudo instructions
+let Constraints = "$val = $dst", Defs = [EFLAGS],
+    usesCustomDAGSchedInserter = 1 in {
+def ATOMAND64  : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
+def ATOMOR64   : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
+def ATOMXOR64  : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMXOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
+def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMNAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
+def ATOMMIN64  : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
+def ATOMMAX64  : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
+def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
+def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
+}
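+// These pseudos have no single x86 encoding; the custom inserter expands each
+// one into a compare-and-swap retry loop, roughly (for ATOMAND64):
+//   movq (%rdi), %rax
+// .retry:
+//   movq %rax, %rcx
+//   andq %rsi, %rcx
+//   lock cmpxchgq %rcx, (%rdi)
+//   jne .retry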
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//

// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
-def : Pat<(zextloadi64i32 addr:$src),
- (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), x86_subreg_32bit)>;
-
// extload
+// When extloading from 16-bit and smaller memory locations into 64-bit
+// registers, use zero-extending loads so that the entire 64-bit register is
+// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
-def : Pat<(extloadi64i32 addr:$src),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
- x86_subreg_32bit)>;
-
-// anyext -> zext
-def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
-def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
+// For other extloads, use subregs, since the high contents of the register are
+// defined after an extload.
+def : Pat<(extloadi64i32 addr:$src),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
+ x86_subreg_32bit)>;
+def : Pat<(extloadi16i1 addr:$src),
+ (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
+ x86_subreg_8bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(extloadi16i8 addr:$src),
+ (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
+ x86_subreg_8bit)>,
+ Requires<[In64BitMode]>;
+
+// anyext
+def : Pat<(i64 (anyext GR8:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
+def : Pat<(i64 (anyext GR16:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
def : Pat<(i64 (anyext GR32:$src)),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
-
-def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
-def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
-def : Pat<(i64 (anyext (loadi32 addr:$src))),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
- x86_subreg_32bit)>;
+def : Pat<(i16 (anyext GR8:$src)),
+ (INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (anyext GR8:$src)),
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
+ Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
-// r & (2^32-1) ==> mov32 + implicit zext
-def : Pat<(and GR64:$src, i64immFFFFFFFF),
- (SUBREG_TO_REG (i64 0),
- (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)),
- x86_subreg_32bit)>;
+// r & (2^32-1) ==> movz
+def : Pat<(and GR64:$src, i64immFFFFFFFF),
+ (MOVZX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
+// r & (2^16-1) ==> movz
+def : Pat<(and GR64:$src, 0xffff),
+ (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR64:$src, 0xff),
+ (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit)))>,
+ Requires<[In64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
+ Requires<[In64BitMode]>;
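+// For example, with $src in %rax, (and GR64:$src, 0xff) becomes
+// "movzbl %al, %eax", relying on implicit zero extension to clear bits 63:8.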
+
+// sext_inreg patterns
+def : Pat<(sext_inreg GR64:$src, i32),
+ (MOVSX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
+def : Pat<(sext_inreg GR64:$src, i16),
+ (MOVSX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+def : Pat<(sext_inreg GR64:$src, i8),
+ (MOVSX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)))>,
+ Requires<[In64BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+ (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
+ Requires<[In64BitMode]>;
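+// For example, with $src in %rax, (sext_inreg GR64:$src, i16) becomes
+// "movswq %ax, %rax".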
+
+// trunc patterns
+def : Pat<(i32 (trunc GR64:$src)),
+ (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+def : Pat<(i16 (trunc GR64:$src)),
+ (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
+def : Pat<(i8 (trunc GR64:$src)),
+ (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
+def : Pat<(i8 (trunc GR32:$src)),
+ (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i8 (trunc GR16:$src)),
+ (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit))>,
+ Requires<[In64BitMode]>;
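+// None of these patterns emit an instruction: the truncated value is simply
+// the low subregister, e.g. the i32 truncation of a value in %rax is %eax.
+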
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+// (shl x (and y, 63)) ==> (shl x, y)
+def : Pat<(shl GR64:$src1, (and CL:$amt, 63)),
+ (SHL64rCL GR64:$src1)>;
+def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
+ (SHL64mCL addr:$dst)>;
+
+def : Pat<(srl GR64:$src1, (and CL:$amt, 63)),
+ (SHR64rCL GR64:$src1)>;
+def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
+ (SHR64mCL addr:$dst)>;
+
+def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
+ (SAR64rCL GR64:$src1)>;
+def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
+ (SAR64mCL addr:$dst)>;
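+// The masks fold away because 64-bit x86 shifts already use only the low six
+// bits of %cl; e.g. "shlq %cl, %rax" with %cl = 67 shifts by 67 & 63 = 3.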
+
// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===----------------------------------------------------------------------===//