1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // Move Instructions...
// Plain 64-bit register-to-register move: opcode 0x89 with the destination in
// ModRM.rm (MRMDestReg). No pattern — copies are inserted by the register
// allocator, not matched by ISel. Marked side-effect free so machine-level
// passes may freely move or delete it.
21 let neverHasSideEffects = 1 in
22 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
23 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate-to-register moves. Both are rematerializable and as cheap as a
// plain move, so the register allocator may re-emit them rather than spill.
25 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// Full 64-bit immediate (movabsq): 0xB8+reg with an 8-byte immediate.
26 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
27 "movabs{q}\t{$src, $dst|$dst, $src}",
28 [(set GR64:$dst, imm:$src)]>;
// Sign-extended 32-bit immediate (0xC7 /0): shorter encoding, selected when
// the immediate fits in a signed 32-bit value (i64immSExt32).
29 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
30 "mov{q}\t{$src, $dst|$dst, $src}",
31 [(set GR64:$dst, i64immSExt32:$src)]>;
34 // The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq, so provide an asm-parser-only record for that spelling. It shares
// MOV64ri's encoding (0xB8+reg) and has no ISel pattern.
36 let isAsmParserOnly = 1 in {
37 def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
38 "mov{q}\t{$src, $dst|$dst, $src}", []>;
41 let isCodeGenOnly = 1 in {
// Alternate (reversed) encoding of the reg-reg move: 0x8B reads ModRM.rm as
// the source (MRMSrcReg). isCodeGenOnly keeps the assembler from matching it,
// since it prints identically to MOV64rr.
42 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
43 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// 64-bit load (0x8B /r, memory form). canFoldAsLoad lets the load fold into a
// using instruction; rematerializable so the value can be reloaded instead of
// spilled.
46 let canFoldAsLoad = 1, isReMaterializable = 1 in
47 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
48 "mov{q}\t{$src, $dst|$dst, $src}",
49 [(set GR64:$dst, (load addr:$src))]>;
// 64-bit store of a register (0x89 /r, memory form).
51 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
52 "mov{q}\t{$src, $dst|$dst, $src}",
53 [(store GR64:$src, addr:$dst)]>;
// Store of a sign-extended 32-bit immediate to memory (0xC7 /0).
54 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
55 "mov{q}\t{$src, $dst|$dst, $src}",
56 [(store i64immSExt32:$src, addr:$dst)]>;
58 /// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
// These operate on the restricted GR64_TC register class ("TC" presumably
// denotes the tail-call class — verify against the register definitions).
// isCodeGenOnly: they exist only for the compiler; the assembler never
// matches them, and they reuse the ordinary mov encodings (0x89/0x8B).
59 let isCodeGenOnly = 1 in {
60 let neverHasSideEffects = 1 in
61 def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
62 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// NOTE(review): the `let` keyword line that should introduce the flags below
// appears to be elided from this excerpt — confirm against the full file.
65 canFoldAsLoad = 1, isReMaterializable = 1 in
66 def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
67 "mov{q}\t{$src, $dst|$dst, $src}",
// NOTE(review): the pattern/terminator lines of MOV64rm_TC and MOV64mr_TC
// are elided from this excerpt — confirm against the full file.
71 def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
72 "mov{q}\t{$src, $dst|$dst, $src}",
76 // FIXME: These definitions are utterly broken
77 // Just leave them commented out for now because they're useless outside
78 // of the large code model, and most compilers won't generate the instructions
// These are the moffs forms: mov between %rax and an absolute memory offset
// (opcodes 0xA0-0xA3), with the offset encoded directly in the instruction.
// NOTE(review): despite the comment above, the defs here are live in this
// excerpt (and MOV64ao8/MOV64ao64 put a memory offset in (outs), which looks
// like part of the brokenness) — confirm intent against the full file.
81 def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
82 "mov{q}\t{$src, %rax|%rax, $src}", []>;
83 def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
84 "mov{q}\t{$src, %rax|%rax, $src}", []>;
85 def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
86 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
87 def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
88 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
91 //===----------------------------------------------------------------------===//
92 // Comparison Instructions...
// All compares implicitly define EFLAGS.
96 let Defs = [EFLAGS] in {
// cmp %rax, imm32 (0x3D, RawFrm — the accumulator operand is implicit in the
// opcode, hence no ModRM and no ISel pattern).
98 def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
99 "cmp{q}\t{$src, %rax|%rax, $src}", []>;
// cmp r/m64, r64 (0x39 /r), register form; produces EFLAGS via X86cmp.
100 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
101 "cmp{q}\t{$src2, $src1|$src1, $src2}",
102 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;
104 // These are alternate spellings for use by the disassembler, we mark them as
105 // code gen only to ensure they aren't matched by the assembler.
106 let isCodeGenOnly = 1 in {
// Reversed reg-reg encoding of cmp (0x3B reads ModRM.rm as the right operand);
// same assembly text as CMP64rr, so only the disassembler needs it.
107 def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
108 "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
// cmp mem64, r64 (0x39 /r): compare a loaded value against a register.
111 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
112 "cmp{q}\t{$src2, $src1|$src1, $src2}",
113 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
// cmp r64, mem64 (0x3B /r): register on the left, loaded value on the right.
114 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
115 "cmp{q}\t{$src2, $src1|$src1, $src2}",
116 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
// cmp r64, imm8 (0x83 /7): sign-extended 8-bit immediate form.
117 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
118 "cmp{q}\t{$src2, $src1|$src1, $src2}",
119 [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
// cmp r64, imm32 (0x81 /7): sign-extended 32-bit immediate form.
120 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
121 "cmp{q}\t{$src2, $src1|$src1, $src2}",
122 [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
// cmp mem64, imm8 (0x83 /7, memory form).
123 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
124 "cmp{q}\t{$src2, $src1|$src1, $src2}",
125 [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
126 i64immSExt8:$src2))]>;
// cmp mem64, imm32 (0x81 /7, memory form).
127 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
128 (ins i64mem:$src1, i64i32imm:$src2),
129 "cmp{q}\t{$src2, $src1|$src1, $src2}",
130 [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
131 i64immSExt32:$src2))]>;
135 // TODO: BTC, BTR, and BTS
// Bit-test instructions; all define EFLAGS (bt puts the tested bit in CF).
136 let Defs = [EFLAGS] in {
// bt r/m64, r64 (0F A3): test the bit of $src1 selected by $src2.
137 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
138 "bt{q}\t{$src2, $src1|$src1, $src2}",
139 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
141 // Unlike with the register+register form, the memory+register form of the
142 // bt instruction does not ignore the high bits of the index. From ISel's
143 // perspective, this is pretty bizarre. Disable these instructions for now.
// bt mem64, r64 (0F A3): kept for the assembler/disassembler only; the ISel
// pattern is deliberately commented out per the note above.
144 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
145 "bt{q}\t{$src2, $src1|$src1, $src2}",
146 // [(X86bt (loadi64 addr:$src1), GR64:$src2),
147 // (implicit EFLAGS)]
// NOTE(review): the terminating `[]>, TB;` of BT64mr appears to be elided
// from this excerpt — confirm against the full file.
// bt r64, imm8 (0F BA /4).
151 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
152 "bt{q}\t{$src2, $src1|$src1, $src2}",
153 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
154 // Note that these instructions don't need FastBTMem because that
155 // only applies when the other operand is in a register. When it's
156 // an immediate, bt is still fast.
// bt mem64, imm8 (0F BA /4, memory form) — selectable, unlike BT64mr.
157 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
158 "bt{q}\t{$src2, $src1|$src1, $src2}",
159 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
160 i64immSExt8:$src2))]>, TB;
// btc (bit test and complement, 0F BB and 0F BA /7): assembler-only for now —
// no ISel patterns, per the TODO above.
162 def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
163 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
164 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
165 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
166 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
167 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
168 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
169 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// btr (bit test and reset, 0F B3 and 0F BA /6): assembler-only, no patterns.
171 def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
172 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
173 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
174 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
175 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
176 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
177 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
178 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// bts (bit test and set, 0F AB and 0F BA /5): assembler-only, no patterns.
180 def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
181 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
182 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
183 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
184 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
185 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
186 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
187 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
192 //===----------------------------------------------------------------------===//
193 // X86-64 SSE Instructions
194 //===----------------------------------------------------------------------===//
196 // Move instructions...
// GR64 -> XMM move (66 REX.W 0F 6E): the scalar is placed in a v2i64 vector.
198 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
199 "mov{d|q}\t{$src, $dst|$dst, $src}",
// NOTE(review): the opening `[(set VR128:$dst,` of this pattern appears to be
// elided from this excerpt — confirm against the full file.
201 (v2i64 (scalar_to_vector GR64:$src)))]>;
// XMM -> GR64 move (66 REX.W 0F 7E): extracts an element of the v2i64 vector.
202 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
203 "mov{d|q}\t{$src, $dst|$dst, $src}",
204 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
// NOTE(review): the rest of this pattern (extracted-element index and the
// record terminator) is elided from this excerpt — confirm against the file.
// GR64 <-> FR64 bitcast moves: same 6E/7E encodings, but typed as scalar f64
// so ISel can select them for i64<->f64 bitconverts.
207 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
208 "mov{d|q}\t{$src, $dst|$dst, $src}",
209 [(set FR64:$dst, (bitconvert GR64:$src))]>;
// Load 64 bits from memory into an FR64 as a bitcast (movq, F3 0F 7E form).
210 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
211 "movq\t{$src, $dst|$dst, $src}",
212 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
// FR64 -> GR64 bitcast move.
214 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
215 "mov{d|q}\t{$src, $dst|$dst, $src}",
216 [(set GR64:$dst, (bitconvert FR64:$src))]>;
// Store an FR64 to memory as its raw 64-bit integer pattern.
217 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
218 "movq\t{$src, $dst|$dst, $src}",
219 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;