using namespace llvm;
#define MRM_MAPPING \
+ MAP(C0, 32) \
MAP(C1, 33) \
MAP(C2, 34) \
MAP(C3, 35) \
MAP(C4, 36) \
- MAP(C8, 37) \
- MAP(C9, 38) \
- MAP(CA, 39) \
- MAP(CB, 40) \
- MAP(E8, 41) \
- MAP(F0, 42) \
- MAP(F8, 45) \
- MAP(F9, 46) \
- MAP(D0, 47) \
- MAP(D1, 48) \
- MAP(D4, 49) \
- MAP(D5, 50) \
- MAP(D6, 51) \
- MAP(D8, 52) \
- MAP(D9, 53) \
- MAP(DA, 54) \
- MAP(DB, 55) \
- MAP(DC, 56) \
- MAP(DD, 57) \
- MAP(DE, 58) \
- MAP(DF, 59)
+ MAP(C5, 37) \
+ MAP(C6, 38) \
+ MAP(C7, 39) \
+ MAP(C8, 40) \
+ MAP(C9, 41) \
+ MAP(CA, 42) \
+ MAP(CB, 43) \
+ MAP(CC, 44) \
+ MAP(CD, 45) \
+ MAP(CE, 46) \
+ MAP(CF, 47) \
+ MAP(D0, 48) \
+ MAP(D1, 49) \
+ MAP(D2, 50) \
+ MAP(D3, 51) \
+ MAP(D4, 52) \
+ MAP(D5, 53) \
+ MAP(D6, 54) \
+ MAP(D7, 55) \
+ MAP(D8, 56) \
+ MAP(D9, 57) \
+ MAP(DA, 58) \
+ MAP(DB, 59) \
+ MAP(DC, 60) \
+ MAP(DD, 61) \
+ MAP(DE, 62) \
+ MAP(DF, 63) \
+ MAP(E0, 64) \
+ MAP(E1, 65) \
+ MAP(E2, 66) \
+ MAP(E3, 67) \
+ MAP(E4, 68) \
+ MAP(E5, 69) \
+ MAP(E6, 70) \
+ MAP(E7, 71) \
+ MAP(E8, 72) \
+ MAP(E9, 73) \
+ MAP(EA, 74) \
+ MAP(EB, 75) \
+ MAP(EC, 76) \
+ MAP(ED, 77) \
+ MAP(EE, 78) \
+ MAP(EF, 79) \
+ MAP(F0, 80) \
+ MAP(F1, 81) \
+ MAP(F2, 82) \
+ MAP(F3, 83) \
+ MAP(F4, 84) \
+ MAP(F5, 85) \
+ MAP(F6, 86) \
+ MAP(F7, 87) \
+ MAP(F8, 88) \
+ MAP(F9, 89) \
+ MAP(FA, 90) \
+ MAP(FB, 91) \
+ MAP(FC, 92) \
+ MAP(FD, 93) \
+ MAP(FE, 94) \
+ MAP(FF, 95)
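+ // The mapping above is deliberately contiguous: fixed ModRM byte 0xC0+N
+ // gets form value 32+N (MAP(C0, 32) through MAP(FF, 95)), so the decode
+ // path can recover the byte arithmetically rather than case by case,
+ // e.g. MRM_D8 = 56 gives 0xC0 + (56 - 32) = 0xD8.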
// A clone of X86 since we can't depend on something that is generated.
namespace X86Local {
RawFrmSrc = 8,
RawFrmDst = 9,
RawFrmDstSrc = 10,
+ RawFrmImm8 = 11,
+ RawFrmImm16 = 12,
MRMXr = 14, MRMXm = 15,
MRM0r = 16, MRM1r = 17, MRM2r = 18, MRM3r = 19,
MRM4r = 20, MRM5r = 21, MRM6r = 22, MRM7r = 23,
MRM0m = 24, MRM1m = 25, MRM2m = 26, MRM3m = 27,
MRM4m = 28, MRM5m = 29, MRM6m = 30, MRM7m = 31,
- RawFrmImm8 = 43,
- RawFrmImm16 = 44,
#define MAP(from, to) MRM_##from = to,
MRM_MAPPING
#undef MAP
};
enum {
- OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6,
- D8 = 7, D9 = 8, DA = 9, DB = 10,
- DC = 11, DD = 12, DE = 13, DF = 14,
- A6 = 15, A7 = 16
+ OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6
};
enum {
- PD = 1, XS = 2, XD = 3
+ PS = 1, PD = 2, XS = 3, XD = 4
};
enum {
OpSize16 = 1, OpSize32 = 2
};
+
+ enum {
+ AdSize16 = 1, AdSize32 = 2, AdSize64 = 3
+ };
}
using namespace X86Disassembler;
return;
}
- OpPrefix = byteFromRec(Rec->getValueAsDef("OpPrefix"), "Value");
- OpMap = byteFromRec(Rec->getValueAsDef("OpMap"), "Value");
+ OpPrefix = byteFromRec(Rec, "OpPrefixBits");
+ OpMap = byteFromRec(Rec, "OpMapBits");
Opcode = byteFromRec(Rec, "Opcode");
Form = byteFromRec(Rec, "FormBits");
- Encoding = byteFromRec(Rec->getValueAsDef("OpEnc"), "Value");
+ Encoding = byteFromRec(Rec, "OpEncBits");
- OpSize = byteFromRec(Rec->getValueAsDef("OpSize"), "Value");
- HasAdSizePrefix = Rec->getValueAsBit("hasAdSizePrefix");
+ OpSize = byteFromRec(Rec, "OpSizeBits");
+ AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
HasVEX_4VOp3 = Rec->getValueAsBit("hasVEX_4VOp3");
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
HasEVEX_KZ = Rec->getValueAsBit("hasEVEX_Z");
HasEVEX_B = Rec->getValueAsBit("hasEVEX_B");
- HasREPPrefix = Rec->getValueAsBit("hasREPPrefix");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
ForceDisassemble = Rec->getValueAsBit("ForceDisassemble");
+ CD8_Scale = byteFromRec(Rec, "CD8_Scale");
Name = Rec->getName();
AsmString = Rec->getValueAsString("AsmString");
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_W_XD);
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
} else if (HasVEX_LPrefix) {
// VEX_L
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_XD);
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
}
else if (HasEVEX_L2Prefix && HasVEX_WPrefix) {
// EVEX_L2 & VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
} else if (HasEVEX_L2Prefix) {
// EVEX_L2
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_OPSIZE);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_XS);
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
}
else if (HasVEX_WPrefix) {
// VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_W_XD);
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
}
if (HasVEX_LPrefix && HasVEX_WPrefix) {
// VEX_L & VEX_W
if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_L_W_OPSIZE;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_L_W_XS;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_W_XD;
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX_L_W;
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
} else if (OpPrefix == X86Local::PD && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
else if (OpPrefix == X86Local::PD && HasVEX_WPrefix)
insnContext = IC_VEX_W_OPSIZE;
else if (HasVEX_WPrefix && OpPrefix == X86Local::XS)
insnContext = IC_VEX_W_XS;
else if (HasVEX_WPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_W_XD;
- else if (HasVEX_WPrefix)
+ else if (HasVEX_WPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_W;
- else if (HasVEX_LPrefix)
+ else if (HasVEX_LPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_L;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_XS;
- else
+ else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX;
- } else if (Is64Bit || HasREX_WPrefix) {
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (Is64Bit || HasREX_WPrefix || AdSize == X86Local::AdSize64) {
if (HasREX_WPrefix && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
insnContext = IC_64BIT_REXW_OPSIZE;
+ else if (HasREX_WPrefix && AdSize == X86Local::AdSize32)
+ insnContext = IC_64BIT_REXW_ADSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_64BIT_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_XS_OPSIZE;
+ else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize32)
+ insnContext = IC_64BIT_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_64BIT_OPSIZE;
- else if (HasAdSizePrefix)
+ else if (AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_ADSIZE;
else if (HasREX_WPrefix && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_REXW_XS;
} else {
if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_XS_OPSIZE;
+ else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize16)
+ insnContext = IC_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_OPSIZE;
- else if (HasAdSizePrefix)
+ else if (AdSize == X86Local::AdSize16)
insnContext = IC_ADSIZE;
else if (OpPrefix == X86Local::XD)
insnContext = IC_XD;
- else if (OpPrefix == X86Local::XS || HasREPPrefix)
+ else if (OpPrefix == X86Local::XS)
insnContext = IC_XS;
else
insnContext = IC;
return insnContext;
}
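+ // Worked example of the selection above: an EVEX-encoded instruction with
+ // VEX_L, VEX_W and the PD (0x66) prefix selects
+ // EVEX_KB(IC_EVEX_L_W_OPSIZE), and the EVEX_KB macro then picks the
+ // _K/_KZ/_B variant of that context from the masking and broadcast bits.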
+void RecognizableInstr::adjustOperandEncoding(OperandEncoding &encoding) {
+ // The scaling factor for AVX512 compressed displacement encoding is an
+ // instruction attribute. Adjust the ModRM encoding type to include the
+ // scale for compressed displacement.
+ if (encoding != ENCODING_RM || CD8_Scale == 0)
+ return;
+ encoding = (OperandEncoding)(encoding + Log2_32(CD8_Scale));
+ assert(encoding <= ENCODING_RM_CD64 && "Invalid CDisp scaling");
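+ // e.g. CD8_Scale == 2 yields ENCODING_RM + 1 == ENCODING_RM_CD2 and
+ // CD8_Scale == 64 yields ENCODING_RM + 6 == ENCODING_RM_CD64; this relies
+ // on the ENCODING_RM_CD* enumerators following ENCODING_RM consecutively
+ // in power-of-two order, which the assert above bounds-checks.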
+}
+
void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned &numPhysicalOperands,
const std::string &typeName = (*Operands)[operandIndex].Rec->getName();
- Spec->operands[operandIndex].encoding = encodingFromString(typeName,
- OpSize);
+ OperandEncoding encoding = encodingFromString(typeName, OpSize);
+ // Adjust the encoding type for an operand based on the instruction.
+ adjustOperandEncoding(encoding);
+ Spec->operands[operandIndex].encoding = encoding;
Spec->operands[operandIndex].type = typeFromString(typeName,
HasREX_WPrefix, OpSize);
assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
- if (OperandList[operandIndex].Constraints.size()) {
+ if (!OperandList[operandIndex].Constraints.empty()) {
const CGIOperandList::ConstraintInfo &Constraint =
OperandList[operandIndex].Constraints[0];
if (Constraint.isTied()) {
// physicalOperandIndex should always be < numPhysicalOperands
unsigned physicalOperandIndex = 0;
+ // Given the set of prefix bits, how many additional operands does the
+ // instruction have?
+ unsigned additionalOperands = 0;
+ if (HasVEX_4V || HasVEX_4VOp3)
+ ++additionalOperands;
+ if (HasEVEX_K)
+ ++additionalOperands;
+
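+ // For instance, a masked EVEX instruction sets both hasVEX_4V (a source
+ // register in VEX.vvvv) and hasEVEX_K (a writemask register), so each form
+ // below expects two physical operands beyond its base pattern.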
switch (Form) {
default: llvm_unreachable("Unhandled form");
case X86Local::RawFrmSrc:
break;
case X86Local::MRMDestReg:
// Operand 1 is a register operand in the R/M field.
+ // - In AVX512 there may be a mask operand here -
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
- if (HasVEX_4V)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 &&
- "Unexpected number of operands for MRMDestRegFrm with VEX_4V");
- else
- assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
- "Unexpected number of operands for MRMDestRegFrm");
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMDestRegFrm");
HANDLE_OPERAND(rmRegister)
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
- if (HasVEX_4V)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 &&
- "Unexpected number of operands for MRMDestMemFrm with VEX_4V");
- else
- assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
- "Unexpected number of operands for MRMDestMemFrm");
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMDestMemFrm with VEX_4V");
+
HANDLE_OPERAND(memory)
if (HasEVEX_K)
// Operand 3 (optional) is an immediate.
// Operand 4 (optional) is an immediate.
- if (HasVEX_4V || HasVEX_4VOp3)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
- "Unexpected number of operands for MRMSrcRegFrm with VEX_4V");
- else
- assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 4 &&
- "Unexpected number of operands for MRMSrcRegFrm");
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 4 + additionalOperands &&
+ "Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
- if (HasVEX_4V || HasVEX_4VOp3)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
- "Unexpected number of operands for MRMSrcMemFrm with VEX_4V");
- else
- assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
- "Unexpected number of operands for MRMSrcMemFrm");
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 4 + additionalOperands &&
+ "Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
case X86Local::MRM5r:
case X86Local::MRM6r:
case X86Local::MRM7r:
- {
- // Operand 1 is a register operand in the R/M field.
- // Operand 2 (optional) is an immediate or relocation.
- // Operand 3 (optional) is an immediate.
- unsigned kOp = (HasEVEX_K) ? 1:0;
- unsigned Op4v = (HasVEX_4V) ? 1:0;
- if (numPhysicalOperands > 3 + kOp + Op4v)
- llvm_unreachable("Unexpected number of operands for MRMnr");
- }
+ // Operand 1 is a register operand in the R/M field.
+ // Operand 2 (optional) is an immediate or relocation.
+ // Operand 3 (optional) is an immediate.
+ assert(numPhysicalOperands >= 0 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMnr");
+
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
case X86Local::MRM5m:
case X86Local::MRM6m:
case X86Local::MRM7m:
- {
- // Operand 1 is a memory operand (possibly SIB-extended)
- // Operand 2 (optional) is an immediate or relocation.
- unsigned kOp = (HasEVEX_K) ? 1:0;
- unsigned Op4v = (HasVEX_4V) ? 1:0;
- if (numPhysicalOperands < 1 + kOp + Op4v ||
- numPhysicalOperands > 2 + kOp + Op4v)
- llvm_unreachable("Unexpected number of operands for MRMnm");
- }
+ // Operand 1 is a memory operand (possibly SIB-extended)
+ // Operand 2 (optional) is an immediate or relocation.
+ assert(numPhysicalOperands >= 1 + additionalOperands &&
+ numPhysicalOperands <= 2 + additionalOperands &&
+ "Unexpected number of operands for MRMnm");
+
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
if (HasEVEX_K)
HANDLE_OPERAND(relocation)
}
break;
- case X86Local::MRM_C1:
- case X86Local::MRM_C2:
- case X86Local::MRM_C3:
- case X86Local::MRM_C4:
- case X86Local::MRM_C8:
- case X86Local::MRM_C9:
- case X86Local::MRM_CA:
- case X86Local::MRM_CB:
- case X86Local::MRM_E8:
- case X86Local::MRM_F0:
- case X86Local::MRM_F9:
- case X86Local::MRM_D0:
- case X86Local::MRM_D1:
- case X86Local::MRM_D4:
- case X86Local::MRM_D5:
- case X86Local::MRM_D6:
- case X86Local::MRM_D8:
- case X86Local::MRM_D9:
- case X86Local::MRM_DA:
- case X86Local::MRM_DB:
- case X86Local::MRM_DC:
- case X86Local::MRM_DD:
- case X86Local::MRM_DE:
- case X86Local::MRM_DF:
+ case X86Local::MRM_C0: case X86Local::MRM_C1: case X86Local::MRM_C2:
+ case X86Local::MRM_C3: case X86Local::MRM_C4: case X86Local::MRM_C8:
+ case X86Local::MRM_C9: case X86Local::MRM_CA: case X86Local::MRM_CB:
+ case X86Local::MRM_CF: case X86Local::MRM_D0: case X86Local::MRM_D1:
+ case X86Local::MRM_D4: case X86Local::MRM_D5: case X86Local::MRM_D6:
+ case X86Local::MRM_D7: case X86Local::MRM_D8: case X86Local::MRM_D9:
+ case X86Local::MRM_DA: case X86Local::MRM_DB: case X86Local::MRM_DC:
+ case X86Local::MRM_DD: case X86Local::MRM_DE: case X86Local::MRM_DF:
+ case X86Local::MRM_E0: case X86Local::MRM_E1: case X86Local::MRM_E2:
+ case X86Local::MRM_E3: case X86Local::MRM_E4: case X86Local::MRM_E5:
+ case X86Local::MRM_E8: case X86Local::MRM_E9: case X86Local::MRM_EA:
+ case X86Local::MRM_EB: case X86Local::MRM_EC: case X86Local::MRM_ED:
+ case X86Local::MRM_EE: case X86Local::MRM_F0: case X86Local::MRM_F1:
+ case X86Local::MRM_F2: case X86Local::MRM_F3: case X86Local::MRM_F4:
+ case X86Local::MRM_F5: case X86Local::MRM_F6: case X86Local::MRM_F7:
+ case X86Local::MRM_F9: case X86Local::MRM_FA: case X86Local::MRM_FB:
+ case X86Local::MRM_FC: case X86Local::MRM_FD: case X86Local::MRM_FE:
+ case X86Local::MRM_FF:
// Ignored.
break;
}
// Special cases where the LLVM tables are not complete
#define MAP(from, to) \
- case X86Local::MRM_##from: \
- filter = new ExactFilter(0x##from); \
- break;
+ case X86Local::MRM_##from:
OpcodeType opcodeType = (OpcodeType)-1;
- ModRMFilter* filter = NULL;
+ ModRMFilter* filter = nullptr;
uint8_t opcodeToSet = 0;
switch (OpMap) {
case X86Local::TB:
case X86Local::T8:
case X86Local::TA:
- case X86Local::A6:
- case X86Local::A7:
case X86Local::XOP8:
case X86Local::XOP9:
case X86Local::XOPA:
case X86Local::TB: opcodeType = TWOBYTE; break;
case X86Local::T8: opcodeType = THREEBYTE_38; break;
case X86Local::TA: opcodeType = THREEBYTE_3A; break;
- case X86Local::A6: opcodeType = THREEBYTE_A6; break;
- case X86Local::A7: opcodeType = THREEBYTE_A7; break;
case X86Local::XOP8: opcodeType = XOP8_MAP; break;
case X86Local::XOP9: opcodeType = XOP9_MAP; break;
case X86Local::XOPA: opcodeType = XOPA_MAP; break;
filter = new ExtendedFilter(false, Form - X86Local::MRM0m);
break;
MRM_MAPPING
+ filter = new ExactFilter(0xC0 + Form - X86Local::MRM_C0);
+ break;
} // switch (Form)
opcodeToSet = Opcode;
break;
- case X86Local::D8:
- case X86Local::D9:
- case X86Local::DA:
- case X86Local::DB:
- case X86Local::DC:
- case X86Local::DD:
- case X86Local::DE:
- case X86Local::DF:
- assert(Opcode >= 0xc0 && "Unexpected opcode for an escape opcode");
- assert(Form == X86Local::RawFrm);
- opcodeType = ONEBYTE;
- filter = new ExactFilter(Opcode);
- opcodeToSet = 0xd8 + (OpMap - X86Local::D8);
- break;
} // switch (OpMap)
+ unsigned AddressSize = 0;
+ switch (AdSize) {
+ case X86Local::AdSize16: AddressSize = 16; break;
+ case X86Local::AdSize32: AddressSize = 32; break;
+ case X86Local::AdSize64: AddressSize = 64; break;
+ }
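+ // The address size is passed through to setTableFields() so the tables can
+ // keep apart encodings that differ only in the 0x67 address-size prefix,
+ // such as the moffs MOV forms typed offset16_*/offset32_*/offset64_*
+ // below.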
+
assert(opcodeType != (OpcodeType)-1 &&
"Opcode type not set");
assert(filter && "Filter not set");
insnContext(),
currentOpcode,
*filter,
- UID, Is32Bit, IgnoresVEX_L);
+ UID, Is32Bit, IgnoresVEX_L, AddressSize);
} else {
tables.setTableFields(opcodeType,
insnContext(),
opcodeToSet,
*filter,
- UID, Is32Bit, IgnoresVEX_L);
+ UID, Is32Bit, IgnoresVEX_L, AddressSize);
}
delete filter;
TYPE("i32mem", TYPE_Mv)
TYPE("i32imm", TYPE_IMMv)
TYPE("i32i8imm", TYPE_IMM32)
- TYPE("u32u8imm", TYPE_IMM32)
TYPE("GR32", TYPE_R32)
TYPE("GR32orGR64", TYPE_R32)
TYPE("i64mem", TYPE_Mv)
TYPE("GR64", TYPE_R64)
TYPE("i8mem", TYPE_M8)
TYPE("i8imm", TYPE_IMM8)
+ TYPE("u8imm", TYPE_UIMM8)
+ TYPE("i32u8imm", TYPE_UIMM8)
TYPE("GR8", TYPE_R8)
TYPE("VR128", TYPE_XMM128)
TYPE("VR128X", TYPE_XMM128)
TYPE("f128mem", TYPE_M128)
TYPE("f256mem", TYPE_M256)
TYPE("f512mem", TYPE_M512)
+ TYPE("FR128", TYPE_XMM128)
TYPE("FR64", TYPE_XMM64)
TYPE("FR64X", TYPE_XMM64)
TYPE("f64mem", TYPE_M64FP)
TYPE("i16imm_pcrel", TYPE_REL16)
TYPE("i32imm_pcrel", TYPE_REL32)
TYPE("SSECC", TYPE_IMM3)
+ TYPE("XOPCC", TYPE_IMM3)
TYPE("AVXCC", TYPE_IMM5)
+ TYPE("AVX512ICC", TYPE_AVX512ICC)
TYPE("AVX512RC", TYPE_IMM32)
- TYPE("brtarget", TYPE_RELv)
- TYPE("uncondbrtarget", TYPE_RELv)
+ TYPE("brtarget32", TYPE_RELv)
+ TYPE("brtarget16", TYPE_RELv)
TYPE("brtarget8", TYPE_REL8)
TYPE("f80mem", TYPE_M80FP)
- TYPE("lea32mem", TYPE_LEA)
TYPE("lea64_32mem", TYPE_LEA)
TYPE("lea64mem", TYPE_LEA)
TYPE("VR64", TYPE_MM64)
TYPE("i64imm", TYPE_IMMv)
+ TYPE("anymem", TYPE_M)
TYPE("opaque32mem", TYPE_M1616)
TYPE("opaque48mem", TYPE_M1632)
TYPE("opaque80mem", TYPE_M1664)
TYPE("dstidx16", TYPE_DSTIDX16)
TYPE("dstidx32", TYPE_DSTIDX32)
TYPE("dstidx64", TYPE_DSTIDX64)
- TYPE("offset8", TYPE_MOFFS8)
- TYPE("offset16", TYPE_MOFFS16)
- TYPE("offset32", TYPE_MOFFS32)
- TYPE("offset64", TYPE_MOFFS64)
+ TYPE("offset16_8", TYPE_MOFFS8)
+ TYPE("offset16_16", TYPE_MOFFS16)
+ TYPE("offset16_32", TYPE_MOFFS32)
+ TYPE("offset32_8", TYPE_MOFFS8)
+ TYPE("offset32_16", TYPE_MOFFS16)
+ TYPE("offset32_32", TYPE_MOFFS32)
+ TYPE("offset32_64", TYPE_MOFFS64)
+ TYPE("offset64_8", TYPE_MOFFS8)
+ TYPE("offset64_16", TYPE_MOFFS16)
+ TYPE("offset64_32", TYPE_MOFFS32)
+ TYPE("offset64_64", TYPE_MOFFS64)
TYPE("VR256", TYPE_XMM256)
TYPE("VR256X", TYPE_XMM256)
TYPE("VR512", TYPE_XMM512)
TYPE("VK1", TYPE_VK1)
TYPE("VK1WM", TYPE_VK1)
+ TYPE("VK2", TYPE_VK2)
+ TYPE("VK2WM", TYPE_VK2)
+ TYPE("VK4", TYPE_VK4)
+ TYPE("VK4WM", TYPE_VK4)
TYPE("VK8", TYPE_VK8)
TYPE("VK8WM", TYPE_VK8)
TYPE("VK16", TYPE_VK16)
TYPE("VK16WM", TYPE_VK16)
+ TYPE("VK32", TYPE_VK32)
+ TYPE("VK32WM", TYPE_VK32)
+ TYPE("VK64", TYPE_VK64)
+ TYPE("VK64WM", TYPE_VK64)
TYPE("GR16_NOAX", TYPE_Rv)
TYPE("GR32_NOAX", TYPE_Rv)
TYPE("GR64_NOAX", TYPE_R64)
TYPE("vx32mem", TYPE_M32)
+ TYPE("vx32xmem", TYPE_M32)
TYPE("vy32mem", TYPE_M32)
+ TYPE("vy32xmem", TYPE_M32)
TYPE("vz32mem", TYPE_M32)
TYPE("vx64mem", TYPE_M64)
+ TYPE("vx64xmem", TYPE_M64)
TYPE("vy64mem", TYPE_M64)
TYPE("vy64xmem", TYPE_M64)
TYPE("vz64mem", TYPE_M64)
+ TYPE("BNDR", TYPE_BNDR)
errs() << "Unhandled type string " << s << "\n";
llvm_unreachable("Unhandled type string");
}
ENCODING("i16imm", ENCODING_IW)
}
ENCODING("i32i8imm", ENCODING_IB)
- ENCODING("u32u8imm", ENCODING_IB)
ENCODING("SSECC", ENCODING_IB)
+ ENCODING("XOPCC", ENCODING_IB)
ENCODING("AVXCC", ENCODING_IB)
+ ENCODING("AVX512ICC", ENCODING_IB)
ENCODING("AVX512RC", ENCODING_IB)
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
+ ENCODING("u8imm", ENCODING_IB)
+ ENCODING("i32u8imm", ENCODING_IB)
// This is not a typo. Instructions like BLENDVPD put
// register IDs in 8-bit immediates nowadays.
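+ // (For those forms the disassembler reads the register number out of the
+ // upper four bits of the immediate byte.)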
ENCODING("FR32", ENCODING_IB)
ENCODING("FR64", ENCODING_IB)
+ ENCODING("FR128", ENCODING_IB)
ENCODING("VR128", ENCODING_IB)
ENCODING("VR256", ENCODING_IB)
ENCODING("FR32X", ENCODING_IB)
ENCODING("GR8", ENCODING_RM)
ENCODING("VR128", ENCODING_RM)
ENCODING("VR128X", ENCODING_RM)
+ ENCODING("FR128", ENCODING_RM)
ENCODING("FR64", ENCODING_RM)
ENCODING("FR32", ENCODING_RM)
ENCODING("FR64X", ENCODING_RM)
ENCODING("VR256X", ENCODING_RM)
ENCODING("VR512", ENCODING_RM)
ENCODING("VK1", ENCODING_RM)
+ ENCODING("VK2", ENCODING_RM)
+ ENCODING("VK4", ENCODING_RM)
ENCODING("VK8", ENCODING_RM)
ENCODING("VK16", ENCODING_RM)
+ ENCODING("VK32", ENCODING_RM)
+ ENCODING("VK64", ENCODING_RM)
+ ENCODING("BNDR", ENCODING_RM)
errs() << "Unhandled R/M register encoding " << s << "\n";
llvm_unreachable("Unhandled R/M register encoding");
}
ENCODING("GR64", ENCODING_REG)
ENCODING("GR8", ENCODING_REG)
ENCODING("VR128", ENCODING_REG)
+ ENCODING("FR128", ENCODING_REG)
ENCODING("FR64", ENCODING_REG)
ENCODING("FR32", ENCODING_REG)
ENCODING("VR64", ENCODING_REG)
ENCODING("FR32X", ENCODING_REG)
ENCODING("VR512", ENCODING_REG)
ENCODING("VK1", ENCODING_REG)
+ ENCODING("VK2", ENCODING_REG)
+ ENCODING("VK4", ENCODING_REG)
ENCODING("VK8", ENCODING_REG)
ENCODING("VK16", ENCODING_REG)
+ ENCODING("VK32", ENCODING_REG)
+ ENCODING("VK64", ENCODING_REG)
ENCODING("VK1WM", ENCODING_REG)
+ ENCODING("VK2WM", ENCODING_REG)
+ ENCODING("VK4WM", ENCODING_REG)
ENCODING("VK8WM", ENCODING_REG)
ENCODING("VK16WM", ENCODING_REG)
+ ENCODING("VK32WM", ENCODING_REG)
+ ENCODING("VK64WM", ENCODING_REG)
+ ENCODING("BNDR", ENCODING_REG)
errs() << "Unhandled reg/opcode register encoding " << s << "\n";
llvm_unreachable("Unhandled reg/opcode register encoding");
}
ENCODING("GR32", ENCODING_VVVV)
ENCODING("GR64", ENCODING_VVVV)
ENCODING("FR32", ENCODING_VVVV)
+ ENCODING("FR128", ENCODING_VVVV)
ENCODING("FR64", ENCODING_VVVV)
ENCODING("VR128", ENCODING_VVVV)
ENCODING("VR256", ENCODING_VVVV)
ENCODING("VR256X", ENCODING_VVVV)
ENCODING("VR512", ENCODING_VVVV)
ENCODING("VK1", ENCODING_VVVV)
+ ENCODING("VK2", ENCODING_VVVV)
+ ENCODING("VK4", ENCODING_VVVV)
ENCODING("VK8", ENCODING_VVVV)
ENCODING("VK16", ENCODING_VVVV)
+ ENCODING("VK32", ENCODING_VVVV)
+ ENCODING("VK64", ENCODING_VVVV)
errs() << "Unhandled VEX.vvvv register encoding " << s << "\n";
llvm_unreachable("Unhandled VEX.vvvv register encoding");
}
RecognizableInstr::writemaskRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("VK1WM", ENCODING_WRITEMASK)
+ ENCODING("VK2WM", ENCODING_WRITEMASK)
+ ENCODING("VK4WM", ENCODING_WRITEMASK)
ENCODING("VK8WM", ENCODING_WRITEMASK)
ENCODING("VK16WM", ENCODING_WRITEMASK)
+ ENCODING("VK32WM", ENCODING_WRITEMASK)
+ ENCODING("VK64WM", ENCODING_WRITEMASK)
errs() << "Unhandled mask register encoding " << s << "\n";
llvm_unreachable("Unhandled mask register encoding");
}
ENCODING("i256mem", ENCODING_RM)
ENCODING("i512mem", ENCODING_RM)
ENCODING("f80mem", ENCODING_RM)
- ENCODING("lea32mem", ENCODING_RM)
ENCODING("lea64_32mem", ENCODING_RM)
ENCODING("lea64mem", ENCODING_RM)
+ ENCODING("anymem", ENCODING_RM)
ENCODING("opaque32mem", ENCODING_RM)
ENCODING("opaque48mem", ENCODING_RM)
ENCODING("opaque80mem", ENCODING_RM)
ENCODING("opaque512mem", ENCODING_RM)
ENCODING("vx32mem", ENCODING_RM)
+ ENCODING("vx32xmem", ENCODING_RM)
ENCODING("vy32mem", ENCODING_RM)
+ ENCODING("vy32xmem", ENCODING_RM)
ENCODING("vz32mem", ENCODING_RM)
ENCODING("vx64mem", ENCODING_RM)
+ ENCODING("vx64xmem", ENCODING_RM)
ENCODING("vy64mem", ENCODING_RM)
ENCODING("vy64xmem", ENCODING_RM)
ENCODING("vz64mem", ENCODING_RM)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
+ ENCODING("u8imm", ENCODING_IB)
+ ENCODING("i32u8imm", ENCODING_IB)
ENCODING("i64i32imm_pcrel", ENCODING_ID)
ENCODING("i16imm_pcrel", ENCODING_IW)
ENCODING("i32imm_pcrel", ENCODING_ID)
- ENCODING("brtarget", ENCODING_Iv)
+ ENCODING("brtarget32", ENCODING_Iv)
+ ENCODING("brtarget16", ENCODING_Iv)
ENCODING("brtarget8", ENCODING_IB)
ENCODING("i64imm", ENCODING_IO)
- ENCODING("offset8", ENCODING_Ia)
- ENCODING("offset16", ENCODING_Ia)
- ENCODING("offset32", ENCODING_Ia)
- ENCODING("offset64", ENCODING_Ia)
+ ENCODING("offset16_8", ENCODING_Ia)
+ ENCODING("offset16_16", ENCODING_Ia)
+ ENCODING("offset16_32", ENCODING_Ia)
+ ENCODING("offset32_8", ENCODING_Ia)
+ ENCODING("offset32_16", ENCODING_Ia)
+ ENCODING("offset32_32", ENCODING_Ia)
+ ENCODING("offset32_64", ENCODING_Ia)
+ ENCODING("offset64_8", ENCODING_Ia)
+ ENCODING("offset64_16", ENCODING_Ia)
+ ENCODING("offset64_32", ENCODING_Ia)
+ ENCODING("offset64_64", ENCODING_Ia)
ENCODING("srcidx8", ENCODING_SI)
ENCODING("srcidx16", ENCODING_SI)
ENCODING("srcidx32", ENCODING_SI)