CBackend
CellSPU
CppBackend
+ Hexagon
Mips
MBlaze
MSP430
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011 Free Software Foundation, Inc.
-timestamp='2011-08-23'
+timestamp='2011-11-02'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
| c4x | clipper \
| d10v | d30v | dlx | dsp16xx \
| fido | fr30 | frv \
+ | hexagon \
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
| elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
| h8300-* | h8500-* \
+ | hexagon-* \
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
mips-*) llvm_cv_target_arch="Mips" ;;
xcore-*) llvm_cv_target_arch="XCore" ;;
msp430-*) llvm_cv_target_arch="MSP430" ;;
+ hexagon-*) llvm_cv_target_arch="Hexagon" ;;
mblaze-*) llvm_cv_target_arch="MBlaze" ;;
ptx-*) llvm_cv_target_arch="PTX" ;;
*) llvm_cv_target_arch="Unknown" ;;
Mips) AC_SUBST(TARGET_HAS_JIT,1) ;;
XCore) AC_SUBST(TARGET_HAS_JIT,0) ;;
MSP430) AC_SUBST(TARGET_HAS_JIT,0) ;;
+ Hexagon) AC_SUBST(TARGET_HAS_JIT,0) ;;
MBlaze) AC_SUBST(TARGET_HAS_JIT,0) ;;
PTX) AC_SUBST(TARGET_HAS_JIT,0) ;;
*) AC_SUBST(TARGET_HAS_JIT,0) ;;
TARGETS_TO_BUILD=""
AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets],
[Build specific host targets: all or target1,target2,... Valid targets are:
- host, x86, x86_64, sparc, powerpc, arm, mips, spu,
+ host, x86, x86_64, sparc, powerpc, arm, mips, spu, hexagon,
xcore, msp430, ptx, cbe, and cpp (default=all)]),,
enableval=all)
if test "$enableval" = host-only ; then
enableval=host
fi
case "$enableval" in
- all) TARGETS_TO_BUILD="X86 Sparc PowerPC ARM Mips CellSPU XCore MSP430 CBackend CppBackend MBlaze PTX" ;;
+ all) TARGETS_TO_BUILD="X86 Sparc PowerPC ARM Mips CellSPU XCore MSP430 CBackend CppBackend MBlaze PTX Hexagon" ;;
*)for a_target in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_target" in
x86) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
msp430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;;
cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;;
+ hexagon) TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
mblaze) TARGETS_TO_BUILD="MBlaze $TARGETS_TO_BUILD" ;;
ptx) TARGETS_TO_BUILD="PTX $TARGETS_TO_BUILD" ;;
host) case "$llvm_cv_target_arch" in
CellSPU|SPU) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;
XCore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;;
MSP430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
+ Hexagon) TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
PTX) TARGETS_TO_BUILD="PTX $TARGETS_TO_BUILD" ;;
*) AC_MSG_ERROR([Can not set target to build]) ;;
esac ;;
--enable-targets Build specific host targets: all or
target1,target2,... Valid targets are: host, x86,
x86_64, sparc, powerpc, arm, mips, spu, xcore,
- msp430, ptx, cbe, and cpp (default=all)
+ hexagon, msp430, ptx, cbe, and cpp (default=all)
--enable-cbe-printf-a Enable C Backend output with hex floating point via
%a (default is YES)
--enable-bindings Build specific language bindings:
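
For reference, a minimal usage sketch (assuming autoconf has been re-run so the help text above is in effect): the new backend is enabled like any other target, e.g.

    ./configure --enable-targets=hexagon

It can also be combined with other targets as --enable-targets=x86,hexagon, and the default --enable-targets=all now builds Hexagon as well.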
mips-*) llvm_cv_target_arch="Mips" ;;
xcore-*) llvm_cv_target_arch="XCore" ;;
msp430-*) llvm_cv_target_arch="MSP430" ;;
+ hexagon-*) llvm_cv_target_arch="Hexagon" ;;
mblaze-*) llvm_cv_target_arch="MBlaze" ;;
ptx-*) llvm_cv_target_arch="PTX" ;;
*) llvm_cv_target_arch="Unknown" ;;
XCore) TARGET_HAS_JIT=0
;;
MSP430) TARGET_HAS_JIT=0
+ ;;
+ Hexagon) TARGET_HAS_JIT=0
;;
MBlaze) TARGET_HAS_JIT=0
;;
enableval=host
fi
case "$enableval" in
- all) TARGETS_TO_BUILD="X86 Sparc PowerPC ARM Mips CellSPU XCore MSP430 CBackend CppBackend MBlaze PTX" ;;
+ all) TARGETS_TO_BUILD="X86 Sparc PowerPC ARM Mips CellSPU XCore MSP430 CBackend CppBackend MBlaze PTX Hexagon" ;;
*)for a_target in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_target" in
x86) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
msp430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;;
cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;;
+ hexagon) TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
mblaze) TARGETS_TO_BUILD="MBlaze $TARGETS_TO_BUILD" ;;
ptx) TARGETS_TO_BUILD="PTX $TARGETS_TO_BUILD" ;;
host) case "$llvm_cv_target_arch" in
CellSPU|SPU) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;
XCore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;;
MSP430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
+ Hexagon) TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
PTX) TARGETS_TO_BUILD="PTX $TARGETS_TO_BUILD" ;;
*) { { echo "$as_me:$LINENO: error: Can not set target to build" >&5
echo "$as_me: error: Can not set target to build" >&2;}
<th>Feature</th>
<th>ARM</th>
<th>CellSPU</th>
+ <th>Hexagon</th>
<th>MBlaze</th>
<th>MSP430</th>
<th>Mips</th>
<td><a href="#feat_reliable">is generally reliable</a></td>
<td class="yes"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="yes"></td> <!-- Hexagon -->
<td class="no"></td> <!-- MBlaze -->
<td class="unknown"></td> <!-- MSP430 -->
<td class="yes"></td> <!-- Mips -->
<td><a href="#feat_asmparser">assembly parser</a></td>
<td class="no"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="no"></td> <!-- Hexagon -->
<td class="yes"></td> <!-- MBlaze -->
<td class="no"></td> <!-- MSP430 -->
<td class="no"></td> <!-- Mips -->
<td><a href="#feat_disassembler">disassembler</a></td>
<td class="yes"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="no"></td> <!-- Hexagon -->
<td class="yes"></td> <!-- MBlaze -->
<td class="no"></td> <!-- MSP430 -->
<td class="no"></td> <!-- Mips -->
<td><a href="#feat_inlineasm">inline asm</a></td>
<td class="yes"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="yes"></td> <!-- Hexagon -->
<td class="yes"></td> <!-- MBlaze -->
<td class="unknown"></td> <!-- MSP430 -->
<td class="no"></td> <!-- Mips -->
<td><a href="#feat_jit">jit</a></td>
<td class="partial"><a href="#feat_jit_arm">*</a></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="no"></td> <!-- Hexagon -->
<td class="no"></td> <!-- MBlaze -->
<td class="unknown"></td> <!-- MSP430 -->
<td class="yes"></td> <!-- Mips -->
<td><a href="#feat_objectwrite">.o file writing</a></td>
<td class="no"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="no"></td> <!-- Hexagon -->
<td class="yes"></td> <!-- MBlaze -->
<td class="no"></td> <!-- MSP430 -->
<td class="no"></td> <!-- Mips -->
<td><a href="#feat_tailcall">tail calls</a></td>
<td class="yes"></td> <!-- ARM -->
<td class="no"></td> <!-- CellSPU -->
+ <td class="yes"></td> <!-- Hexagon -->
<td class="no"></td> <!-- MBlaze -->
<td class="unknown"></td> <!-- MSP430 -->
<td class="no"></td> <!-- Mips -->
arm, // ARM; arm, armv.*, xscale
cellspu, // CellSPU: spu, cellspu
+ hexagon, // Hexagon: hexagon
mips, // MIPS: mips, mipsallegrex
mipsel, // MIPSEL: mipsel, mipsallegrexel, psp
mips64, // MIPS64: mips64
include "llvm/IntrinsicsCellSPU.td"
include "llvm/IntrinsicsXCore.td"
include "llvm/IntrinsicsPTX.td"
+include "llvm/IntrinsicsHexagon.td"
--- /dev/null
+//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the Hexagon-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all Hexagon intrinsics.
+//
+// All Hexagon intrinsics start with "llvm.hexagon.".
+let TargetPrefix = "hexagon" in {
+  /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+ class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+}
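+
+// Illustrative note (the concrete definitions follow later in this file):
+// each helper class below is instantiated with the GCC builtin suffix, for
+// example
+//
+//   def int_hexagon_C2_cmpeq : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeq">;
+//
+// TableGen derives the IR-level name llvm.hexagon.C2.cmpeq from the record
+// name (underscores become dots), and the GCCBuiltin base above concatenates
+// "__builtin_" with the given suffix to form the front-end builtin that maps
+// onto the intrinsic.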
+
+//===----------------------------------------------------------------------===//
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
+// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem]>;
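+//
+// Type abbreviations used in the class names (as read off the definitions in
+// this file): "qi" is a predicate/bool, i1 as a result and i32 in most
+// operand positions; "hi" is i16, "si" is i32, "di" is i64; the "u" variants
+// (usi, udi) are the unsigned GCC builtin types and map to the same i32/i64
+// LLVM types; "mem" is a pointer and "void" a void result.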
+//
+// DEF_FUNCTION_TYPE_1(void_ftype_SI,BT_VOID,BT_INT) ->
+// Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
+// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i16_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
+// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
+// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
+// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
+// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
+// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
+// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
+// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
+// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
+// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
+// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT) ->
+// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_BOOL) ->
+// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
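+// The BUILTIN_INFO(...) comments preceding each definition below appear to
+// record the corresponding GCC builtin name, its function-type macro (return
+// and operand types in the abbreviations summarized above), and its arity.
+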
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeq : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpeqp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpeqp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtup : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsset : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclr : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeqi : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgti : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgti">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgei : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgei">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgeui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgeui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmplt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmplt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpltu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpltu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclri : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_and : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.and">;
+//
+// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_or : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.or">;
+//
+// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_xor : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_andn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.andn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
+//
+def int_hexagon_C2_not : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.not">;
+//
+// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_orn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.orn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
+//
+def int_hexagon_C2_pxfer_map : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.pxfer.map">;
+//
+// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_any8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.any8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_all8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.all8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
+//
+def int_hexagon_C2_vitpack : Hexagon_si_qiqi_Intrinsic<"HEXAGON.C2.vitpack">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_mux : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.mux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxii : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxii">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxir : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxir">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxri : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
+//
+def int_hexagon_C2_vmux : Hexagon_di_qididi_Intrinsic<"HEXAGON.C2.vmux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
+//
+def int_hexagon_C2_mask : Hexagon_di_qi_Intrinsic<"HEXAGON.C2.mask">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbeq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpheq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpweq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpweq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
+//
+def int_hexagon_C2_tfrpr : Hexagon_si_qi_Intrinsic<"HEXAGON.C2.tfrpr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
+//
+def int_hexagon_C2_tfrrp : Hexagon_qi_si_Intrinsic<"HEXAGON.C2.tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysmi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpysmi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsip :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsip">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsin :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsin">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.dpmpyuu.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_up :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.rnd.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyui">;
+//
+// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_maci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.maci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_acci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.acci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_accii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.accii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_nacci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.nacci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_naccii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.naccii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_subacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrmac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrmpy.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_di_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
+//
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.M2.vrcmpys.acc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_si_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1rp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcrotate :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.vcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_add :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.add">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_sub :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.sub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hl">;
+def int_hexagon_A2_addh_l16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.lh">;
+def int_hexagon_A2_addh_l16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hl">;
+def int_hexagon_A2_addh_l16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.lh">;
+def int_hexagon_A2_addh_l16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_aslh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.aslh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_asrh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.asrh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addpsat :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addpsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
+//
+def int_hexagon_A2_addsp :
+Hexagon_di_sidi_Intrinsic<"HEXAGON.A2.addsp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_subp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.subp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
+//
+def int_hexagon_A2_neg :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.neg">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_negsat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.negsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abs :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abssat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abssat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vconj :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vconj">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_negp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.negp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_absp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.absp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_max :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.max">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_maxu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.maxu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_min :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.min">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_minu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.minu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.maxp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.maxup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.minp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.minup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfr :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrsi :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfrsi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_tfrp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.tfrp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrpi :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.tfrpi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combinew :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combineii :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combineii">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfril :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfril">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfrih :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfrih">;
+//
+// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.and">;
+//
+// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.or">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
+//
+def int_hexagon_A2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_xor_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.xor.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subri">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_andir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.andir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_orir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.orir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_andp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.andp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_orp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.orp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_xorp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.xorp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_notp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.notp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtw :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.sxtw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_sat :
+Hexagon_si_di_Intrinsic<"HEXAGON.A2.sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sath :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sath">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satuh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satub :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddubs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vadduhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavghs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavghs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svnavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svadduhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubuhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vraddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vraddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vraddub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vraddub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vradduh :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vradduh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsububs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsububs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubuhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsh :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabshsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabshsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsw :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabswsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabswsat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffw">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vrsadub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vrsadub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vrsadub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgubr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgubr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsl.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_addasl_rrri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.addasl.rrri">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_valignib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.valignib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_valignrb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.valignrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vspliceib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.vspliceib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_vsplicerb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.vsplicerb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsplatrh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.vsplatrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_insert :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.insert">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxb.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxh.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxw.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxd.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_extractu :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.extractu">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S2_insertp :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON.S2.insertp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
+//
+def int_hexagon_S2_extractup :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.S2.extractup">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
+//
+def int_hexagon_S2_insert_rp :
+Hexagon_si_sisidi_Intrinsic<"HEXAGON.S2.insert.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S2_extractu_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON.S2.extractu.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_S2_insertp_rp :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.S2.insertp.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_extractup_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.extractup.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_i :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_r :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.i.svw.trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.r.svw.trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwhs">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathub :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunohb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunohb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunewh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunewh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunowh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunowh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunehb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunehb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwuh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
+//
+def int_hexagon_S2_packhl :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.S2.packhl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
+//
+def int_hexagon_A2_swiz :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.swiz">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathub.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathb.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwh.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwuh.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffob :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffob">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeb :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffoh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffoh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
+//
+def int_hexagon_S2_parityp :
+Hexagon_si_didi_Intrinsic<"HEXAGON.S2.parityp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_lfsp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.lfsp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clbnorm :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.clbnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.clb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl0 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl1 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
+//
+def int_hexagon_S2_clbp :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.clbp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl0p :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl1p :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
+//
+def int_hexagon_S2_brev :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.brev">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct0 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct1 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_interleave :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.interleave">;
+//
+// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_deinterleave :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.deinterleave">;
+
+//
+// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_SI_to_SXTHI_asrh :
+Hexagon_si_si_Intrinsic<"SI.to.SXTHI.asrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.orn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.andn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A4.ornp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A4.andnp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineir :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineir">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineri :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplte :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplte">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpltei :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpltei">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteu">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9 :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9_not">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_or">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.addaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.subaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S4.andnp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S4.ornp">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_xor_xacc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M4.xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andix :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andix">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_ori :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_ori">;
+//
+// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_modwrapu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.modwrapu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr_sat">;
return static_cast<int32_t>(x) == x;
}
+/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedInt(int64_t x) {
+ return isInt<N+S>(x) && (x % (1<<S) == 0);
+}
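+// For example (illustrative): isShiftedInt<8,2>(508) is true, since 508 fits
+// in 10 signed bits and is a multiple of 4, while isShiftedInt<8,2>(510) is
+// false because 510 is not a multiple of 4.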
+
/// isUInt - Checks if an unsigned integer fits into the given bit width.
template<unsigned N>
inline bool isUInt(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
+/// isShiftedUInt<N,S> - Checks if an unsigned integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedUInt(uint64_t x) {
+ return isUInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
/// bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
case arm: return "arm";
case cellspu: return "cellspu";
+ case hexagon: return "hexagon";
case mips: return "mips";
case mipsel: return "mipsel";
case mips64: return "mips64";
case mblaze: return "mblaze";
+ case hexagon: return "hexagon";
+
case sparcv9:
case sparc: return "sparc";
return ppc;
if (Name == "mblaze")
return mblaze;
+ if (Name == "hexagon")
+ return hexagon;
if (Name == "sparc")
return sparc;
if (Name == "sparcv9")
return mips64;
else if (ArchName == "mips64el")
return mips64el;
+ else if (ArchName == "hexagon")
+ return hexagon;
else if (ArchName == "sparc")
return sparc;
else if (ArchName == "sparcv9")
--- /dev/null
+set(LLVM_TARGET_DEFINITIONS Hexagon.td)
+
+tablegen(LLVM HexagonGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM HexagonGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM HexagonGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM HexagonGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM HexagonGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM HexagonGenSubtargetInfo.inc -gen-subtarget)
+tablegen(LLVM HexagonGenIntrinsics.inc -gen-tgt-intrinsic)
+add_public_tablegen_target(HexagonCommonTableGen)
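+# Note: the generated *.inc files above are pulled into the C++ sources through
+# the GET_*_ENUM macros; for example, Hexagon.h includes
+# HexagonGenRegisterInfo.inc under GET_REGINFO_ENUM.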
+
+add_llvm_target(HexagonCodeGen
+ HexagonAsmPrinter.cpp
+ HexagonCallingConvLower.cpp
+ HexagonCFGOptimizer.cpp
+ HexagonExpandPredSpillCode.cpp
+ HexagonFrameLowering.cpp
+ HexagonHardwareLoops.cpp
+ HexagonInstrInfo.cpp
+ HexagonISelDAGToDAG.cpp
+ HexagonISelLowering.cpp
+ HexagonMCAsmInfo.cpp
+ HexagonOptimizeSZExtends.cpp
+ HexagonRegisterInfo.cpp
+ HexagonRemoveSZExtArgs.cpp
+ HexagonSelectionDAGInfo.cpp
+ HexagonSplitTFRCondSets.cpp
+ HexagonSubtarget.cpp
+ HexagonTargetMachine.cpp
+ HexagonTargetObjectFile.cpp
+ )
+
+add_llvm_library_dependencies(LLVMHexagonCodeGen
+ LLVMAsmPrinter
+ LLVMCodeGen
+ LLVMCore
+ LLVMHexagonInfo
+ LLVMSelectionDAG
+ LLVMSupport
+ LLVMTarget
+ )
+
+add_subdirectory(TargetInfo)
--- /dev/null
+//=-- Hexagon.h - Top-level interface for Hexagon representation --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// Hexagon back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_Hexagon_H
+#define TARGET_Hexagon_H
+
+#include <cassert>
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+ class FunctionPass;
+ class TargetMachine;
+ class HexagonTargetMachine;
+ class raw_ostream;
+
+ FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM);
+ FunctionPass *createHexagonDelaySlotFillerPass(TargetMachine &TM);
+ FunctionPass *createHexagonFPMoverPass(TargetMachine &TM);
+ FunctionPass *createHexagonRemoveExtendOps(HexagonTargetMachine &TM);
+ FunctionPass *createHexagonCFGOptimizer(HexagonTargetMachine &TM);
+
+ FunctionPass* createHexagonSplitTFRCondSets(HexagonTargetMachine &TM);
+ FunctionPass* createHexagonExpandPredSpillCode(HexagonTargetMachine &TM);
+
+ FunctionPass *createHexagonHardwareLoops();
+ FunctionPass *createHexagonOptimizeSZExtends();
+ FunctionPass *createHexagonFixupHwLoops();
+
+ extern Target TheHexagonTarget;
+
+} // end namespace llvm
+
+// Defines symbolic names for Hexagon instructions and registers.
+// This defines a mapping from register name to register number.
+//
+
+#define GET_REGINFO_ENUM
+#include "HexagonGenRegisterInfo.inc"
+
+#define GET_INSTRINFO_ENUM
+#include "HexagonGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "HexagonGenSubtargetInfo.inc"
+
+#define Hexagon_POINTER_SIZE 4
+
+#define Hexagon_PointerSize (Hexagon_POINTER_SIZE)
+#define Hexagon_PointerSize_Bits (Hexagon_POINTER_SIZE * 8)
+#define Hexagon_WordSize Hexagon_PointerSize
+#define Hexagon_WordSize_Bits Hexagon_PointerSize_Bits
+
+// allocframe saves LR and FP on stack before allocating
+// a new stack frame. This takes 8 bytes.
+#define HEXAGON_LRFP_SIZE 8
+
+#endif
--- /dev/null
+//===- Hexagon.td - Describe the Hexagon Target Machine ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// Hexagon Subtarget features.
+//
+
+
+// Hexagon Architectures
+def ArchV2 : SubtargetFeature<"v2", "HexagonArchVersion", "V2",
+ "Hexagon v2">;
+def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
+ "Hexagon v3">;
+def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
+ "Hexagon v4">;
+
+//===----------------------------------------------------------------------===//
+// Register File, Calling Conv, Instruction Descriptions
+//===----------------------------------------------------------------------===//
+include "HexagonSchedule.td"
+include "HexagonRegisterInfo.td"
+include "HexagonCallingConv.td"
+include "HexagonInstrInfo.td"
+include "HexagonIntrinsics.td"
+include "HexagonIntrinsicsDerived.td"
+
+
+def HexagonInstrInfo : InstrInfo {
+ // Define how we want to layout our target-specific information field.
+}
+
+//===----------------------------------------------------------------------===//
+// Hexagon processors supported.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, ProcessorItineraries Itin,
+ list<SubtargetFeature> Features>
+ : Processor<Name, Itin, Features>;
+
+def : Proc<"hexagonv2", HexagonItineraries, [ArchV2]>;
+def : Proc<"hexagonv3", HexagonItineraries, [ArchV2, ArchV3]>;
+def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>;
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+
+def Hexagon : Target {
+ // Pull in Instruction Info:
+ let InstructionSet = HexagonInstrInfo;
+}
--- /dev/null
+//===-- HexagonAsmPrinter.cpp - Print machine instrs to Hexagon assembly ----=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to Hexagon assembly language. This printer is
+// the output mechanism used by `llc'.
+//
+//===----------------------------------------------------------------------===//
+
+
+#define DEBUG_TYPE "asm-printer"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<bool> AlignCalls(
+ "hexagon-align-calls", cl::Hidden, cl::init(true),
+ cl::desc("Insert falign after call instruction for Hexagon target"));
+
+
+namespace {
+ class HexagonAsmPrinter : public AsmPrinter {
+ const HexagonSubtarget *Subtarget;
+
+ public:
+ explicit HexagonAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {
+ Subtarget = &TM.getSubtarget<HexagonSubtarget>();
+ }
+
+ virtual const char *getPassName() const {
+ return "Hexagon Assembly Printer";
+ }
+
+    /// printInstruction - This method is automatically generated by tablegen
+    /// from the instruction set description.  It prints the given machine
+    /// instruction to the supplied output stream.
+    void printInstruction(const MachineInstr *MI, raw_ostream &O);
+ virtual void EmitInstruction(const MachineInstr *MI);
+
+ void printOp(const MachineOperand &MO, raw_ostream &O);
+
+ /// printRegister - Print register according to target requirements.
+ ///
+ void printRegister(const MachineOperand &MO, bool R0AsZero,
+ raw_ostream &O) {
+ unsigned RegNo = MO.getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(RegNo) && "Not physreg??");
+ O << getRegisterName(RegNo);
+ }
+
+ void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &OS) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ if (MO.isReg()) {
+ printRegister(MO, false, OS);
+ } else if (MO.isImm()) {
+ OS << MO.getImm();
+ } else {
+ printOp(MO, OS);
+ }
+ }
+
+
+ bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+
+ void printHexagonImmOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ int value = MI->getOperand(OpNo).getImm();
+ O << value;
+ }
+
+
+ void printHexagonNegImmOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ int value = MI->getOperand(OpNo).getImm();
+ O << -value;
+ }
+
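+    // Prints a register+immediate memory operand in Hexagon syntax, e.g.
+    // "r29 + #8" for base register r29 and offset 8 (illustrative values).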
+ void printHexagonMEMriOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << getRegisterName(MO1.getReg())
+ << " + #"
+ << (int) MO2.getImm();
+ }
+
+
+ void printHexagonFrameIndexOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << getRegisterName(MO1.getReg())
+ << ", #"
+ << MO2.getImm();
+ }
+
+ void printBranchOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ // Branches can take an immediate operand. This is used by the branch
+ // selection pass to print $+8, an eight byte displacement from the PC.
+ if (MI->getOperand(OpNo).isImm()) {
+ O << "$+" << MI->getOperand(OpNo).getImm()*4;
+ } else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ }
+
+ void printCallOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ }
+
+ void printAbsAddrOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ }
+
+
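+    // #HI(...) / #LO(...) select the upper and lower 16 bits of a symbol or
+    // constant, as in "r0.h = #HI(sym)" / "r0.l = #LO(sym)" (illustrative).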
+ void printSymbolHi(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
+ O << "#HI(";
+ if (MI->getOperand(OpNo).isImm()) {
+ printHexagonImmOperand(MI, OpNo, O);
+ } else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ O << ")";
+ }
+
+    void printSymbolLo(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
+      O << "#LO(";
+ if (MI->getOperand(OpNo).isImm()) {
+ printHexagonImmOperand(MI, OpNo, O);
+ } else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ O << ")";
+ }
+
+ void printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O);
+
+ void printAddrModeBasePlusOffset(const MachineInstr *MI, int OpNo,
+ raw_ostream &O);
+
+ void printGlobalOperand(const MachineInstr *MI, int OpNo, raw_ostream &O);
+ void printJumpTable(const MachineInstr *MI, int OpNo, raw_ostream &O);
+
+ void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
+
+ static const char *getRegisterName(unsigned RegNo);
+ };
+
+} // end of anonymous namespace
+
+// Include the auto-generated portion of the assembly writer.
+#include "HexagonGenAsmWriter.inc"
+
+
+void HexagonAsmPrinter::EmitAlignment(unsigned NumBits,
+ const GlobalValue *GV) const {
+
+ // For basic block level alignment, use falign.
+ if (!GV) {
+ OutStreamer.EmitRawText(StringRef("\t.falign"));
+ return;
+ }
+
+ AsmPrinter::EmitAlignment(NumBits, GV);
+}
+
+void HexagonAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate:
+ dbgs() << "printOp() does not handle immediate values\n";
+ abort();
+ return;
+
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ return;
+ case MachineOperand::MO_JumpTableIndex:
+ O << *GetJTISymbol(MO.getIndex());
+ // FIXME: PIC relocation model.
+ return;
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << *GetCPISymbol(MO.getIndex());
+ return;
+ case MachineOperand::MO_ExternalSymbol:
+ O << *GetExternalSymbolSymbol(MO.getSymbolName());
+ return;
+ case MachineOperand::MO_GlobalAddress: {
+ // Computing the address of a global symbol, not calling it.
+ O << *Mang->getSymbol(MO.getGlobal());
+ printOffset(MO.getOffset(), O);
+ return;
+ }
+
+ default:
+ O << "<unknown operand type: " << MO.getType() << ">";
+ return;
+ }
+}
+
+
+//
+// isBlockOnlyReachableByFallthrough - We need to override this since the
+// default AsmPrinter does not print labels for any basic block that
+// is only reachable by a fall through. That works for all cases except
+// for the case in which the basic block is reachable by a fall through but
+// through an indirect from a jump table. In this case, the jump table
+// will contain a label not defined by AsmPrinter.
+//
+bool HexagonAsmPrinter::
+isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
+ if (MBB->hasAddressTaken()) {
+ return false;
+ }
+ return AsmPrinter::isBlockOnlyReachableByFallthrough(MBB);
+}
+
+
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool HexagonAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default: return true; // Unknown modifier.
+ case 'c': // Don't print "$" before a global var name or constant.
+ // Hexagon never has a prefix.
+ printOperand(MI, OpNo, OS);
+ return false;
+ case 'L': // Write second word of DImode reference.
+ // Verify that this operand has two consecutive registers.
+ if (!MI->getOperand(OpNo).isReg() ||
+ OpNo+1 == MI->getNumOperands() ||
+ !MI->getOperand(OpNo+1).isReg())
+ return true;
+ ++OpNo; // Return the high-part.
+ break;
+ case 'I':
+ // Write 'i' if an integer constant, otherwise nothing. Used to print
+ // addi vs add, etc.
+ if (MI->getOperand(OpNo).isImm())
+ OS << "i";
+ return false;
+ }
+ }
+
+ printOperand(MI, OpNo, OS);
+ return false;
+}
+
+bool HexagonAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier.
+
+ const MachineOperand &Base = MI->getOperand(OpNo);
+ const MachineOperand &Offset = MI->getOperand(OpNo+1);
+
+ if (Base.isReg())
+ printOperand(MI, OpNo, O);
+ else
+ assert(0 && "Unimplemented");
+
+ if (Offset.isImm()) {
+ if (Offset.getImm())
+ O << " + #" << Offset.getImm();
+ }
+ else
+ assert(0 && "Unimplemented");
+
+ return false;
+}
+
+void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ assert(0 && "Unimplemented");
+}
+
+
+/// EmitInstruction - Print out a single Hexagon MI to the current output
+/// stream.
+///
+void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ SmallString<128> Str;
+ raw_svector_ostream O(Str);
+
+ const MachineFunction* MF = MI->getParent()->getParent();
+ const HexagonMachineFunctionInfo* MFI =
+ (const HexagonMachineFunctionInfo*)
+ MF->getInfo<HexagonMachineFunctionInfo>();
+
+
+
+ // Print a brace for the beginning of the packet.
+ if (MFI->isStartPacket(MI)) {
+ O << "\t{" << '\n';
+ }
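+
+  // The braces delimit a VLIW packet in the emitted assembly, e.g.
+  // "{ r0 = add(r1, r2); memw(r29 + #0) = r3 }" (illustrative packet).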
+
+ DEBUG( O << "// MI = " << *MI << '\n';);
+
+ // Indent
+ O << "\t";
+
+
+ if (MI->getOpcode() == Hexagon::ENDLOOP0) {
+ if (MFI->isEndPacket(MI) && MFI->isStartPacket(MI)) {
+ O << "\t{ nop }";
+ } else {
+ O << "}";
+ }
+ printInstruction(MI, O);
+ } else if (MI->getOpcode() == Hexagon::STriwt) {
+ //
+ // Handle truncated store on Hexagon.
+ //
+ O << "\tmemw(";
+ printHexagonMEMriOperand(MI, 0, O);
+
+ O << ") = ";
+ unsigned SubRegNum =
+ TM.getRegisterInfo()->getSubReg(MI->getOperand(2)
+ .getReg(), Hexagon::subreg_loreg);
+ const char *SubRegName = getRegisterName(SubRegNum);
+ O << SubRegName << '\n';
+ } else if (MI->getOpcode() == Hexagon::MPYI_rin) {
+    // Handle multiply with a negative constant on Hexagon:
+ // "$dst =- mpyi($src1, #$src2)"
+ printOperand(MI, 0, O);
+ O << " =- mpyi(";
+ printOperand(MI, 1, O);
+ O << ", #";
+ printHexagonNegImmOperand(MI, 2, O);
+ O << ")";
+ } else if (MI->getOpcode() == Hexagon::MEMw_ADDSUBi_indexed_MEM_V4) {
+ //
+ // Handle memw(Rs+u6:2) [+-]= #U5
+ //
+ O << "\tmemw("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::MEMw_ADDSUBi_MEM_V4) {
+ //
+ // Handle memw(Rs+u6:2) [+-]= #U5
+ //
+ O << "\tmemw("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::MEMh_ADDSUBi_indexed_MEM_V4) {
+ //
+ // Handle memh(Rs+u6:1) [+-]= #U5
+ //
+ O << "\tmemh("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::MEMh_ADDSUBi_MEM_V4) {
+ //
+ // Handle memh(Rs+u6:1) [+-]= #U5
+ //
+ O << "\tmemh("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::MEMb_ADDSUBi_indexed_MEM_V4) {
+ //
+    // Handle memb(Rs+u6:0) [+-]= #U5
+ //
+ O << "\tmemb("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::MEMb_ADDSUBi_MEM_V4) {
+ //
+    // Handle memb(Rs+u6:0) [+-]= #U5
+ //
+ O << "\tmemb("; printHexagonMEMriOperand(MI, 0, O); O << ") ";
+ int addend = MI->getOperand(2).getImm();
+ if (addend < 0)
+ O << "-= " << "#" << -addend << '\n';
+ else
+ O << "+= " << "#" << addend << '\n';
+ } else if (MI->getOpcode() == Hexagon::CMPbGTri_V4) {
+ //
+ // Handle Pd=cmpb.gt(Rs,#s8)
+ //
+ O << "\t";
+ printRegister(MI->getOperand(0), false, O);
+ O << " = cmpb.gt(";
+ printRegister(MI->getOperand(1), false, O);
+ O << ", ";
+ int val = MI->getOperand(2).getImm() >> 24;
+ O << "#" << val << ")" << '\n';
+ } else if (MI->getOpcode() == Hexagon::CMPhEQri_V4) {
+ //
+    // Handle Pd=cmph.eq(Rs,#s8)
+ //
+ O << "\t";
+ printRegister(MI->getOperand(0), false, O);
+ O << " = cmph.eq(";
+ printRegister(MI->getOperand(1), false, O);
+ O << ", ";
+ int val = MI->getOperand(2).getImm();
+ assert((((0 <= val) && (val <= 127)) ||
+ ((65408 <= val) && (val <= 65535))) &&
+ "Not in correct range!");
+ if (val >= 65408) val -= 65536;
+ O << "#" << val << ")" << '\n';
+ } else if (MI->getOpcode() == Hexagon::CMPhGTri_V4) {
+ //
+    // Handle Pd=cmph.gt(Rs,#s8)
+ //
+ O << "\t";
+ printRegister(MI->getOperand(0), false, O);
+ O << " = cmph.gt(";
+ printRegister(MI->getOperand(1), false, O);
+ O << ", ";
+ int val = MI->getOperand(2).getImm() >> 16;
+ O << "#" << val << ")" << '\n';
+ } else {
+ printInstruction(MI, O);
+ }
+
+ // Print a brace for the end of the packet.
+ if (MFI->isEndPacket(MI) && MI->getOpcode() != Hexagon::ENDLOOP0) {
+ O << "\n\t}" << '\n';
+ }
+
+ if (AlignCalls && MI->getDesc().isCall()) {
+ O << "\n\t.falign" << "\n";
+ }
+
+ OutStreamer.EmitRawText(O.str());
+ return;
+}
+
+/// PrintUnmangledNameSafely - Print out the printable characters in the name.
+/// Don't print things like \n or \0.
+// static void PrintUnmangledNameSafely(const Value *V, raw_ostream &OS) {
+// for (const char *Name = V->getNameStart(), *E = Name+V->getNameLen();
+// Name != E; ++Name)
+// if (isprint(*Name))
+// OS << *Name;
+// }
+
+
+void HexagonAsmPrinter::printAddrModeBasePlusOffset(const MachineInstr *MI,
+ int OpNo, raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << getRegisterName(MO1.getReg())
+ << " + #"
+ << MO2.getImm();
+}
+
+
+void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ assert( (MO.getType() == MachineOperand::MO_GlobalAddress) &&
+ "Expecting global address");
+
+ O << *Mang->getSymbol(MO.getGlobal());
+ if (MO.getOffset() != 0) {
+ O << " + ";
+ O << MO.getOffset();
+ }
+}
+
+void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
+ "Expecting jump table index");
+
+ // Hexagon_TODO: Do we need name mangling?
+ O << *GetJTISymbol(MO.getIndex());
+}
+
+extern "C" void LLVMInitializeHexagonAsmPrinter() {
+ RegisterAsmPrinter<HexagonAsmPrinter> X(TheHexagonTarget);
+}
--- /dev/null
+//===---- HexagonCFGOptimizer.cpp - CFG optimizations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#define DEBUG_TYPE "hexagon_cfg"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include <iostream>
+
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+namespace {
+
+class HexagonCFGOptimizer : public MachineFunctionPass {
+
+private:
+ HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ void InvertAndChangeJumpTarget(MachineInstr*, MachineBasicBlock*);
+
+ public:
+ static char ID;
+ HexagonCFGOptimizer(HexagonTargetMachine& TM) : MachineFunctionPass(ID),
+ QTM(TM),
+ QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon CFG Optimizer";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonCFGOptimizer::ID = 0;
+
+static bool IsConditionalBranch(int Opc) {
+ return (Opc == Hexagon::JMP_Pred) || (Opc == Hexagon::JMP_PredNot)
+ || (Opc == Hexagon::JMP_PredPt) || (Opc == Hexagon::JMP_PredNotPt);
+}
+
+
+static bool IsUnconditionalJump(int Opc) {
+ return (Opc == Hexagon::JMP);
+}
+
+
+void
+HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
+ MachineBasicBlock* NewTarget) {
+ const HexagonInstrInfo *QII = QTM.getInstrInfo();
+ int NewOpcode = 0;
+ switch(MI->getOpcode()) {
+ case Hexagon::JMP_Pred:
+ NewOpcode = Hexagon::JMP_PredNot;
+ break;
+
+ case Hexagon::JMP_PredNot:
+ NewOpcode = Hexagon::JMP_Pred;
+ break;
+
+ case Hexagon::JMP_PredPt:
+ NewOpcode = Hexagon::JMP_PredNotPt;
+ break;
+
+ case Hexagon::JMP_PredNotPt:
+ NewOpcode = Hexagon::JMP_PredPt;
+ break;
+
+ default:
+ assert(0 && "Cannot handle this case");
+ }
+
+ MI->setDesc(QII->get(NewOpcode));
+ MI->getOperand(1).setMBB(NewTarget);
+}
+
+
+bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+
+ // Traverse the basic block.
+ MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
+ if (MII != MBB->end()) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (IsConditionalBranch(Opc)) {
+
+ //
+ // (Case 1) Transform the code if the following condition occurs:
+ // BB1: if (p0) jump BB3
+ // ...falls-through to BB2 ...
+ // BB2: jump BB4
+ // ...next block in layout is BB3...
+ // BB3: ...
+ //
+ // Transform this to:
+ // BB1: if (!p0) jump BB4
+ // Remove BB2
+ // BB3: ...
+ //
+ // (Case 2) A variation occurs when BB3 contains a JMP to BB4:
+ // BB1: if (p0) jump BB3
+ // ...falls-through to BB2 ...
+ // BB2: jump BB4
+ // ...other basic blocks ...
+ // BB4:
+ // ...not a fall-thru
+ // BB3: ...
+ // jump BB4
+ //
+ // Transform this to:
+ // BB1: if (!p0) jump BB4
+ // Remove BB2
+ // BB3: ...
+ // BB4: ...
+ //
+ unsigned NumSuccs = MBB->succ_size();
+ MachineBasicBlock::succ_iterator SI = MBB->succ_begin();
+ MachineBasicBlock* FirstSucc = *SI;
+ MachineBasicBlock* SecondSucc = *(++SI);
+ MachineBasicBlock* LayoutSucc = NULL;
+ MachineBasicBlock* JumpAroundTarget = NULL;
+
+ if (MBB->isLayoutSuccessor(FirstSucc)) {
+ LayoutSucc = FirstSucc;
+ JumpAroundTarget = SecondSucc;
+ } else if (MBB->isLayoutSuccessor(SecondSucc)) {
+ LayoutSucc = SecondSucc;
+ JumpAroundTarget = FirstSucc;
+ } else {
+ // Odd case...cannot handle.
+ }
+
+ // The target of the unconditional branch must be JumpAroundTarget.
+ // TODO: If not, we should not invert the unconditional branch.
+ MachineBasicBlock* CondBranchTarget = NULL;
+ if ((MI->getOpcode() == Hexagon::JMP_Pred) ||
+ (MI->getOpcode() == Hexagon::JMP_PredNot)) {
+ CondBranchTarget = MI->getOperand(1).getMBB();
+ }
+
+ if (!LayoutSucc || (CondBranchTarget != JumpAroundTarget)) {
+ continue;
+ }
+
+ if ((NumSuccs == 2) && LayoutSucc && (LayoutSucc->pred_size() == 1)) {
+
+ // Ensure that BB2 has one instruction -- an unconditional jump.
+ if ((LayoutSucc->size() == 1) &&
+ IsUnconditionalJump(LayoutSucc->front().getOpcode())) {
+ MachineBasicBlock* UncondTarget =
+ LayoutSucc->front().getOperand(0).getMBB();
+ // Check if the layout successor of BB2 is BB3.
+ bool case1 = LayoutSucc->isLayoutSuccessor(JumpAroundTarget);
+ bool case2 = JumpAroundTarget->isSuccessor(UncondTarget) &&
+ JumpAroundTarget->size() >= 1 &&
+ IsUnconditionalJump(JumpAroundTarget->back().getOpcode()) &&
+ JumpAroundTarget->pred_size() == 1 &&
+ JumpAroundTarget->succ_size() == 1;
+
+ if (case1 || case2) {
+ InvertAndChangeJumpTarget(MI, UncondTarget);
+ MBB->removeSuccessor(JumpAroundTarget);
+ MBB->addSuccessor(UncondTarget);
+
+ // Remove the unconditional branch in LayoutSucc.
+ LayoutSucc->erase(LayoutSucc->begin());
+ LayoutSucc->removeSuccessor(UncondTarget);
+ LayoutSucc->addSuccessor(JumpAroundTarget);
+
+ // This code performs the conversion for case 2, which moves
+ // the block to the fall-thru case (BB3 in the code above).
+ if (case2 && !case1) {
+ JumpAroundTarget->moveAfter(LayoutSucc);
+                  // Only move a block if it doesn't have a fall-through;
+                  // otherwise the CFG will be incorrect.
+ if (!UncondTarget->canFallThrough()) {
+ UncondTarget->moveAfter(JumpAroundTarget);
+ }
+ }
+
+ //
+                // Correct the live-in information, which is used by the
+                // post-RA scheduler.  The live-ins of LayoutSucc are now
+                // exactly the values that were live into JumpAroundTarget.
+ //
+ std::vector<unsigned> OrigLiveIn(LayoutSucc->livein_begin(),
+ LayoutSucc->livein_end());
+ std::vector<unsigned> NewLiveIn(JumpAroundTarget->livein_begin(),
+ JumpAroundTarget->livein_end());
+ for (unsigned i = 0; i < OrigLiveIn.size(); ++i) {
+ LayoutSucc->removeLiveIn(OrigLiveIn[i]);
+ }
+ for (unsigned i = 0; i < NewLiveIn.size(); ++i) {
+ LayoutSucc->addLiveIn(NewLiveIn[i]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+}
+
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonCFGOptimizer(HexagonTargetMachine &TM) {
+ return new HexagonCFGOptimizer(TM);
+}
--- /dev/null
+//===- HexagonCallingConv.td - Calling Conventions Hexagon -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the Hexagon architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// Hexagon 32-bit C return-value convention.
+def RetCC_Hexagon32 : CallingConv<[
+ CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+
+ // Alternatively, they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
+
+// Hexagon 32-bit C Calling convention.
+def CC_Hexagon32 : CallingConv<[
+ // All arguments get passed in integer registers if there is space.
+ CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+
+ // Alternatively, they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
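+
+// For example, under CC_Hexagon32 an i64 argument is passed in one of the
+// register pairs D0-D2 (R1:0, R3:2, R5:4) when available, and on the stack in
+// 4-byte aligned units otherwise (illustrative reading of the rules above).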
--- /dev/null
+//===-- HexagonCallingConvLower.cpp - Calling Convention lowering ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Hexagon_CCState class, used for lowering and
+// implementing calling conventions.  It is adapted from the machine-independent
+// version of the class (CCState), but additionally handles calls to varargs
+// functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonCallingConvLower.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "Hexagon.h"
+using namespace llvm;
+
+Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
+ const TargetMachine &tm,
+ SmallVector<CCValAssign, 16> &locs,
+ LLVMContext &c)
+ : CallingConv(CC), IsVarArg(isVarArg), TM(tm),
+ TRI(*TM.getRegisterInfo()), Locs(locs), Context(c) {
+ // No stack is used.
+ StackOffset = 0;
+
+ UsedRegs.resize((TRI.getNumRegs()+31)/32);
+}
+
+// HandleByVal - Allocate a stack slot large enough to pass an argument by
+// value. The size and alignment information of the argument is encoded in its
+// parameter attribute.
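+// For example (illustrative), a 12-byte by-value struct with 4-byte alignment
+// receives a 12-byte slot at the next 4-byte-aligned stack offset.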
+void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign,
+ ISD::ArgFlagsTy ArgFlags) {
+ unsigned Align = ArgFlags.getByValAlign();
+ unsigned Size = ArgFlags.getByValSize();
+ if (MinSize > (int)Size)
+ Size = MinSize;
+ if (MinAlign > (int)Align)
+ Align = MinAlign;
+ unsigned Offset = AllocateStack(Size, Align);
+
+ addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset,
+ LocVT.getSimpleVT(), LocInfo));
+}
+
+/// MarkAllocated - Mark a register and all of its aliases as allocated.
+void Hexagon_CCState::MarkAllocated(unsigned Reg) {
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+
+ if (const unsigned *RegAliases = TRI.getAliasSet(Reg))
+ for (; (Reg = *RegAliases); ++RegAliases)
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+}
+
+/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+/// incorporating info about the formals into this state.
+void
+Hexagon_CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg>
+ &Ins,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+ unsigned NumArgs = Ins.size();
+ unsigned i = 0;
+
+ // If the function returns a small struct in registers, skip
+ // over the first (dummy) argument.
+ if (SretValueInRegs != 0) {
+ ++i;
+ }
+
+
+ for (; i != NumArgs; ++i) {
+ EVT ArgVT = Ins[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, 0, 0, false)) {
+ dbgs() << "Formal argument #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+/// incorporating info about the result values into this state.
+void
+Hexagon_CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+
+  // For Hexagon, return small structures in registers.
+ if (SretValueInRegs != 0) {
+ if (SretValueInRegs <= 32) {
+ unsigned Reg = Hexagon::R0;
+ addLoc(CCValAssign::getReg(0, MVT::i32, Reg, MVT::i32,
+ CCValAssign::Full));
+ return;
+ }
+ if (SretValueInRegs <= 64) {
+ unsigned Reg = Hexagon::D0;
+ addLoc(CCValAssign::getReg(0, MVT::i64, Reg, MVT::i64,
+ CCValAssign::Full));
+ return;
+ }
+ }
+
+
+ // Determine which register each value should be copied into.
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this, -1, -1, false)){
+ dbgs() << "Return operand #" << i << " has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+
+/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
+/// about the passed values into this state.
+void
+Hexagon_CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg>
+ &Outs,
+ Hexagon_CCAssignFn Fn,
+ int NonVarArgsParams,
+ unsigned SretValueSize) {
+ unsigned NumOps = Outs.size();
+
+ unsigned i = 0;
+ // If the called function returns a small struct in registers, skip
+ // the first actual parameter. We do not want to pass a pointer to
+ // the stack location.
+ if (SretValueSize != 0) {
+ ++i;
+ }
+
+ for (; i != NumOps; ++i) {
+ EVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this,
+ NonVarArgsParams, i+1, false)) {
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallOperands - Same as above except it takes vectors of types
+/// and argument flags.
+void
+Hexagon_CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ Hexagon_CCAssignFn Fn) {
+ unsigned NumOps = ArgVTs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ EVT ArgVT = ArgVTs[i];
+ ISD::ArgFlagsTy ArgFlags = Flags[i];
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, -1, -1,
+ false)) {
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+/// incorporating info about the passed values into this state.
+void
+Hexagon_CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ EVT VT = Ins[i].VT;
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this, -1, -1, false)) {
+ dbgs() << "Call result #" << i << " has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallResult - Same as above except it's specialized for calls which
+/// produce a single value.
+void Hexagon_CCState::AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn) {
+ if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this, -1, -1,
+ false)) {
+ dbgs() << "Call result has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+}
--- /dev/null
+//===-- HexagonCallingConvLower.h - Calling Conventions ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Hexagon_CCState class, used for lowering
+// and implementing calling conventions.  It is adapted from the
+// target-independent version, but additionally handles calls to varargs
+// functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_Hexagon_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_Hexagon_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+
+//
+// Need to handle varargs.
+//
+namespace llvm {
+ class TargetRegisterInfo;
+ class TargetMachine;
+ class Hexagon_CCState;
+ class SDNode;
+
+
+/// Hexagon_CCAssignFn - This function assigns a location for Val, updating
+/// State to reflect the change.
+typedef bool Hexagon_CCAssignFn(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, Hexagon_CCState &State,
+ int NonVarArgsParams,
+ int CurrentParam,
+ bool ForceMem);
+
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values. It captures which registers are already assigned and which
+/// stack slots are used. It provides accessors to allocate these values.
+class Hexagon_CCState {
+ CallingConv::ID CallingConv;
+ bool IsVarArg;
+ const TargetMachine &TM;
+ const TargetRegisterInfo &TRI;
+ SmallVector<CCValAssign, 16> &Locs;
+ LLVMContext &Context;
+
+ unsigned StackOffset;
+ SmallVector<uint32_t, 16> UsedRegs;
+public:
+ Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
+ SmallVector<CCValAssign, 16> &locs, LLVMContext &c);
+
+ void addLoc(const CCValAssign &V) {
+ Locs.push_back(V);
+ }
+
+ LLVMContext &getContext() const { return Context; }
+ const TargetMachine &getTarget() const { return TM; }
+ unsigned getCallingConv() const { return CallingConv; }
+ bool isVarArg() const { return IsVarArg; }
+
+ unsigned getNextStackOffset() const { return StackOffset; }
+
+ /// isAllocated - Return true if the specified register (or an alias) is
+ /// allocated.
+ bool isAllocated(unsigned Reg) const {
+ return UsedRegs[Reg/32] & (1 << (Reg&31));
+ }
+
+ /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+ /// incorporating info about the formals into this state.
+ void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+ /// incorporating info about the result values into this state.
+ void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
+ /// about the passed values into this state.
+ void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn, int NonVarArgsParams,
+ unsigned SretValueSize);
+
+ /// AnalyzeCallOperands - Same as above except it takes vectors of types
+ /// and argument flags.
+ void AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ Hexagon_CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+ /// incorporating info about the passed values into this state.
+ void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeCallResult - Same as above except it's specialized for calls which
+ /// produce a single value.
+ void AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn);
+
+ /// getFirstUnallocated - Return the first unallocated register in the set, or
+ /// NumRegs if they are all allocated.
+ unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const {
+ for (unsigned i = 0; i != NumRegs; ++i)
+ if (!isAllocated(Regs[i]))
+ return i;
+ return NumRegs;
+ }
+
+ /// AllocateReg - Attempt to allocate one register. If it is not available,
+ /// return zero. Otherwise, return the register, marking it and any aliases
+ /// as allocated.
+ unsigned AllocateReg(unsigned Reg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with extra register to be shadowed.
+ unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateReg - Attempt to allocate one of the specified registers. If none
+ /// are available, return zero. Otherwise, return the first one available,
+ /// marking it and any aliases as allocated.
+ unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc];
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with list of registers to be shadowed.
+ unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs,
+ unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateStack - Allocate a chunk of stack space with the specified size
+ /// and alignment.
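+  /// For example (illustrative numbers), with StackOffset == 6,
+  /// AllocateStack(4, 4) rounds the offset up to 8, returns 8, and leaves
+  /// StackOffset at 12.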
+ unsigned AllocateStack(unsigned Size, unsigned Align) {
+ assert(Align && ((Align-1) & Align) == 0); // Align is power of 2.
+ StackOffset = ((StackOffset + Align-1) & ~(Align-1));
+ unsigned Result = StackOffset;
+ StackOffset += Size;
+ return Result;
+ }
+
+ // HandleByVal - Allocate a stack slot large enough to pass an argument by
+ // value. The size and alignment information of the argument is encoded in its
+ // parameter attribute.
+ void HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+private:
+ /// MarkAllocated - Mark a register and all of its aliases as allocated.
+ void MarkAllocated(unsigned Reg);
+};
+
+
+
+} // end namespace llvm
+
+#endif
--- /dev/null
+//===--- HexagonExpandPredSpillCode.cpp - Expand Predicate Spill Code ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The Hexagon processor has no instructions that load or store predicate
+// registers directly. So, when these registers must be spilled a general
+// purpose register must be found and the value copied to/from it from/to
+// the predicate register. This code currently does not use the register
+// scavenger mechanism available in the allocator. There are two registers
+// reserved to allow spilling/restoring predicate registers. One is used to
+// hold the predicate value. The other is used when stack frame offsets are
+// too large.
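+//
+// For example (illustrative register names), a predicate spill
+//     STriw_pred %fp, #offset, %p0
+// is rewritten into a transfer through a reserved general-purpose register
+// followed by an ordinary store:
+//     rX = p0
+//     memw(fp + #offset) = rX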
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include <map>
+#include <iostream>
+
+#include "llvm/Support/CommandLine.h"
+
+
+using namespace llvm;
+
+
+namespace {
+
+class HexagonExpandPredSpillCode : public MachineFunctionPass {
+ HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ public:
+ static char ID;
+ HexagonExpandPredSpillCode(HexagonTargetMachine& TM) :
+ MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon Expand Predicate Spill Code";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonExpandPredSpillCode::ID = 0;
+
+
+bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
+
+ const HexagonInstrInfo *TII = QTM.getInstrInfo();
+ const HexagonRegisterInfo *RegInfo = QTM.getRegisterInfo();
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+ // Traverse the basic block.
+ for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
+ ++MII) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (Opc == Hexagon::STriw_pred) {
+ // STriw_pred [R30], ofst, SrcReg;
+ unsigned FP = MI->getOperand(0).getReg();
+ assert(FP == RegInfo->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
+ assert(MI->getOperand(1).isImm() && "Not an offset");
+ int Offset = MI->getOperand(1).getImm();
+ int SrcReg = MI->getOperand(2).getReg();
+ assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
+ "Not a predicate register");
+ if (!TII->isValidOffset(Hexagon::STriw, Offset)) {
+ if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_rr),
+ HEXAGON_RESERVED_REG_1)
+ .addReg(FP).addReg(HEXAGON_RESERVED_REG_1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::STriw))
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0).addReg(HEXAGON_RESERVED_REG_2);
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw))
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0)
+ .addReg(HEXAGON_RESERVED_REG_2);
+ }
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)).
+ addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ } else if (Opc == Hexagon::LDriw_pred) {
+ // DstReg = LDriw_pred [R30], ofst.
+ int DstReg = MI->getOperand(0).getReg();
+ assert(Hexagon::PredRegsRegClass.contains(DstReg) &&
+ "Not a predicate register");
+ unsigned FP = MI->getOperand(1).getReg();
+ assert(FP == RegInfo->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
+ assert(MI->getOperand(2).isImm() && "Not an offset");
+ int Offset = MI->getOperand(2).getImm();
+ if (!TII->isValidOffset(Hexagon::LDriw, Offset)) {
+ if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_rr),
+ HEXAGON_RESERVED_REG_1)
+ .addReg(FP)
+ .addReg(HEXAGON_RESERVED_REG_1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2)
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2)
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ }
+ }
+ }
+
+ return true;
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonExpandPredSpillCode(HexagonTargetMachine &TM) {
+ return new HexagonExpandPredSpillCode(TM);
+}
--- /dev/null
+//==-- HexagonFrameLowering.cpp - Define frame lowering --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+//===----------------------------------------------------------------------===//
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonFrameLowering.h"
+
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <iostream>
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Function.h"
+using namespace llvm;
+
+static cl::opt<bool> DisableDeallocRet(
+ "disable-hexagon-dealloc-ret",
+ cl::Hidden,
+ cl::desc("Disable Dealloc Return for Hexagon target"));
+
+/// determineFrameLayout - Determine the size of the frame and maximum call
+/// frame size.
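+///
+/// As a worked illustration (not part of the original code): with the
+/// default 8-byte stack alignment, 20 bytes of local objects and a 24-byte
+/// maximum call frame, the final stack size is
+/// RoundUpToAlignment(20 + 24, 8) = 48 bytes.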
+void HexagonFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned FrameSize = MFI->getStackSize();
+
+ // Get the alignments provided by the target.
+ unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
+ // Get the maximum call frame size of all the calls.
+ unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
+
+ // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
+ // that allocations will be aligned.
+ if (MFI->hasVarSizedObjects())
+ maxCallFrameSize = RoundUpToAlignment(maxCallFrameSize, TargetAlign);
+
+ // Update maximum call frame size.
+ MFI->setMaxCallFrameSize(maxCallFrameSize);
+
+ // Include call frame size in total.
+ FrameSize += maxCallFrameSize;
+
+ // Make sure the frame is aligned.
+ FrameSize = RoundUpToAlignment(FrameSize, TargetAlign);
+
+ // Update frame info.
+ MFI->setStackSize(FrameSize);
+}
+
+
+void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ const HexagonRegisterInfo *QRI =
+ static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo());
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ determineFrameLayout(MF);
+
+ // Check if frame moves are needed for EH.
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
+ !MF.getFunction()->needsUnwindTableEntry();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ int NumBytes = (int) MFI->getStackSize();
+
+ // LLVM expects allocframe not to be the first instruction in the
+ // basic block.
+ MachineBasicBlock::iterator InsertPt = MBB.begin();
+
+ //
+ // ALLOCA adjust regs. Iterate over ADJDYNALLOC nodes and change the offset.
+ //
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+ const std::vector<MachineInstr*>& AdjustRegs =
+ FuncInfo->getAllocaAdjustInsts();
+ for (std::vector<MachineInstr*>::const_iterator i = AdjustRegs.begin(),
+ e = AdjustRegs.end();
+ i != e; ++i) {
+ MachineInstr* MI = *i;
+ assert((MI->getOpcode() == Hexagon::ADJDYNALLOC) &&
+ "Expected adjust alloca node");
+
+ MachineOperand& MO = MI->getOperand(2);
+ assert(MO.isImm() && "Expected immediate");
+ MO.setImm(MFI->getMaxCallFrameSize());
+ }
+
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+
+ if (needsFrameMoves) {
+ // Advance CFA. DW_CFA_def_cfa
+ unsigned FPReg = QRI->getFrameRegister();
+ unsigned RAReg = QRI->getRARegister();
+
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(FPReg, -8);
+ Moves.push_back(MachineMove(0, Dst, Src));
+
+ // R31 = (R31 - #4)
+ MachineLocation LRDst(RAReg, -4);
+ MachineLocation LRSrc(RAReg);
+ Moves.push_back(MachineMove(0, LRDst, LRSrc));
+
+ // R30 = (R30 - #8)
+ MachineLocation SPDst(FPReg, -8);
+ MachineLocation SPSrc(FPReg);
+ Moves.push_back(MachineMove(0, SPDst, SPSrc));
+ }
+
+ //
+ // Only insert ALLOCFRAME if we need to.
+ //
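+  // Schematically (an illustrative sketch, not from the original code),
+  // a small frame is set up with a single
+  //     allocframe(#NumBytes)
+  // while a frame of ALLOCFRAME_MAX bytes or more falls back to
+  //     allocframe(#0)
+  //     rX = ##NumBytes              ; CONST32 into a reserved scratch reg
+  //     r29 = sub(r29, rX)           ; r29 is the stack pointer
+  //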
+ if (hasFP(MF)) {
+ // Check for overflow.
+ // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
+ const unsigned int ALLOCFRAME_MAX = 16384;
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ if (NumBytes >= ALLOCFRAME_MAX) {
+ // Emit allocframe(#0).
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(0);
+
+ // Subtract offset from frame pointer.
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(NumBytes);
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::SUB_rr),
+ QRI->getStackRegister()).
+ addReg(QRI->getStackRegister()).
+ addReg(HEXAGON_RESERVED_REG_1);
+ } else {
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(NumBytes);
+ }
+ }
+}
+
+// Returns true if MBB contains a machine instruction that indicates a tail
+// call in the block.
+bool HexagonFrameLowering::hasTailCall(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ unsigned RetOpcode = MBBI->getOpcode();
+
+  return RetOpcode == Hexagon::TCRETURNtg || RetOpcode == Hexagon::TCRETURNtext;
+}
+
+void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ DebugLoc dl = MBBI->getDebugLoc();
+ //
+ // Only insert deallocframe if we need to.
+ //
+ if (hasFP(MF)) {
+ MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ MachineBasicBlock::iterator MBBI_end = MBB.end();
+ //
+ // For Hexagon, we don't need the frame size.
+ //
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ int NumBytes = (int) MFI->getStackSize();
+
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ // Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
+ // versions.
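+    //
+    // Illustratively (a sketch, not from the original code), instead of the
+    // V2/V3 epilogue sequence
+    //     deallocframe
+    //     jumpr r31
+    // V4 and later can end the function with a single
+    //     dealloc_return
+    //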
+ if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPR
+ && !DisableDeallocRet) {
+ // Remove jumpr node.
+ MBB.erase(MBBI);
+ // Add dealloc_return.
+ BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4))
+ .addImm(NumBytes);
+ } else { // Add deallocframe for V2 and V3.
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME)).addImm(NumBytes);
+ }
+ }
+}
+
+bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+ return (MFI->hasCalls() || (MFI->getStackSize() > 0) ||
+ FuncInfo->hasClobberLR() );
+}
+
+bool
+HexagonFrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ if (CSI.empty()) {
+ return false;
+ }
+
+  // We can only use a double-word store if we spill two contiguous
+  // callee-saved registers.  For instance, we cannot use a double-word
+  // store if we spill r24, r26, and r27.
+ // Hexagon_TODO: We can try to double-word align odd registers for -O2 and
+ // above.
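+  //
+  // For example (illustrative, assuming the usual even/odd pairing of
+  // Hexagon double registers): spilling r26 and r27 to adjacent slots can
+  // use one double-word store of their super-register d13, whereas r24
+  // followed by r26 cannot be combined.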
+ bool ContiguousRegs = true;
+
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ //
+ // Check if we can use a double-word store.
+ //
+ const unsigned* SuperReg = TRI->getSuperRegisters(Reg);
+
+ // Assume that there is exactly one superreg.
+ assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
+ bool CanUseDblStore = false;
+ const TargetRegisterClass* SuperRegClass = 0;
+
+ if (ContiguousRegs && (i < CSI.size()-1)) {
+ const unsigned* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
+ assert(SuperRegNext[0] && !SuperRegNext[1] &&
+ "Expected exactly one superreg");
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
+ CanUseDblStore = (SuperRegNext[0] == SuperReg[0]);
+ }
+
+
+ if (CanUseDblStore) {
+ TII.storeRegToStackSlot(MBB, MI, SuperReg[0], true,
+ CSI[i+1].getFrameIdx(), SuperRegClass, TRI);
+ MBB.addLiveIn(SuperReg[0]);
+ ++i;
+ } else {
+ // Cannot use a double-word store.
+ ContiguousRegs = false;
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(), RC,
+ TRI);
+ MBB.addLiveIn(Reg);
+ }
+ }
+ return true;
+}
+
+
+bool HexagonFrameLowering::restoreCalleeSavedRegisters(
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ if (CSI.empty()) {
+ return false;
+ }
+
+  // We can only use a double-word load if we restore two contiguous
+  // callee-saved registers.  For instance, we cannot use a double-word
+  // load if we spill r24, r26, and r27.
+ // Hexagon_TODO: We can try to double-word align odd registers for -O2 and
+ // above.
+ bool ContiguousRegs = true;
+
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ //
+ // Check if we can use a double-word load.
+ //
+ const unsigned* SuperReg = TRI->getSuperRegisters(Reg);
+ const TargetRegisterClass* SuperRegClass = 0;
+
+ // Assume that there is exactly one superreg.
+ assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
+ bool CanUseDblLoad = false;
+ if (ContiguousRegs && (i < CSI.size()-1)) {
+ const unsigned* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
+ assert(SuperRegNext[0] && !SuperRegNext[1] &&
+ "Expected exactly one superreg");
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
+ CanUseDblLoad = (SuperRegNext[0] == SuperReg[0]);
+ }
+
+
+ if (CanUseDblLoad) {
+ TII.loadRegFromStackSlot(MBB, MI, SuperReg[0], CSI[i+1].getFrameIdx(),
+ SuperRegClass, TRI);
+ MBB.addLiveIn(SuperReg[0]);
+ ++i;
+ } else {
+ // Cannot use a double-word load.
+ ContiguousRegs = false;
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
+ MBB.addLiveIn(Reg);
+ }
+ }
+ return true;
+}
+
+int HexagonFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ return MF.getFrameInfo()->getObjectOffset(FI);
+}
--- /dev/null
+//=- HexagonFrameLowering.h - Define frame lowering for Hexagon --*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGON_FRAMEINFO_H
+#define HEXAGON_FRAMEINFO_H
+
+#include "Hexagon.h"
+#include "HexagonSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+
+class HexagonFrameLowering : public TargetFrameLowering {
+private:
+ const HexagonSubtarget &STI;
+ void determineFrameLayout(MachineFunction &MF) const;
+
+public:
+ explicit HexagonFrameLowering(const HexagonSubtarget &sti)
+ : TargetFrameLowering(StackGrowsDown, 8, 0), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ virtual bool
+ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ virtual bool
+ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ bool hasFP(const MachineFunction &MF) const;
+ bool hasTailCall(MachineBasicBlock &MBB) const;
+};
+
+} // End llvm namespace
+
+#endif
--- /dev/null
+//===-- HexagonHardwareLoops.cpp - Identify and generate hardware loops ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies loops where we can generate the Hexagon hardware
+// loop instruction. The hardware loop can perform loop branches with a
+// zero-cycle overhead.
+//
+// The pattern that defines the induction variable can change depending on
+// prior optimizations.  For example, the IndVarSimplify phase run by 'opt'
+// normalizes induction variables, and the Loop Strength Reduction pass
+// run by 'llc' may also make changes to the induction variable.
+// The pattern detected by this phase is due to running Strength Reduction.
+//
+// Criteria for hardware loops:
+// - Countable loops (w/ ind. var for a trip count)
+// - Assumes loops are normalized by IndVarSimplify
+// - Try inner-most loops first
+// - No nested hardware loops.
+// - No function calls in loops.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hwloops"
+#include "llvm/Constants.h"
+#include "llvm/PassSupport.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include <algorithm>
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+
+using namespace llvm;
+
+STATISTIC(NumHWLoops, "Number of loops converted to hardware loops");
+
+namespace {
+ class CountValue;
+ struct HexagonHardwareLoops : public MachineFunctionPass {
+ MachineLoopInfo *MLI;
+ MachineRegisterInfo *MRI;
+ const TargetInstrInfo *TII;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+
+ HexagonHardwareLoops() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const { return "Hexagon Hardware Loops"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ /// getCanonicalInductionVariable - Check to see if the loop has a canonical
+ /// induction variable.
+ /// Should be defined in MachineLoop. Based upon version in class Loop.
+ const MachineInstr *getCanonicalInductionVariable(MachineLoop *L) const;
+
+ /// getTripCount - Return a loop-invariant LLVM register indicating the
+    /// number of times the loop will be executed. If the trip count cannot
+    /// be determined, this returns null.
+ CountValue *getTripCount(MachineLoop *L) const;
+
+ /// isInductionOperation - Return true if the instruction matches the
+    /// pattern for an operation that defines an induction variable.
+ bool isInductionOperation(const MachineInstr *MI, unsigned IVReg) const;
+
+    /// isInvalidLoopOperation - Return true if the instruction is not valid
+    /// within a hardware loop.
+ bool isInvalidLoopOperation(const MachineInstr *MI) const;
+
+    /// containsInvalidInstruction - Return true if the loop contains an
+    /// instruction that inhibits using the hardware loop.
+ bool containsInvalidInstruction(MachineLoop *L) const;
+
+    /// convertToHardwareLoop - Given a loop, check if we can convert it to a
+ /// hardware loop. If so, then perform the conversion and return true.
+ bool convertToHardwareLoop(MachineLoop *L);
+
+ };
+
+ char HexagonHardwareLoops::ID = 0;
+
+
+ // CountValue class - Abstraction for a trip count of a loop. A
+  // smaller version of the MachineOperand class without the concerns
+ // of changing the operand representation.
+ class CountValue {
+ public:
+ enum CountValueType {
+ CV_Register,
+ CV_Immediate
+ };
+ private:
+ CountValueType Kind;
+ union Values {
+ unsigned RegNum;
+ int64_t ImmVal;
+ Values(unsigned r) : RegNum(r) {}
+ Values(int64_t i) : ImmVal(i) {}
+ } Contents;
+ bool isNegative;
+
+ public:
+ CountValue(unsigned r, bool neg) : Kind(CV_Register), Contents(r),
+ isNegative(neg) {}
+ explicit CountValue(int64_t i) : Kind(CV_Immediate), Contents(i),
+ isNegative(i < 0) {}
+ CountValueType getType() const { return Kind; }
+ bool isReg() const { return Kind == CV_Register; }
+ bool isImm() const { return Kind == CV_Immediate; }
+ bool isNeg() const { return isNegative; }
+
+ unsigned getReg() const {
+ assert(isReg() && "Wrong CountValue accessor");
+ return Contents.RegNum;
+ }
+ void setReg(unsigned Val) {
+ Contents.RegNum = Val;
+ }
+ int64_t getImm() const {
+ assert(isImm() && "Wrong CountValue accessor");
+ if (isNegative) {
+ return -Contents.ImmVal;
+ }
+ return Contents.ImmVal;
+ }
+ void setImm(int64_t Val) {
+ Contents.ImmVal = Val;
+ }
+
+ void print(raw_ostream &OS, const TargetMachine *TM = 0) const {
+ if (isReg()) { OS << PrintReg(getReg()); }
+ if (isImm()) { OS << getImm(); }
+ }
+ };
+
+ struct HexagonFixupHwLoops : public MachineFunctionPass {
+ public:
+ static char ID; // Pass identification, replacement for typeid.
+
+ HexagonFixupHwLoops() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const { return "Hexagon Hardware Loop Fixup"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ /// Maximum distance between the loop instr and the basic block.
+ /// Just an estimate.
+ static const unsigned MAX_LOOP_DISTANCE = 200;
+
+ /// fixupLoopInstrs - Check the offset between each loop instruction and
+ /// the loop basic block to determine if we can use the LOOP instruction
+ /// or if we need to set the LC/SA registers explicitly.
+ bool fixupLoopInstrs(MachineFunction &MF);
+
+ /// convertLoopInstr - Add the instruction to set the LC and SA registers
+ /// explicitly.
+ void convertLoopInstr(MachineFunction &MF,
+ MachineBasicBlock::iterator &MII,
+ RegScavenger &RS);
+
+ };
+
+ char HexagonFixupHwLoops::ID = 0;
+
+} // end anonymous namespace
+
+
+/// isHardwareLoop - Returns true if the instruction is a hardware loop
+/// instruction.
+static bool isHardwareLoop(const MachineInstr *MI) {
+ return MI->getOpcode() == Hexagon::LOOP0_r ||
+ MI->getOpcode() == Hexagon::LOOP0_i;
+}
+
+/// isCompareEqualsImm - Returns true if the instruction is a compare-equals
+/// instruction with an immediate operand.
+static bool isCompareEqualsImm(const MachineInstr *MI) {
+ return MI->getOpcode() == Hexagon::CMPEQri;
+}
+
+
+/// createHexagonHardwareLoops - Factory for creating
+/// the hardware loop phase.
+FunctionPass *llvm::createHexagonHardwareLoops() {
+ return new HexagonHardwareLoops();
+}
+
+
+bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
+
+ bool Changed = false;
+
+  // Get the loop information.
+  MLI = &getAnalysis<MachineLoopInfo>();
+  // Get the register information.
+  MRI = &MF.getRegInfo();
+  // Get the target-specific instruction info.
+  TII = MF.getTarget().getInstrInfo();
+
+ for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
+ I != E; ++I) {
+ MachineLoop *L = *I;
+ if (!L->getParentLoop()) {
+ Changed |= convertToHardwareLoop(L);
+ }
+ }
+
+ return Changed;
+}
+
+/// getCanonicalInductionVariable - Check to see if the loop has a canonical
+/// induction variable. We check for a simple recurrence pattern - an
+/// integer recurrence that decrements by one each time through the loop and
+/// ends at zero. If so, return the phi node that corresponds to it.
+///
+/// Based upon the similar code in LoopInfo except this code is specific to
+/// the machine.
+/// This method assumes that the IndVarSimplify pass has been run by 'opt'.
+///
+const MachineInstr
+*HexagonHardwareLoops::getCanonicalInductionVariable(MachineLoop *L) const {
+ MachineBasicBlock *TopMBB = L->getTopBlock();
+ MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
+ assert(PI != TopMBB->pred_end() &&
+ "Loop must have more than one incoming edge!");
+ MachineBasicBlock *Backedge = *PI++;
+ if (PI == TopMBB->pred_end()) return 0; // dead loop
+ MachineBasicBlock *Incoming = *PI++;
+ if (PI != TopMBB->pred_end()) return 0; // multiple backedges?
+
+  // Make sure there is one incoming edge and one backedge, and determine
+  // which is which.
+ if (L->contains(Incoming)) {
+ if (L->contains(Backedge))
+ return 0;
+ std::swap(Incoming, Backedge);
+ } else if (!L->contains(Backedge))
+ return 0;
+
+ // Loop over all of the PHI nodes, looking for a canonical induction variable:
+ // - The PHI node is "reg1 = PHI reg2, BB1, reg3, BB2".
+ // - The recurrence comes from the backedge.
+  //  - The definition is an induction operation.
+ for (MachineBasicBlock::iterator I = TopMBB->begin(), E = TopMBB->end();
+ I != E && I->isPHI(); ++I) {
+ const MachineInstr *MPhi = &*I;
+ unsigned DefReg = MPhi->getOperand(0).getReg();
+ for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
+ // Check each operand for the value from the backedge.
+ MachineBasicBlock *MBB = MPhi->getOperand(i+1).getMBB();
+      if (L->contains(MBB)) { // Operand comes from the backedge.
+ // Check if the definition is an induction operation.
+ const MachineInstr *DI = MRI->getVRegDef(MPhi->getOperand(i).getReg());
+ if (isInductionOperation(DI, DefReg)) {
+ return MPhi;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/// getTripCount - Return a loop-invariant LLVM value indicating the
+/// number of times the loop will be executed. The trip count can
+/// be either a register or a constant value. If the trip-count
+/// cannot be determined, this returns null.
+///
+/// We find the trip count from the phi instruction that defines the
+/// induction variable. We follow the links to the CMP instruction
+/// to get the trip count.
+///
+/// Based upon getTripCount in LoopInfo.
+///
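+/// As a hedged machine-IR sketch (not from the original code), the pattern
+/// being matched looks roughly like:
+///     vreg1 = PHI vreg0, <preheader>, vreg2, <latch>
+///     vreg2 = ADD_ri vreg1, -1
+///     pX    = CMPEQri vreg2, 0
+/// where the initial value vreg0, or the immediate in the compare, yields
+/// the trip count.
+///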
+CountValue *HexagonHardwareLoops::getTripCount(MachineLoop *L) const {
+  // Check that the loop has an induction variable.
+ const MachineInstr *IV_Inst = getCanonicalInductionVariable(L);
+ if (IV_Inst == 0) return 0;
+
+  // Canonical loops will end with a 'cmpeq_ri IV, Imm':
+  //   if Imm is 0, get the count from the PHI operand;
+  //   if Imm is -M, then M is the count;
+  //   otherwise, Imm is the count.
+ const MachineOperand *IV_Opnd;
+ const MachineOperand *InitialValue;
+ if (!L->contains(IV_Inst->getOperand(2).getMBB())) {
+ InitialValue = &IV_Inst->getOperand(1);
+ IV_Opnd = &IV_Inst->getOperand(3);
+ } else {
+ InitialValue = &IV_Inst->getOperand(3);
+ IV_Opnd = &IV_Inst->getOperand(1);
+ }
+
+ // Look for the cmp instruction to determine if we
+ // can get a useful trip count. The trip count can
+ // be either a register or an immediate. The location
+ // of the value depends upon the type (reg or imm).
+ while ((IV_Opnd = IV_Opnd->getNextOperandForReg())) {
+ const MachineInstr *MI = IV_Opnd->getParent();
+ if (L->contains(MI) && isCompareEqualsImm(MI)) {
+ const MachineOperand &MO = MI->getOperand(2);
+      assert(MO.isImm() && "IV Cmp Operand should be an immediate");
+ int64_t ImmVal = MO.getImm();
+
+ const MachineInstr *IV_DefInstr = MRI->getVRegDef(IV_Opnd->getReg());
+ assert(L->contains(IV_DefInstr->getParent()) &&
+ "IV definition should occurs in loop");
+ int64_t iv_value = IV_DefInstr->getOperand(2).getImm();
+
+ if (ImmVal == 0) {
+ // Make sure the induction variable changes by one on each iteration.
+ if (iv_value != 1 && iv_value != -1) {
+ return 0;
+ }
+ return new CountValue(InitialValue->getReg(), iv_value > 0);
+ } else {
+ assert(InitialValue->isReg() && "Expecting register for init value");
+ const MachineInstr *DefInstr = MRI->getVRegDef(InitialValue->getReg());
+ if (DefInstr && DefInstr->getOpcode() == Hexagon::TFRI) {
+ int64_t count = ImmVal - DefInstr->getOperand(1).getImm();
+ if ((count % iv_value) != 0) {
+ return 0;
+ }
+ return new CountValue(count/iv_value);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/// isInductionOperation - Return true if the operation matches the
+/// pattern that defines an induction variable:
+/// add iv, c
+///
+bool
+HexagonHardwareLoops::isInductionOperation(const MachineInstr *MI,
+ unsigned IVReg) const {
+ return (MI->getOpcode() ==
+ Hexagon::ADD_ri && MI->getOperand(1).getReg() == IVReg);
+}
+
+/// isInvalidLoopOperation - Return true if the operation is invalid within
+/// a hardware loop.
+bool
+HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI) const {
+
+ // call is not allowed because the callee may use a hardware loop
+ if (MI->getDesc().isCall()) {
+ return true;
+ }
+ // do not allow nested hardware loops
+ if (isHardwareLoop(MI)) {
+ return true;
+ }
+ // check if the instruction defines a hardware loop register
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() &&
+ (MO.getReg() == Hexagon::LC0 || MO.getReg() == Hexagon::LC1 ||
+         MO.getReg() == Hexagon::SA0 || MO.getReg() == Hexagon::SA1)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// containsInvalidInstruction - Return true if the loop contains
+/// an instruction that inhibits the use of the hardware loop function.
+///
+bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
+ const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = Blocks[i];
+ for (MachineBasicBlock::iterator
+ MII = MBB->begin(), E = MBB->end(); MII != E; ++MII) {
+ const MachineInstr *MI = &*MII;
+ if (isInvalidLoopOperation(MI)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// convertToHardwareLoop - Check if the loop is a candidate for
+/// converting to a hardware loop. If so, then perform the
+/// transformation.
+///
+/// This function works on innermost loops first. A loop can
+/// be converted if it is a counting loop whose trip count is either
+/// a register value or an immediate.
+///
+/// The code makes several assumptions about the representation
+/// of the loop in llvm.
+bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
+ bool Changed = false;
+ // Process nested loops first.
+ for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
+ Changed |= convertToHardwareLoop(*I);
+ }
+ // If a nested loop has been converted, then we can't convert this loop.
+ if (Changed) {
+ return Changed;
+ }
+ // Are we able to determine the trip count for the loop?
+ CountValue *TripCount = getTripCount(L);
+ if (TripCount == 0) {
+ return false;
+ }
+ // Does the loop contain any invalid instructions?
+ if (containsInvalidInstruction(L)) {
+ return false;
+ }
+ MachineBasicBlock *Preheader = L->getLoopPreheader();
+  // No preheader means there's no place for the loop instruction.
+ if (Preheader == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
+
+ MachineBasicBlock *LastMBB = L->getExitingBlock();
+ // Don't generate hw loop if the loop has more than one exit.
+ if (LastMBB == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
+
+ // Determine the loop start.
+ MachineBasicBlock *LoopStart = L->getTopBlock();
+ if (L->getLoopLatch() != LastMBB) {
+ // When the exit and latch are not the same, use the latch block as the
+ // start.
+    // The loop start address is used only after the 1st iteration, and the
+    // loop latch may contain instructions that need to be executed after
+    // the 1st iteration.
+ LoopStart = L->getLoopLatch();
+ // Make sure the latch is a successor of the exit, otherwise it won't work.
+ if (!LastMBB->isSuccessor(LoopStart)) {
+ return false;
+ }
+ }
+
+ // Convert the loop to a hardware loop
+ DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
+
+ if (TripCount->isReg()) {
+ // Create a copy of the loop count register.
+ MachineFunction *MF = LastMBB->getParent();
+ const TargetRegisterClass *RC =
+ MF->getRegInfo().getRegClass(TripCount->getReg());
+ unsigned CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), CountReg).addReg(TripCount->getReg());
+ if (TripCount->isNeg()) {
+ unsigned CountReg1 = CountReg;
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::NEG), CountReg).addReg(CountReg1);
+ }
+
+    // Add the Loop instruction to the beginning of the loop.
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg);
+ } else {
+    assert(TripCount->isImm() && "Expecting immediate value for trip count");
+ // Add the Loop immediate instruction to the beginning of the loop.
+ int64_t CountImm = TripCount->getImm();
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::LOOP0_i)).addMBB(LoopStart).addImm(CountImm);
+ }
+
+  // Make sure the loop start always has a reference in the CFG.  We need to
+  // create a BlockAddress operand to get this mechanism to work; both the
+  // MachineBasicBlock and the BasicBlock objects need the flag set.
+ LoopStart->setHasAddressTaken();
+  // This line is needed to set the hasAddressTaken flag on the BasicBlock
+  // object.
+ BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
+
+ // Replace the loop branch with an endloop instruction.
+ DebugLoc dl = LastI->getDebugLoc();
+ BuildMI(*LastMBB, LastI, dl, TII->get(Hexagon::ENDLOOP0)).addMBB(LoopStart);
+
+ // The loop ends with either:
+ // - a conditional branch followed by an unconditional branch, or
+ // - a conditional branch to the loop start.
+ if (LastI->getOpcode() == Hexagon::JMP_Pred ||
+ LastI->getOpcode() == Hexagon::JMP_PredNot) {
+    // Delete the conditional branch and, if the target is outside the loop,
+    // add an unconditional branch out of the loop.
+ MachineBasicBlock *BranchTarget = LastI->getOperand(1).getMBB();
+ LastI = LastMBB->erase(LastI);
+ if (!L->contains(BranchTarget)) {
+ if (LastI != LastMBB->end()) {
+ TII->RemoveBranch(*LastMBB);
+ }
+ SmallVector<MachineOperand, 0> Cond;
+ TII->InsertBranch(*LastMBB, BranchTarget, 0, Cond, dl);
+ }
+ } else {
+ // Conditional branch to loop start; just delete it.
+ LastMBB->erase(LastI);
+ }
+ delete TripCount;
+
+ ++NumHWLoops;
+ return true;
+}
+
+/// createHexagonFixupHwLoops - Factory for creating the hardware loop
+/// phase.
+FunctionPass *llvm::createHexagonFixupHwLoops() {
+ return new HexagonFixupHwLoops();
+}
+
+bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "****** Hexagon Hardware Loop Fixup ******\n");
+
+ bool Changed = fixupLoopInstrs(MF);
+ return Changed;
+}
+
+/// fixupLoopInstrs - For Hexagon, if the loop label is too far from the
+/// loop instruction then we need to set the LC0 and SA0 registers
+/// explicitly instead of using LOOP(start,count). This function
+/// checks the distance, and generates register assignments if needed.
+///
+/// This function makes two passes over the basic blocks. The first
+/// pass computes the offset of the basic block from the start.
+/// The second pass checks all the loop instructions.
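+///
+/// For example (illustrative), with every instruction counted as 4 bytes, a
+/// LOOP0 that sits more than MAX_LOOP_DISTANCE bytes (about 50 instructions)
+/// away from its start block is rewritten to set LC0 and SA0 directly.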
+bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
+
+ // Offset of the current instruction from the start.
+ unsigned InstOffset = 0;
+  // Map each basic block to the offset of its first instruction.
+ DenseMap<MachineBasicBlock*, unsigned> BlockToInstOffset;
+
+ // First pass - compute the offset of each basic block.
+ for (MachineFunction::iterator MBB = MF.begin(), MBBe = MF.end();
+ MBB != MBBe; ++MBB) {
+ BlockToInstOffset[MBB] = InstOffset;
+ InstOffset += (MBB->size() * 4);
+ }
+
+ // Second pass - check each loop instruction to see if it needs to
+ // be converted.
+ InstOffset = 0;
+ bool Changed = false;
+ RegScavenger RS;
+
+ // Loop over all the basic blocks.
+ for (MachineFunction::iterator MBB = MF.begin(), MBBe = MF.end();
+ MBB != MBBe; ++MBB) {
+ InstOffset = BlockToInstOffset[MBB];
+ RS.enterBasicBlock(MBB);
+
+ // Loop over all the instructions.
+ MachineBasicBlock::iterator MIE = MBB->end();
+ MachineBasicBlock::iterator MII = MBB->begin();
+ while (MII != MIE) {
+ if (isHardwareLoop(MII)) {
+ RS.forward(MII);
+ assert(MII->getOperand(0).isMBB() &&
+ "Expect a basic block as loop operand");
+ int diff = InstOffset - BlockToInstOffset[MII->getOperand(0).getMBB()];
+ diff = (diff > 0 ? diff : -diff);
+ if ((unsigned)diff > MAX_LOOP_DISTANCE) {
+          // Convert to explicitly setting LC0 and SA0.
+ convertLoopInstr(MF, MII, RS);
+ MII = MBB->erase(MII);
+ Changed = true;
+ } else {
+ ++MII;
+ }
+ } else {
+ ++MII;
+ }
+ InstOffset += 4;
+ }
+ }
+
+  return Changed;
+}
+
+/// convertLoopInstr - Convert a loop instruction to a sequence of
+/// instructions that set the LC and SA registers explicitly.
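+///
+/// Schematically (an illustrative sketch, not from the original code),
+/// LOOP0(.LBB_loop, <count>) becomes roughly:
+///     lc0 = rC                     ; or: rS = #imm ; lc0 = rS
+///     rS  = ##.LBB_loop            ; CONST32_Label into a scavenged reg
+///     sa0 = rS
+/// where rS is the register obtained from the scavenger.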
+void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF,
+ MachineBasicBlock::iterator &MII,
+ RegScavenger &RS) {
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ MachineBasicBlock *MBB = MII->getParent();
+ DebugLoc DL = MII->getDebugLoc();
+ unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0);
+
+ // First, set the LC0 with the trip count.
+ if (MII->getOperand(1).isReg()) {
+ // Trip count is a register
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ .addReg(MII->getOperand(1).getReg());
+ } else {
+ // Trip count is an immediate.
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFRI), Scratch)
+ .addImm(MII->getOperand(1).getImm());
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ .addReg(Scratch);
+ }
+ // Then, set the SA0 with the loop start address.
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::CONST32_Label), Scratch)
+ .addMBB(MII->getOperand(0).getMBB());
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::SA0).addReg(Scratch);
+}
--- /dev/null
+//==-- HexagonISelDAGToDAG.cpp - A dag to dag inst selector for Hexagon ----==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the Hexagon target.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-isel"
+#include "HexagonISelLowering.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===--------------------------------------------------------------------===//
+/// HexagonDAGToDAGISel - Hexagon specific code to select Hexagon machine
+/// instructions for SelectionDAG operations.
+///
+namespace {
+class HexagonDAGToDAGISel : public SelectionDAGISel {
+ /// Subtarget - Keep a pointer to the Hexagon Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const HexagonSubtarget &Subtarget;
+
+ // Keep a reference to HexagonTargetMachine.
+ HexagonTargetMachine& TM;
+ const HexagonInstrInfo *TII;
+
+public:
+ explicit HexagonDAGToDAGISel(HexagonTargetMachine &targetmachine)
+ : SelectionDAGISel(targetmachine),
+ Subtarget(targetmachine.getSubtarget<HexagonSubtarget>()),
+ TM(targetmachine),
+ TII(static_cast<const HexagonInstrInfo*>(TM.getInstrInfo())) {
+
+ }
+
+ SDNode *Select(SDNode *N);
+
+ // Complex Pattern Selectors.
+ bool SelectADDRri(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_0(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_1(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_2(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectMEMriS11_2(SDValue& Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRriS11_3(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRrr(SDValue &Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRriU6_0(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriU6_1(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriU6_2(SDValue& N, SDValue &R1, SDValue &R2);
+
+ virtual const char *getPassName() const {
+ return "Hexagon DAG->DAG Pattern Instruction Selection";
+ }
+
+ /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+ /// inline asm expressions.
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps);
+ bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Offset);
+
+ SDNode *SelectLoad(SDNode *N);
+ SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl);
+ SDNode *SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl);
+ SDNode *SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode,
+ DebugLoc dl);
+ SDNode *SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode,
+ DebugLoc dl);
+ SDNode *SelectBaseOffsetStore(StoreSDNode *ST, DebugLoc dl);
+ SDNode *SelectIndexedStore(StoreSDNode *ST, DebugLoc dl);
+ SDNode *SelectStore(SDNode *N);
+ SDNode *SelectSHL(SDNode *N);
+ SDNode *SelectSelect(SDNode *N);
+ SDNode *SelectTruncate(SDNode *N);
+ SDNode *SelectMul(SDNode *N);
+ SDNode *SelectZeroExtend(SDNode *N);
+ SDNode *SelectIntrinsicWOChain(SDNode *N);
+ SDNode *SelectConstant(SDNode *N);
+ SDNode *SelectAdd(SDNode *N);
+
+ // Include the pieces autogenerated from the target description.
+#include "HexagonGenDAGISel.inc"
+};
+} // end anonymous namespace
+
+
+/// createHexagonISelDag - This pass converts a legalized DAG into a
+/// Hexagon-specific DAG, ready for instruction scheduling.
+///
+FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM) {
+ return new HexagonDAGToDAGISel(TM);
+}
+
+static bool IsS11_0_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // s11_0 predicate - True if the immediate fits in an 11-bit sign extended
+  // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<11>(v);
+}
+
+
+static bool IsS11_1_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // s11_1 predicate - True if the immediate is an 11-bit sign extended
+  // value shifted left by 1 bit.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,1>(v);
+}
+
+
+static bool IsS11_2_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // s11_2 predicate - True if the immediate is an 11-bit sign extended
+  // value shifted left by 2 bits.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,2>(v);
+}
+
+
+static bool IsS11_3_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // s11_3 predicate - True if the immediate is an 11-bit sign extended
+  // value shifted left by 3 bits.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,3>(v);
+}
+
+
+static bool IsU6_0_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // u6_0 predicate - True if the immediate fits in a 6-bit unsigned
+  // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<6>(v);
+}
+
+
+static bool IsU6_1_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // u6_1 predicate - True if the immediate is a 6-bit unsigned value
+  // shifted left by 1 bit.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,1>(v);
+}
+
+
+static bool IsU6_2_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // u6_2 predicate - True if the immediate is a 6-bit unsigned value
+  // shifted left by 2 bits.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,2>(v);
+}
+
+
+// Intrinsics that return a predicate.
+static unsigned doesIntrinsicReturnPredicate(unsigned ID)
+{
+ switch (ID) {
+ default:
+ return 0;
+ case Intrinsic::hexagon_C2_cmpeq:
+ case Intrinsic::hexagon_C2_cmpgt:
+ case Intrinsic::hexagon_C2_cmpgtu:
+ case Intrinsic::hexagon_C2_cmpgtup:
+ case Intrinsic::hexagon_C2_cmpgtp:
+ case Intrinsic::hexagon_C2_cmpeqp:
+ case Intrinsic::hexagon_C2_bitsset:
+ case Intrinsic::hexagon_C2_bitsclr:
+ case Intrinsic::hexagon_C2_cmpeqi:
+ case Intrinsic::hexagon_C2_cmpgti:
+ case Intrinsic::hexagon_C2_cmpgtui:
+ case Intrinsic::hexagon_C2_cmpgei:
+ case Intrinsic::hexagon_C2_cmpgeui:
+ case Intrinsic::hexagon_C2_cmplt:
+ case Intrinsic::hexagon_C2_cmpltu:
+ case Intrinsic::hexagon_C2_bitsclri:
+ case Intrinsic::hexagon_C2_and:
+ case Intrinsic::hexagon_C2_or:
+ case Intrinsic::hexagon_C2_xor:
+ case Intrinsic::hexagon_C2_andn:
+ case Intrinsic::hexagon_C2_not:
+ case Intrinsic::hexagon_C2_orn:
+ case Intrinsic::hexagon_C2_pxfer_map:
+ case Intrinsic::hexagon_C2_any8:
+ case Intrinsic::hexagon_C2_all8:
+ case Intrinsic::hexagon_A2_vcmpbeq:
+ case Intrinsic::hexagon_A2_vcmpbgtu:
+ case Intrinsic::hexagon_A2_vcmpheq:
+ case Intrinsic::hexagon_A2_vcmphgt:
+ case Intrinsic::hexagon_A2_vcmphgtu:
+ case Intrinsic::hexagon_A2_vcmpweq:
+ case Intrinsic::hexagon_A2_vcmpwgt:
+ case Intrinsic::hexagon_A2_vcmpwgtu:
+ case Intrinsic::hexagon_C2_tfrrp:
+ case Intrinsic::hexagon_S2_tstbit_i:
+ case Intrinsic::hexagon_S2_tstbit_r:
+ return 1;
+ }
+}
+
+
+// Intrinsics that have predicate operands.
+static unsigned doesIntrinsicContainPredicate(unsigned ID)
+{
+ switch (ID) {
+ default:
+ return 0;
+ case Intrinsic::hexagon_C2_tfrpr:
+ return Hexagon::TFR_RsPd;
+ case Intrinsic::hexagon_C2_and:
+ return Hexagon::AND_pp;
+ case Intrinsic::hexagon_C2_xor:
+ return Hexagon::XOR_pp;
+ case Intrinsic::hexagon_C2_or:
+ return Hexagon::OR_pp;
+ case Intrinsic::hexagon_C2_not:
+ return Hexagon::NOT_pp;
+ case Intrinsic::hexagon_C2_any8:
+ return Hexagon::ANY_pp;
+ case Intrinsic::hexagon_C2_all8:
+ return Hexagon::ALL_pp;
+ case Intrinsic::hexagon_C2_vitpack:
+ return Hexagon::VITPACK_pp;
+ case Intrinsic::hexagon_C2_mask:
+ return Hexagon::MASK_p;
+ case Intrinsic::hexagon_C2_mux:
+ return Hexagon::MUX_rr;
+
+ // Mapping hexagon_C2_muxir to MUX_pri. This is pretty weird - but
+ // that's how it's mapped in q6protos.h.
+ case Intrinsic::hexagon_C2_muxir:
+ return Hexagon::MUX_ri;
+
+ // Mapping hexagon_C2_muxri to MUX_pir. This is pretty weird - but
+ // that's how it's mapped in q6protos.h.
+ case Intrinsic::hexagon_C2_muxri:
+ return Hexagon::MUX_ir;
+
+ case Intrinsic::hexagon_C2_muxii:
+ return Hexagon::MUX_ii;
+ case Intrinsic::hexagon_C2_vmux:
+ return Hexagon::VMUX_prr64;
+ case Intrinsic::hexagon_S2_valignrb:
+ return Hexagon::VALIGN_rrp;
+ case Intrinsic::hexagon_S2_vsplicerb:
+ return Hexagon::VSPLICE_rrp;
+ }
+}
+
+
+static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
+ if (MemType == MVT::i64 && isShiftedInt<11,3>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i32 && isShiftedInt<11,2>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i16 && isShiftedInt<11,1>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i8 && isInt<11>(Offset)) {
+ return true;
+ }
+ return false;
+}
+
+
+//
+// Try to lower loads of GlobalAddresses into base+offset loads.  Custom
+// lowering for GlobalAddress nodes has already turned them into a
+// CONST32.
+//
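+// As a rough example (illustrative only), a load of the global @g at offset
+// 8 becomes:
+//     rB = ##g                      ; CONST32_set of the global's address
+//     rD = memw(rB + #8)            ; LDriw_indexed with the folded offset
+//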
+SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
+ EVT LoadedVT = LD->getMemoryVT();
+ SDValue Chain = LD->getChain();
+ SDNode* Const32 = LD->getBasePtr().getNode();
+ unsigned Opcode = 0;
+
+ if (Const32->getOpcode() == HexagonISD::CONST32 &&
+ ISD::isNormalLoad(LD)) {
+ SDValue Base = Const32->getOperand(0);
+ EVT LoadedVT = LD->getMemoryVT();
+ int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
+ if (Offset != 0 && OffsetFitsS11(LoadedVT, Offset)) {
+ MVT PointerTy = TLI.getPointerTy();
+ const GlobalValue* GV =
+ cast<GlobalAddressSDNode>(Base)->getGlobal();
+ SDValue TargAddr =
+ CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
+ SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
+ dl, PointerTy,
+ TargAddr);
+ // Figure out base + offset opcode
+ if (LoadedVT == MVT::i64) Opcode = Hexagon::LDrid_indexed;
+ else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
+ else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed;
+ else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed;
+ else assert (0 && "unknown memory type");
+
+ // Build indexed load.
+ SDValue TargetConstOff = CurDAG->getTargetConstant(Offset, PointerTy);
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::Other,
+ SDValue(NewBase,0),
+ TargetConstOff,
+ Chain);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ ReplaceUses(LD, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
+ unsigned Opcode,
+ DebugLoc dl)
+{
+ SDValue Chain = LD->getChain();
+ EVT LoadedVT = LD->getMemoryVT();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ SDValue N1 = LD->getOperand(1);
+ SDValue CPTmpN1_0;
+ SDValue CPTmpN1_1;
+ if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
+ N1.getNode()->getValueType(0) == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
+ MVT::Other, Base, TargetConst,
+ Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl, MVT::i64,
+ SDValue(Result_1, 0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_2, 0),
+ SDValue(Result_1, 1),
+ SDValue(Result_1, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_2;
+ }
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other, Base, TargetConst0,
+ Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl,
+ MVT::i64, SDValue(Result_1, 0));
+ SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl,
+ MVT::i32, Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_2, 0),
+ SDValue(Result_3, 0),
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_2;
+ }
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
+ unsigned Opcode,
+ DebugLoc dl)
+{
+ SDValue Chain = LD->getChain();
+ EVT LoadedVT = LD->getMemoryVT();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ SDValue N1 = LD->getOperand(1);
+ SDValue CPTmpN1_0;
+ SDValue CPTmpN1_1;
+ if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
+ N1.getNode()->getValueType(0) == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::i32, MVT::Other, Base,
+ TargetConstVal, Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2,0),
+ SDValue(Result_1,0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_3, 0),
+ SDValue(Result_1, 1),
+ SDValue(Result_1, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_3;
+ }
+
+ // Generate an indirect load.
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other,
+ Base, TargetConst0, Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2,0),
+ SDValue(Result_1,0));
+ // Add offset to base.
+ SDNode* Result_4 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_3, 0), // Load value.
+ SDValue(Result_4, 0), // New address.
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_3;
+ }
+
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
+ SDValue Chain = LD->getChain();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ // Get the constant value.
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ EVT LoadedVT = LD->getMemoryVT();
+ unsigned Opcode = 0;
+
+ // Check for zero ext loads.
+ bool zextval = (LD->getExtensionType() == ISD::ZEXTLOAD);
+
+ // Figure out the opcode.
+ if (LoadedVT == MVT::i64) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = Hexagon::POST_LDrid;
+ else
+ Opcode = Hexagon::LDrid;
+ } else if (LoadedVT == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = Hexagon::POST_LDriw;
+ else
+ Opcode = Hexagon::LDriw;
+ } else if (LoadedVT == MVT::i16) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = zextval ? Hexagon::POST_LDriuh : Hexagon::POST_LDrih;
+ else
+ Opcode = zextval ? Hexagon::LDriuh : Hexagon::LDrih;
+ } else if (LoadedVT == MVT::i8) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = zextval ? Hexagon::POST_LDriub : Hexagon::POST_LDrib;
+ else
+ Opcode = zextval ? Hexagon::LDriub : Hexagon::LDrib;
+ } else
+ assert (0 && "unknown memory type");
+
+ // For zero ext i64 loads, we need to add combine instructions.
+ if (LD->getValueType(0) == MVT::i64 &&
+ LD->getExtensionType() == ISD::ZEXTLOAD) {
+ return SelectIndexedLoadZeroExtend64(LD, Opcode, dl);
+ }
+ if (LD->getValueType(0) == MVT::i64 &&
+ LD->getExtensionType() == ISD::SEXTLOAD) {
+ // Handle sign ext i64 loads.
+ return SelectIndexedLoadSignExtend64(LD, Opcode, dl);
+ }
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::i32, MVT::Other, Base,
+ TargetConstVal, Chain);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result, 0),
+ SDValue(Result, 1),
+ SDValue(Result, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result;
+ } else {
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::Other, Base, TargetConst0,
+ Chain);
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_1, 0),
+ SDValue(Result_2, 0),
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_1;
+ }
+
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
+ SDNode *result;
+ DebugLoc dl = N->getDebugLoc();
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ ISD::MemIndexedMode AM = LD->getAddressingMode();
+
+ // Handle indexed loads.
+ if (AM != ISD::UNINDEXED) {
+ result = SelectIndexedLoad(LD, dl);
+ } else {
+ result = SelectBaseOffsetLoad(LD, dl);
+ }
+
+ return result;
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
+ SDValue Chain = ST->getChain();
+ SDValue Base = ST->getBasePtr();
+ SDValue Offset = ST->getOffset();
+ SDValue Value = ST->getValue();
+ SDNode *OffsetNode = Offset.getNode();
+ // Get the constant value.
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ EVT StoredVT = ST->getMemoryVT();
+
+ // Offset value must be within representable range
+ // and must have correct alignment properties.
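+  // For instance (illustrative), a post-incremented double-word store
+  // typically only admits small, 8-byte-aligned increments, so an offset of
+  // 8 can use POST_STdri while an offset of 12 cannot.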
+ if (TII->isValidAutoIncImm(StoredVT, Val)) {
+ SDValue Ops[] = { Value, Base,
+ CurDAG->getTargetConstant(Val, MVT::i32), Chain};
+ unsigned Opcode = 0;
+
+ // Figure out the post inc version of opcode.
+ if (StoredVT == MVT::i64) Opcode = Hexagon::POST_STdri;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::POST_STwri;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::POST_SThri;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::POST_STbri;
+ else assert (0 && "unknown memory type");
+
+ // Build post increment store.
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other, Ops, 4);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+
+ ReplaceUses(ST, Result);
+ ReplaceUses(SDValue(ST,1), SDValue(Result,1));
+ return Result;
+ }
+
+ // Note: Order of operands matches the def of instruction:
+ // def STrid : STInst<(outs), (ins MEMri:$addr, DoubleRegs:$src1), ...
+ // and it differs for POST_ST* for instance.
+ SDValue Ops[] = { Base, CurDAG->getTargetConstant(0, MVT::i32), Value,
+ Chain};
+ unsigned Opcode = 0;
+
+ // Figure out the opcode.
+ if (StoredVT == MVT::i64) Opcode = Hexagon::STrid;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib;
+ else assert (0 && "unknown memory type");
+
+ // Build regular store.
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops,
+ 4);
+  // Build the split increment instruction.
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base,
+ TargetConstVal,
+ SDValue(Result_1, 0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+
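+  // The post-increment store produces (updated base address, chain); map the
+  // updated base to the ADD_ri result and the chain to the plain store node.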
+ ReplaceUses(SDValue(ST,0), SDValue(Result_2,0));
+ ReplaceUses(SDValue(ST,1), SDValue(Result_1,0));
+ return Result_2;
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
+ DebugLoc dl) {
+ SDValue Chain = ST->getChain();
+ SDNode* Const32 = ST->getBasePtr().getNode();
+ SDValue Value = ST->getValue();
+ unsigned Opcode = 0;
+
+  // Try to lower stores of GlobalAddresses into indexed stores. Custom
+  // lowering for GlobalAddress nodes has already turned the address into a
+  // CONST32. Avoid truncating stores for the moment; post-inc stores do the
+  // same. There does not appear to be a reason for this, so a bug will be
+  // filed to fix it.
+ if ((Const32->getOpcode() == HexagonISD::CONST32) &&
+ !(Value.getValueType() == MVT::i64 && ST->isTruncatingStore())) {
+ SDValue Base = Const32->getOperand(0);
+ if (Base.getOpcode() == ISD::TargetGlobalAddress) {
+ EVT StoredVT = ST->getMemoryVT();
+ int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
+ if (Offset != 0 && OffsetFitsS11(StoredVT, Offset)) {
+ MVT PointerTy = TLI.getPointerTy();
+ const GlobalValue* GV =
+ cast<GlobalAddressSDNode>(Base)->getGlobal();
+ SDValue TargAddr =
+ CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
+ SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
+ dl, PointerTy,
+ TargAddr);
+
+ // Figure out base + offset opcode
+ if (StoredVT == MVT::i64) Opcode = Hexagon::STrid_indexed;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed;
+ else assert (0 && "unknown memory type");
+
+ SDValue Ops[] = {SDValue(NewBase,0),
+ CurDAG->getTargetConstant(Offset,PointerTy),
+ Value, Chain};
+ // build indexed store
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 4);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ ReplaceUses(ST, Result);
+ return Result;
+ }
+ }
+ }
+
+ return SelectCode(ST);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+
+ // Handle indexed stores.
+ if (AM != ISD::UNINDEXED) {
+ return SelectIndexedStore(ST, dl);
+ }
+
+ return SelectBaseOffsetStore(ST, dl);
+}
+
+SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+
+ //
+ // %conv.i = sext i32 %tmp1 to i64
+ // %conv2.i = sext i32 %add to i64
+ // %mul.i = mul nsw i64 %conv2.i, %conv.i
+ //
+ // --- match with the following ---
+ //
+ // %mul.i = mpy (%tmp1, %add)
+ //
+
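+  // Only a 64-bit multiply whose operands are 32-bit values that were
+  // sign-extended (via sign_extend or a sextload) is mapped directly onto
+  // MPY64; everything else falls back to the generated matcher.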
+ if (N->getValueType(0) == MVT::i64) {
+    // A 64-bit signed multiply.
+ SDValue MulOp0 = N->getOperand(0);
+ SDValue MulOp1 = N->getOperand(1);
+
+ SDValue OP0;
+ SDValue OP1;
+
+ // Handle sign_extend and sextload.
+ if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext0 = MulOp0.getOperand(0);
+ if (Sext0.getNode()->getValueType(0) != MVT::i32) {
+        return SelectCode(N);
+ }
+
+ OP0 = Sext0;
+ } else if (MulOp0.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp0.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+        return SelectCode(N);
+ }
+
+ SDValue Base = LD->getBasePtr();
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(), TargetConst0,
+ Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Same goes for the second operand.
+ if (MulOp1.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext1 = MulOp1.getOperand(0);
+ if (Sext1.getNode()->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ OP1 = Sext1;
+ } else if (MulOp1.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp1.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Base = LD->getBasePtr();
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(), TargetConst0,
+ Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Generate a mpy instruction.
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY64, dl, MVT::i64,
+ OP0, OP1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+
+ return SelectCode(N);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() == ISD::SETCC) {
+ SDValue N00 = N0.getOperand(0);
+ if (N00.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ SDValue N000 = N00.getOperand(0);
+ SDValue N001 = N00.getOperand(1);
+ if (cast<VTSDNode>(N001)->getVT() == MVT::i16) {
+ SDValue N01 = N0.getOperand(1);
+ SDValue N02 = N0.getOperand(2);
+
+ // Pattern: (select:i32 (setcc:i1 (sext_inreg:i32 IntRegs:i32:$src2,
+ // i16:Other),IntRegs:i32:$src1, SETLT:Other),IntRegs:i32:$src1,
+ // IntRegs:i32:$src2)
+ // Emits: (MAXh_rr:i32 IntRegs:i32:$src1, IntRegs:i32:$src2)
+ // Pattern complexity = 9 cost = 1 size = 0.
+ if (cast<CondCodeSDNode>(N02)->get() == ISD::SETLT) {
+ SDValue N1 = N->getOperand(1);
+ if (N01 == N1) {
+ SDValue N2 = N->getOperand(2);
+ if (N000 == N2 &&
+ N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
+ N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ MVT::i32, N000);
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MAXw_rr, dl,
+ MVT::i32,
+ SDValue(SextNode, 0),
+ N1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+
+ // Pattern: (select:i32 (setcc:i1 (sext_inreg:i32 IntRegs:i32:$src2,
+ // i16:Other), IntRegs:i32:$src1, SETGT:Other), IntRegs:i32:$src1,
+ // IntRegs:i32:$src2)
+ // Emits: (MINh_rr:i32 IntRegs:i32:$src1, IntRegs:i32:$src2)
+ // Pattern complexity = 9 cost = 1 size = 0.
+ if (cast<CondCodeSDNode>(N02)->get() == ISD::SETGT) {
+ SDValue N1 = N->getOperand(1);
+ if (N01 == N1) {
+ SDValue N2 = N->getOperand(2);
+ if (N000 == N2 &&
+ N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
+ N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ MVT::i32, N000);
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MINw_rr, dl,
+ MVT::i32,
+ SDValue(SextNode, 0),
+ N1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDValue Shift = N->getOperand(0);
+
+ //
+ // %conv.i = sext i32 %tmp1 to i64
+ // %conv2.i = sext i32 %add to i64
+ // %mul.i = mul nsw i64 %conv2.i, %conv.i
+ // %shr5.i = lshr i64 %mul.i, 32
+ // %conv3.i = trunc i64 %shr5.i to i32
+ //
+ // --- match with the following ---
+ //
+ // %conv3.i = mpy (%tmp1, %add)
+ //
+ // Trunc to i32.
+ if (N->getValueType(0) == MVT::i32) {
+ // Trunc from i64.
+ if (Shift.getNode()->getValueType(0) == MVT::i64) {
+ // Trunc child is logical shift right.
+ if (Shift.getOpcode() != ISD::SRL) {
+ return SelectCode(N);
+ }
+
+ SDValue ShiftOp0 = Shift.getOperand(0);
+ SDValue ShiftOp1 = Shift.getOperand(1);
+
+ // Shift by const 32
+ if (ShiftOp1.getOpcode() != ISD::Constant) {
+ return SelectCode(N);
+ }
+
+ int32_t ShiftConst =
+ cast<ConstantSDNode>(ShiftOp1.getNode())->getSExtValue();
+ if (ShiftConst != 32) {
+ return SelectCode(N);
+ }
+
+      // Shifting an i64 signed multiply.
+ SDValue Mul = ShiftOp0;
+ if (Mul.getOpcode() != ISD::MUL) {
+ return SelectCode(N);
+ }
+
+ SDValue MulOp0 = Mul.getOperand(0);
+ SDValue MulOp1 = Mul.getOperand(1);
+
+ SDValue OP0;
+ SDValue OP1;
+
+ // Handle sign_extend and sextload
+ if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext0 = MulOp0.getOperand(0);
+ if (Sext0.getNode()->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ OP0 = Sext0;
+ } else if (MulOp0.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp0.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Base = LD->getBasePtr();
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(),
+ TargetConst0, Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Same goes for the second operand.
+ if (MulOp1.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext1 = MulOp1.getOperand(0);
+ if (Sext1.getNode()->getValueType(0) != MVT::i32)
+ return SelectCode(N);
+
+ OP1 = Sext1;
+ } else if (MulOp1.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp1.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Base = LD->getBasePtr();
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(),
+ TargetConst0, Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Generate a mpy instruction.
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY, dl, MVT::i32,
+ OP0, OP1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
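+  // Fold (shl (mul x, c1), c2) into MPYI_ri(x, c1 << c2), and
+  // (shl (sub 0, (shl x, c1)), c2) into MPYI_ri(x, -(1 << (c1 + c2))),
+  // provided the resulting immediate fits in a signed 9-bit field.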
+ if (N->getValueType(0) == MVT::i32) {
+ SDValue Shl_0 = N->getOperand(0);
+ SDValue Shl_1 = N->getOperand(1);
+ // RHS is const.
+ if (Shl_1.getOpcode() == ISD::Constant) {
+ if (Shl_0.getOpcode() == ISD::MUL) {
+ SDValue Mul_0 = Shl_0.getOperand(0); // Val
+ SDValue Mul_1 = Shl_0.getOperand(1); // Const
+ // RHS of mul is const.
+ if (Mul_1.getOpcode() == ISD::Constant) {
+ int32_t ShlConst =
+ cast<ConstantSDNode>(Shl_1.getNode())->getSExtValue();
+ int32_t MulConst =
+ cast<ConstantSDNode>(Mul_1.getNode())->getSExtValue();
+ int32_t ValConst = MulConst << ShlConst;
+ SDValue Val = CurDAG->getTargetConstant(ValConst,
+ MVT::i32);
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val.getNode()))
+ if (isInt<9>(CN->getSExtValue())) {
+ SDNode* Result =
+ CurDAG->getMachineNode(Hexagon::MPYI_ri, dl,
+ MVT::i32, Mul_0, Val);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+
+ }
+ } else if (Shl_0.getOpcode() == ISD::SUB) {
+ SDValue Sub_0 = Shl_0.getOperand(0); // Const 0
+ SDValue Sub_1 = Shl_0.getOperand(1); // Val
+ if (Sub_0.getOpcode() == ISD::Constant) {
+ int32_t SubConst =
+ cast<ConstantSDNode>(Sub_0.getNode())->getSExtValue();
+ if (SubConst == 0) {
+ if (Sub_1.getOpcode() == ISD::SHL) {
+ SDValue Shl2_0 = Sub_1.getOperand(0); // Val
+ SDValue Shl2_1 = Sub_1.getOperand(1); // Const
+ if (Shl2_1.getOpcode() == ISD::Constant) {
+ int32_t ShlConst =
+ cast<ConstantSDNode>(Shl_1.getNode())->getSExtValue();
+ int32_t Shl2Const =
+ cast<ConstantSDNode>(Shl2_1.getNode())->getSExtValue();
+ int32_t ValConst = 1 << (ShlConst+Shl2Const);
+ SDValue Val = CurDAG->getTargetConstant(-ValConst, MVT::i32);
+ if (ConstantSDNode *CN =
+ dyn_cast<ConstantSDNode>(Val.getNode()))
+ if (isInt<9>(CN->getSExtValue())) {
+ SDNode* Result =
+ CurDAG->getMachineNode(Hexagon::MPYI_ri, dl, MVT::i32,
+ Shl2_0, Val);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return SelectCode(N);
+}
+
+
+//
+// If there is a zero_extend followed by an intrinsic in the DAG (meaning the
+// result of the intrinsic is a predicate), convert the zero_extend into a
+// transfer instruction.
+//
+// The zero_extend -> transfer lowering is done here. Otherwise, the
+// zero_extend would be converted into a MUX, because predicate registers are
+// defined as 1 bit in the compiler while the architecture defines them as
+// 8-bit registers. We want to preserve all of the lower 8 bits, not just the
+// single LSB.
+//
+SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDNode *IsIntrinsic = N->getOperand(0).getNode();
+ if ((IsIntrinsic->getOpcode() == ISD::INTRINSIC_WO_CHAIN)) {
+ unsigned ID =
+ cast<ConstantSDNode>(IsIntrinsic->getOperand(0))->getZExtValue();
+ if (doesIntrinsicReturnPredicate(ID)) {
+ // Now we need to differentiate target data types.
+ if (N->getValueType(0) == MVT::i64) {
+ // Convert the zero_extend to Rs = Pd followed by COMBINE_rr(0,Rs).
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ MVT::i32,
+ SDValue(IsIntrinsic, 0));
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl,
+ MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2, 0),
+ SDValue(Result_1, 0));
+ ReplaceUses(N, Result_3);
+ return Result_3;
+ }
+ if (N->getValueType(0) == MVT::i32) {
+ // Convert the zero_extend to Rs = Pd
+ SDNode* RsPd = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ MVT::i32,
+ SDValue(IsIntrinsic, 0));
+ ReplaceUses(N, RsPd);
+ return RsPd;
+ }
+ assert(0 && "Unexpected value type");
+ }
+ }
+ return SelectCode(N);
+}
+
+
+//
+// Check for intrinsics that have predicate registers as operand(s)
+// and lower them to the corresponding machine instruction.
+//
+SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID);
+
+ // We are concerned with only those intrinsics that have predicate registers
+ // as at least one of the operands.
+ if (IntrinsicWithPred) {
+ SmallVector<SDValue, 8> Ops;
+ const MCInstrDesc &MCID = TII->get(IntrinsicWithPred);
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ // Iterate over all the operands of the intrinsics.
+ // For PredRegs, do the transfer.
+ // For Double/Int Regs, just preserve the value
+ // For immediates, lower it.
+ for (unsigned i = 1; i < N->getNumOperands(); ++i) {
+ SDNode *Arg = N->getOperand(i).getNode();
+ const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
+
+ if (RC == Hexagon::IntRegsRegisterClass ||
+ RC == Hexagon::DoubleRegsRegisterClass) {
+ Ops.push_back(SDValue(Arg, 0));
+ } else if (RC == Hexagon::PredRegsRegisterClass) {
+ // Do the transfer.
+ SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
+ SDValue(Arg, 0));
+ Ops.push_back(SDValue(PdRs,0));
+ } else if (RC == NULL && (dyn_cast<ConstantSDNode>(Arg) != NULL)) {
+        // This is an immediate operand. Lower it here, making sure that we DO
+        // have a ConstantSDNode for the immediate value.
+ int32_t Val = cast<ConstantSDNode>(Arg)->getSExtValue();
+ SDValue SDVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ Ops.push_back(SDVal);
+ } else {
+ assert(0 && "Unimplemented");
+ }
+ }
+ EVT ReturnValueVT = N->getValueType(0);
+ SDNode *Result = CurDAG->getMachineNode(IntrinsicWithPred, dl,
+ ReturnValueVT,
+ Ops.data(), Ops.size());
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ return SelectCode(N);
+}
+
+
+//
+// Map predicate true (encoded as -1 in LLVM) to an XOR.
+//
+SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ if (N->getValueType(0) == MVT::i1) {
+ SDNode* Result;
+ int32_t Val = cast<ConstantSDNode>(N)->getSExtValue();
+ if (Val == -1) {
+ unsigned NewIntReg = TM.getInstrInfo()->createVR(MF, MVT(MVT::i32));
+ SDValue Reg = CurDAG->getRegister(NewIntReg, MVT::i32);
+
+      // Create the IntReg = 0 node.
+ SDNode* IntRegTFR =
+ CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ CurDAG->getTargetConstant(0, MVT::i32));
+
+ // Pd = IntReg
+ SDNode* Pd = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
+ SDValue(IntRegTFR, 0));
+
+ // not(Pd)
+ SDNode* NotPd = CurDAG->getMachineNode(Hexagon::NOT_pp, dl, MVT::i1,
+ SDValue(Pd, 0));
+
+ // xor(not(Pd))
+ Result = CurDAG->getMachineNode(Hexagon::XOR_pp, dl, MVT::i1,
+ SDValue(Pd, 0), SDValue(NotPd, 0));
+
+      // We have just built:
+      //   Rs = 0
+      //   Pd = Rs
+      //   Pd = xor(not(Pd), Pd)
+
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
+//
+// Map an add followed by an asr into an accumulating asr (asr +=).
+//
+SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ if (N->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+ // Identify nodes of the form: add(asr(...)).
+ SDNode* Src1 = N->getOperand(0).getNode();
+ if (Src1->getOpcode() != ISD::SRA || !Src1->hasOneUse()
+ || Src1->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ // Build Rd = Rd' + asr(Rs, Rt). The machine constraints will ensure that
+  // Rd and Rd' are assigned to the same register.
+ SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_rr_acc, dl, MVT::i32,
+ N->getOperand(1),
+ Src1->getOperand(0),
+ Src1->getOperand(1));
+ ReplaceUses(N, Result);
+
+ return Result;
+}
+
+
+SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
+ if (N->isMachineOpcode())
+ return NULL; // Already selected.
+
+
+ switch (N->getOpcode()) {
+ case ISD::Constant:
+ return SelectConstant(N);
+
+ case ISD::ADD:
+ return SelectAdd(N);
+
+ case ISD::SHL:
+ return SelectSHL(N);
+
+ case ISD::LOAD:
+ return SelectLoad(N);
+
+ case ISD::STORE:
+ return SelectStore(N);
+
+ case ISD::SELECT:
+ return SelectSelect(N);
+
+ case ISD::TRUNCATE:
+ return SelectTruncate(N);
+
+ case ISD::MUL:
+ return SelectMul(N);
+
+ case ISD::ZERO_EXTEND:
+ return SelectZeroExtend(N);
+
+ case ISD::INTRINSIC_WO_CHAIN:
+ return SelectIntrinsicWOChain(N);
+ }
+
+ return SelectCode(N);
+}
+
+
+//
+// Hexagon_TODO: Five functions for ADDRri?! Surely there must be a better way
+// to define these instructions.
+//
+bool HexagonDAGToDAGISel::SelectADDRri(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_0(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_0_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_0_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_1(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_1_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_1_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_2_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_2_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_0(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_0_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_0_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_1(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_1_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_1_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_2_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_2_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectMEMriS11_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+
+  // Both the ADD and non-ADD forms currently delegate to the reg+imm (S11_2)
+  // selector.
+  return SelectADDRriS11_2(Addr, Base, Offset);
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_3(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_3_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_3_Offset(Offset.getNode()));
+}
+
+bool HexagonDAGToDAGISel::SelectADDRrr(SDValue &Addr, SDValue &R1,
+ SDValue &R2) {
+ if (Addr.getOpcode() == ISD::FrameIndex) return false;
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ if (isInt<13>(CN->getSExtValue()))
+ return false; // Let the reg+imm pattern catch this!
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ return true;
+ }
+
+ R1 = Addr;
+
+ return true;
+}
+
+
+// Handle the generic address case. It is accessed from inline asm '=m'
+// constraints, which could have any kind of pointer.
+bool HexagonDAGToDAGISel::SelectAddr(SDNode *Op, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+
+bool HexagonDAGToDAGISel::
+SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1;
+
+ switch (ConstraintCode) {
+ case 'o': // Offsetable.
+ case 'v': // Not offsetable.
+ default: return true;
+ case 'm': // Memory.
+ if (!SelectAddr(Op.getNode(), Op, Op0, Op1))
+ return true;
+ break;
+ }
+
+ OutOps.push_back(Op0);
+ OutOps.push_back(Op1);
+ return false;
+}
--- /dev/null
+//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interfaces that Hexagon uses to lower LLVM code
+// into a selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonISelLowering.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonTargetObjectFile.h"
+#include "HexagonSubtarget.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/GlobalAlias.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+const unsigned Hexagon_MAX_RET_SIZE = 64;
+using namespace llvm;
+
+static cl::opt<bool>
+EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
+ cl::desc("Control jump table emission on Hexagon target"));
+
+int NumNamedVarArgParams = -1;
+
+// Implement calling convention for Hexagon.
+static bool
+CC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+  // NumNamedVarArgParams cannot be zero for a VarArg function.
+  assert ( (NumNamedVarArgParams > 0) &&
+           "NumNamedVarArgParams must be bigger than zero.");
+
+ if ( (int)ValNo < NumNamedVarArgParams ) {
+ // Deal with named arguments.
+ return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
+ }
+
+  // Deal with unnamed arguments.
+ unsigned ofst;
+ if (ArgFlags.isByVal()) {
+ // If pass-by-value, the size allocated on stack is decided
+ // by ArgFlags.getByValSize(), not by the size of LocVT.
+ assert ((ArgFlags.getByValSize() > 8) &&
+ "ByValSize must be bigger than 8 bytes");
+ ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+ if (LocVT == MVT::i32) {
+ ofst = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+ if (LocVT == MVT::i64) {
+ ofst = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+ llvm_unreachable(0);
+
+ return true;
+}
+
+
+static bool
+CC_Hexagon (unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (ArgFlags.isByVal()) {
+ // Passed on stack.
+ assert ((ArgFlags.getByValSize() > 8) &&
+ "ByValSize must be bigger than 8 bytes");
+ unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+ }
+
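+  // Promote i1/i8/i16 arguments to i32, recording how the value was extended.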
+ if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ ValVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ if (LocVT == MVT::i32) {
+ if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ if (LocVT == MVT::i64) {
+ if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ return true; // CC didn't match.
+}
+
+
+static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
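+  // The first six 32-bit arguments are passed in R0-R5; the rest go on the
+  // stack.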
+ static const unsigned RegList[] = {
+ Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
+ Hexagon::R5
+ };
+ if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ unsigned Offset = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
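+  // 64-bit arguments are passed in D0 first, then D1/D2; anything left over
+  // goes on the stack with 8-byte alignment.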
+ if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ static const unsigned RegList1[] = {
+ Hexagon::D1, Hexagon::D2
+ };
+ static const unsigned RegList2[] = {
+ Hexagon::R1, Hexagon::R3
+ };
+ if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+
+ if (LocVT == MVT::i1 ||
+ LocVT == MVT::i8 ||
+ LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ ValVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ if (LocVT == MVT::i32) {
+ if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ if (LocVT == MVT::i64) {
+ if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ return true; // CC didn't match.
+}
+
+static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (LocVT == MVT::i32) {
+ if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ unsigned Offset = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ if (LocVT == MVT::i64) {
+ if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ unsigned Offset = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+SDValue
+HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
+const {
+ return SDValue();
+}
+
+/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
+/// specified by "Src" to address "Dst". The size and alignment information
+/// are taken from the byval parameter attribute. The copy will be passed as
+/// a byval function parameter. Sometimes what we are copying is the end of a
+/// larger object, the part that does not fit in registers.
+static SDValue
+CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
+ ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
+ DebugLoc dl) {
+
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(), MachinePointerInfo());
+}
+
+
+// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
+// passed by value, the function prototype is modified to return void and
+// the value is stored in memory pointed to by a pointer passed by the caller.
+SDValue
+HexagonTargetLowering::LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
+
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+
+ // Analyze return values of ISD::RET
+ CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
+
+ SDValue StackPtr = DAG.getRegister(TM.getRegisterInfo()->getStackRegister(),
+ MVT::i32);
+
+ // If this is the first return lowered for this function, add the regs to the
+ // liveout set for the function.
+ if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i)
+ if (RVLocs[i].isRegLoc())
+ DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
+ }
+
+ SDValue Flag;
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ SDValue Ret = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ }
+
+ if (Flag.getNode())
+ return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+
+ return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+
+
+
+/// LowerCallResult - Lower the result values of an ISD::CALL into the
+/// appropriate copies out of appropriate physical registers. This assumes that
+/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
+/// being lowered. Returns an SDNode with the same number of values as the
+/// ISD::CALL.
+SDValue
+HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const
+ SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDValue Callee) const {
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+
+ CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ Chain = DAG.getCopyFromReg(Chain, dl,
+ RVLocs[i].getLocReg(),
+ RVLocs[i].getValVT(), InFlag).getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
+
+ return Chain;
+}
+
+/// LowerCall - Function arguments are copied from virtual registers to
+/// (physical registers)/(the stack frame); CALLSEQ_START and CALLSEQ_END are
+/// emitted.
+SDValue
+HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool &isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+
+ bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+
+ // Check for varargs.
+ NumNamedVarArgParams = -1;
+ if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
+ {
+ const Function* CalleeFn = NULL;
+ Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
+ if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
+ {
+ // If a function has zero args and is a vararg function, that's
+ // disallowed so it must be an undeclared function. Do not assume
+ // varargs if the callee is undefined.
+ if (CalleeFn->isVarArg() &&
+ CalleeFn->getFunctionType()->getNumParams() != 0) {
+ NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
+ }
+ }
+ }
+
+ if (NumNamedVarArgParams > 0)
+ CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
+ else
+ CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
+
+
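+  // A call is tail-call eligible only if the target hook agrees and no
+  // argument has to be passed on the stack.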
+  if (isTailCall) {
+ bool StructAttrFlag =
+ DAG.getMachineFunction().getFunction()->hasStructRetAttr();
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
+ isVarArg, IsStructRet,
+ StructAttrFlag,
+ Outs, OutVals, Ins, DAG);
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isMemLoc()) {
+ isTailCall = false;
+ break;
+ }
+ }
+ if (isTailCall) {
+ DEBUG(dbgs () << "Eligible for Tail Call\n");
+ } else {
+ DEBUG(dbgs () <<
+ "Argument must be passed on stack. Not eligible for Tail Call\n");
+ }
+ }
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
+ SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+
+ SDValue StackPtr =
+ DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
+ getPointerTy());
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ // Loc info must be one of Full, SExt, ZExt, or AExt.
+ assert(0 && "Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isMemLoc()) {
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+
+ if (Flags.isByVal()) {
+        // The argument is a struct passed by value. According to LLVM, "Arg"
+        // is a pointer.
+ MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
+ Flags, DAG, dl));
+ } else {
+        // The argument is not passed by value. "Arg" is a builtin type. It is
+        // not a pointer.
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),false, false,
+ 0));
+ }
+ continue;
+ }
+
+ // Arguments that can be passed on register must be kept at RegsToPass
+ // vector.
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ }
+ }
+
+ // Transform all store nodes into one single node because all store
+ // nodes are independent of each other.
+ if (!MemOpChains.empty()) {
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0],
+ MemOpChains.size());
+ }
+
+ if (!isTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
+ getPointerTy(), true));
+
+ // Build a sequence of copy-to-reg nodes chained together with token
+ // chain and flag operands which copy the outgoing args into registers.
+  // The InFlag is necessary since all emitted instructions must be
+  // stuck together.
+ SDValue InFlag;
+ if (!isTailCall) {
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+ }
+
+ // For tail calls lower the arguments to the 'real' stack slot.
+ if (isTailCall) {
+ // Force all the incoming stack arguments to be loaded from the stack
+ // before any new outgoing arguments are stored to the stack, because the
+ // outgoing stack slots may alias the incoming argument stack slots, and
+ // the alias isn't otherwise explicit. This is slightly more conservative
+ // than necessary, because it means that each store effectively depends
+ // on every argument instead of just those arguments it would clobber.
+ //
+    // Do not flag preceding copytoreg stuff together with the following stuff.
+ InFlag = SDValue();
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+    InFlag = SDValue();
+ }
+
+ // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+ // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+ // node so that legalize doesn't hack it.
+ if (flag_aligned_memcpy) {
+ const char *MemcpyName =
+ "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
+ Callee =
+ DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
+ flag_aligned_memcpy = false;
+ } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
+ } else if (ExternalSymbolSDNode *S =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+ }
+
+ // Returns a chain & a flag for retval copy to use.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+ }
+
+ if (InFlag.getNode()) {
+ Ops.push_back(InFlag);
+ }
+
+ if (isTailCall)
+ return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+
+ Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ // Create the CALLSEQ_END node.
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(0, true), InFlag);
+ InFlag = Chain.getValue(1);
+
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
+ InVals, OutVals, Callee);
+}
+
+static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
+ bool isSEXTLoad, SDValue &Base,
+ SDValue &Offset, bool &isInc,
+ SelectionDAG &DAG) {
+ if (Ptr->getOpcode() != ISD::ADD)
+ return false;
+
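+  // Only a simple (add base, constant-offset) of a supported scalar width is
+  // accepted; the offset range is checked by the caller.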
+ if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
+ isInc = (Ptr->getOpcode() == ISD::ADD);
+ Base = Ptr->getOperand(0);
+ Offset = Ptr->getOperand(1);
+ // Ensure that Offset is a constant.
+ return (isa<ConstantSDNode>(Offset));
+ }
+
+ return false;
+}
+
+// TODO: Put this function along with the other isS* functions in
+// HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
+// functions defined in HexagonImmediates.td.
+static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
+  // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ int64_t m = 0;
+ if (ShiftAmount > 0) {
+ m = v % ShiftAmount;
+ v = v >> ShiftAmount;
+ }
+ return (v <= 7) && (v >= -8) && (m == 0);
+}
+
+/// getPostIndexedAddressParts - returns true by value, base pointer and
+/// offset pointer and addressing mode by reference if this node can be
+/// combined with a load / store to form a post-indexed load / store.
+bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const
+{
+ EVT VT;
+ SDValue Ptr;
+ bool isSEXTLoad = false;
+
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ VT = LD->getMemoryVT();
+ isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+ VT = ST->getMemoryVT();
+ if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ bool isInc;
+ bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
+ isInc, DAG);
+ // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
+ int ShiftAmount = VT.getSizeInBits() / 16;
+ if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
+ AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
+ return true;
+ }
+
+ return false;
+}
+
+SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ MachineFunction &MF = DAG.getMachineFunction();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+ switch (Node->getOpcode()) {
+ case ISD::INLINEASM: {
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
+ --NumOps; // Ignore the flag operand.
+
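+    // Walk the inline-asm operand descriptors; if LR appears as an
+    // early-clobber def, record that this function clobbers LR.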
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
+ if (FuncInfo->hasClobberLR())
+ break;
+ unsigned Flags =
+ cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ ++i; // Skip the ID value.
+
+ switch (InlineAsm::getKind(Flags)) {
+ default: llvm_unreachable("Bad flags!");
+ case InlineAsm::Kind_RegDef:
+ case InlineAsm::Kind_RegUse:
+ case InlineAsm::Kind_Imm:
+ case InlineAsm::Kind_Clobber:
+ case InlineAsm::Kind_Mem: {
+ for (; NumVals; --NumVals, ++i) {}
+ break;
+ }
+ case InlineAsm::Kind_RegDefEarlyClobber: {
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg =
+ cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+
+              // Check whether the register is LR.
+ if (Reg == TM.getRegisterInfo()->getRARegister()) {
+ FuncInfo->setHasClobberLR(true);
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+  } // switch (Node->getOpcode())
+ return Op;
+}
+
+
+//
+// Taken from the XCore backend.
+//
+SDValue HexagonTargetLowering::
+LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
+{
+ SDValue Chain = Op.getOperand(0);
+ SDValue Table = Op.getOperand(1);
+ SDValue Index = Op.getOperand(2);
+ DebugLoc dl = Op.getDebugLoc();
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
+ unsigned JTI = JT->getIndex();
+ MachineFunction &MF = DAG.getMachineFunction();
+ const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
+ SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
+
+ // Mark all jump table targets as address taken.
+ const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
+ const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
+ for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = JTBBs[i];
+ MBB->setHasAddressTaken();
+ // This line is needed to set the hasAddressTaken flag on the BasicBlock
+ // object.
+ BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
+ }
+
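+  // Compute the address of the selected entry: each jump table entry is a
+  // 32-bit target address, so the index is scaled by 4.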
+ SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
+ getPointerTy(), TargetJT);
+ SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
+ DAG.getConstant(2, MVT::i32));
+ SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
+ ShiftIndex);
+ SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
+ MachinePointerInfo(), false, false, false,
+ 0);
+ return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
+}
+
+
+SDValue
+HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Size = Op.getOperand(1);
+ DebugLoc dl = Op.getDebugLoc();
+
+ unsigned SPReg = getStackPointerRegisterToSaveRestore();
+
+ // Get a reference to the stack pointer.
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
+
+ // Subtract the dynamic size from the actual stack size to
+ // obtain the new stack size.
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+
+ //
+ // For Hexagon, the outgoing memory arguments area should be on top of the
+ // alloca area on the stack i.e., the outgoing memory arguments should be
+ // at a lower address than the alloca area. Move the alloca area down the
+ // stack by adding back the space reserved for outgoing arguments to SP
+ // here.
+ //
+ // We do not know what the size of the outgoing args is at this point.
+ // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
+ // stack pointer. We patch this instruction with the correct, known
+ // offset in emitPrologue().
+ //
+ // Use a placeholder immediate (zero) for now. This will be patched up
+ // by emitPrologue().
+ SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
+ MVT::i32,
+ Sub,
+ DAG.getConstant(0, MVT::i32));
+
+ // The Sub result contains the new stack start address, so it
+ // must be placed in the stack pointer register.
+ SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
+ TM.getRegisterInfo()->getStackRegister(),
+ Sub);
+
+ SDValue Ops[2] = { ArgAdjust, CopyChain };
+ return DAG.getMergeValues(Ops, 2, dl);
+}
+
+SDValue
+HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const
+ SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals)
+const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
+
+  // For LLVM, in the case of returning a struct by value (> 8 bytes), the
+  // first argument is a pointer that points to the location on the caller's
+  // stack where the return value will be stored. For Hexagon, that location
+  // on the caller's stack is passed only when the struct is larger than
+  // 8 bytes; otherwise no address is passed into the callee and the callee
+  // returns the result directly through R0/R1.
+
+ SmallVector<SDValue, 4> MemOps;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ unsigned ObjSize;
+ unsigned StackLocation;
+ int FI;
+
+ if ( (VA.isRegLoc() && !Flags.isByVal())
+ || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
+      // Arguments passed in registers:
+      // 1. int, long long, ptr args that get allocated in a register.
+      // 2. Large structs that get a register to put their address in.
+ EVT RegVT = VA.getLocVT();
+ if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) {
+ unsigned VReg =
+ RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+ } else if (RegVT == MVT::i64) {
+ unsigned VReg =
+ RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+ } else {
+ assert (0);
+ }
+ } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
+ assert (0 && "ByValSize must be bigger than 8 bytes");
+ } else {
+ // Sanity check.
+ assert(VA.isMemLoc());
+
+ if (Flags.isByVal()) {
+ // If it's a byval parameter, then we need to compute the
+ // "real" size, not the size of the pointer.
+ ObjSize = Flags.getByValSize();
+ } else {
+ ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
+ }
+
+ StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
+ // Create the frame index object for this incoming parameter...
+ FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
+
+      // Create the SelectionDAG nodes corresponding to a load
+      // from this parameter.
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+
+ if (Flags.isByVal()) {
+ // If it's a pass-by-value aggregate, then do not dereference the stack
+ // location. Instead, we should generate a reference to the stack
+ // location.
+ InVals.push_back(FIN);
+ } else {
+ InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+ MachinePointerInfo(), false, false,
+ false, 0));
+ }
+ }
+ }
+
+ if (!MemOps.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
+ MemOps.size());
+
+ if (isVarArg) {
+ // This will point to the next argument passed via stack.
+ int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
+ HEXAGON_LRFP_SIZE +
+ CCInfo.getNextStackOffset(),
+ true);
+ FuncInfo->setVarArgsFrameIndex(FrameIndex);
+ }
+
+ return Chain;
+}
+
+SDValue
+HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ // VASTART stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ MachineFunction &MF = DAG.getMachineFunction();
+ HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
+ SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), Addr,
+ Op.getOperand(1), MachinePointerInfo(SV), false,
+ false, 0);
+}
+
+SDValue
+HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDNode* OpNode = Op.getNode();
+
+ SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1,
+ Op.getOperand(2), Op.getOperand(3),
+ Op.getOperand(4));
+ return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0),
+ Cond, Op.getOperand(0),
+ Op.getOperand(1));
+}
+
+SDValue
+HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(4, MVT::i32);
+ return DAG.getLoad(VT, dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
+ MachinePointerInfo(), false, false, false, 0);
+ }
+
+ // Return LR, which contains the return address. Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
+}
+
+SDValue
+HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
+ const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ TRI->getFrameRegister(), VT);
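+  // For non-zero depths, walk the chain of saved frame pointers.  This assumes
+  // the allocframe layout, where the caller's FP is stored at [FP] and the
+  // saved LR at [FP+4] (which is why LowerRETURNADDR above loads from
+  // FrameAddr + 4).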
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ return FrameAddr;
+}
+
+
+SDValue HexagonTargetLowering::LowerMEMBARRIER(SDValue Op,
+ SelectionDAG& DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
+}
+
+
+SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
+ SelectionDAG& DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
+}
+
+
+SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Result;
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
+ DebugLoc dl = Op.getDebugLoc();
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
+
+ HexagonTargetObjectFile &TLOF =
+ (HexagonTargetObjectFile&)getObjFileLowering();
+ if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
+ return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
+ }
+
+ return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
+}
+
+//===----------------------------------------------------------------------===//
+// TargetLowering Implementation
+//===----------------------------------------------------------------------===//
+
+HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
+ &targetmachine)
+ : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
+ TM(targetmachine) {
+
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass);
+ addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass);
+
+ addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass);
+
+ computeRegisterProperties();
+
+ // Align loop entry
+ setPrefLoopAlignment(4);
+
+ // Limits for inline expansion of memcpy/memmove
+ maxStoresPerMemcpy = 6;
+ maxStoresPerMemmove = 6;
+
+ //
+ // Library calls for unsupported operations
+ //
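+  // Each pair below names the Hexagon runtime routine for an operation and
+  // marks the corresponding node as Expand, so legalization falls back to a
+  // library call instead of trying to select a native instruction.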
+ setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
+
+ setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
+ setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
+ setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
+
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
+
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
+
+ setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
+ setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
+ setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
+ setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
+ setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
+
+ setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
+ setOperationAction(ISD::SDIV, MVT::i32, Expand);
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
+ setOperationAction(ISD::SDIV, MVT::i64, Expand);
+ setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
+ setOperationAction(ISD::UDIV, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
+ setOperationAction(ISD::UDIV, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
+ setOperationAction(ISD::FDIV, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
+ setOperationAction(ISD::FDIV, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
+ setOperationAction(ISD::FADD, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
+ setOperationAction(ISD::FMUL, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
+  setOperationAction(ISD::FMUL, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
+ setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
+
+  setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
+  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
+  setOperationAction(ISD::FSUB, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
+  setOperationAction(ISD::FSUB, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+
+ setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
+
+ setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
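+  // The post-increment forms above correspond to Hexagon's auto-increment
+  // loads and stores (e.g. r1 = memw(r0++#4)); getPostIndexedAddressParts
+  // folds the pointer update into the memory operation.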
+
+ setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+
+ // Turn FP extload into load/fextend.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+  // Hexagon has no i1 sign-extending load.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ // Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // Custom legalize GlobalAddress nodes into CONST32.
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
+ // Truncate action?
+ setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
+
+ // Hexagon doesn't have sext_inreg, replace them with shl/sra.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+
+ // Hexagon has no REM or DIVREM operations.
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Expand fp<->uint.
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+
+ // Hexagon has no select or setcc: expand to SELECT_CC.
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+
+ // Lower SELECT_CC to SETCC and SELECT.
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+  // This is a workaround documented in DAGCombiner.cpp:2892: we don't
+  // support SELECT_CC on every type.
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+
+ setOperationAction(ISD::BR_CC, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+ if (EmitJumpTables) {
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+ } else {
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ }
+
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ setOperationAction(ISD::FSIN , MVT::f64, Expand);
+ setOperationAction(ISD::FCOS , MVT::f64, Expand);
+ setOperationAction(ISD::FREM , MVT::f64, Expand);
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ , MVT::i32, Expand);
+ setOperationAction(ISD::ROTL , MVT::i32, Expand);
+ setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+
+ setOperationAction(ISD::EH_RETURN, MVT::Other, Expand);
+
+ if (TM.getSubtargetImpl()->isSubtargetV2()) {
+ setExceptionPointerRegister(Hexagon::R20);
+ setExceptionSelectorRegister(Hexagon::R21);
+ } else {
+ setExceptionPointerRegister(Hexagon::R0);
+ setExceptionSelectorRegister(Hexagon::R1);
+ }
+
+ // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
+ setOperationAction(ISD::VASTART , MVT::Other, Custom);
+
+ // Use the default implementation.
+ setOperationAction(ISD::VAARG , MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+ setOperationAction(ISD::VAEND , MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
+
+
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
+ setOperationAction(ISD::INLINEASM , MVT::Other, Custom);
+
+ setMinFunctionAlignment(2);
+
+ // Needed for DYNAMIC_STACKALLOC expansion.
+ unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
+ setStackPointerRegisterToSaveRestore(StackRegister);
+}
+
+
+const char*
+HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default: return 0;
+ case HexagonISD::CONST32: return "HexagonISD::CONST32";
+ case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
+ case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
+ case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
+ case HexagonISD::BRICC: return "HexagonISD::BRICC";
+ case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
+ case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
+ case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
+ case HexagonISD::Hi: return "HexagonISD::Hi";
+ case HexagonISD::Lo: return "HexagonISD::Lo";
+ case HexagonISD::FTOI: return "HexagonISD::FTOI";
+ case HexagonISD::ITOF: return "HexagonISD::ITOF";
+ case HexagonISD::CALL: return "HexagonISD::CALL";
+ case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
+ case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
+ case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
+ }
+}
+
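+// Both isTruncateFree overloads report i64 -> i32 truncation as free: a 64-bit
+// value lives in a double register pair (e.g. D0 = R1:R0), so taking the low
+// 32 bits simply means using the pair's low subregister.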
+bool
+HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ EVT MTy1 = EVT::getEVT(Ty1);
+ EVT MTy2 = EVT::getEVT(Ty2);
+ if (!MTy1.isSimple() || !MTy2.isSimple()) {
+ return false;
+ }
+ return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
+}
+
+bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
+ if (!VT1.isSimple() || !VT2.isSimple()) {
+ return false;
+ }
+ return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
+}
+
+SDValue
+HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default: assert(0 && "Should not custom lower this!");
+ // Frame & Return address. Currently unimplemented.
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ assert(0 && "TLS not implemented for Hexagon.");
+ case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
+ case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
+ case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::BR_JT: return LowerBR_JT(Op, DAG);
+
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
+
+ }
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Hexagon Scheduler Hooks
+//===----------------------------------------------------------------------===//
+MachineBasicBlock *
+HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB)
+const {
+ switch (MI->getOpcode()) {
+ case Hexagon::ADJDYNALLOC: {
+ MachineFunction *MF = BB->getParent();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF->getInfo<HexagonMachineFunctionInfo>();
+ FuncInfo->addAllocaAdjustInst(MI);
+ return BB;
+ }
+ default:
+ assert(false && "Unexpected instr type to insert");
+ } // switch
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+std::pair<unsigned, const TargetRegisterClass*>
+HexagonTargetLowering::getRegForInlineAsmConstraint(const
+ std::string &Constraint,
+ EVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r': // R0-R31
+ switch (VT.getSimpleVT().SimpleTy) {
+ default:
+ assert(0 && "getRegForInlineAsmConstraint Unhandled data type");
+ case MVT::i32:
+ case MVT::i16:
+ case MVT::i8:
+ return std::make_pair(0U, Hexagon::IntRegsRegisterClass);
+ case MVT::i64:
+ return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass);
+ }
+ default:
+ assert(0 && "Unknown asm register class");
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented by
+/// AM is legal for this target, for a load/store of the specified type.
+bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+  // Allows a sign-extended 11-bit immediate field.
+ if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
+ return false;
+ }
+
+ // No global is ever allowed as a base.
+ if (AM.BaseGV) {
+ return false;
+ }
+
+ int Scale = AM.Scale;
+ if (Scale < 0) Scale = -Scale;
+ switch (Scale) {
+ case 0: // No scale reg, "r+i", "r", or just "i".
+ break;
+ default: // No scaled addressing mode.
+ return false;
+ }
+ return true;
+}
+
+/// isLegalICmpImmediate - Return true if the specified immediate is legal
+/// icmp immediate, that is the target has icmp instructions which can compare
+/// a register against the immediate without having to materialize the
+/// immediate into a register.
+bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
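+  // [-512, 511] is the range of a signed 10-bit field, matching the #s10
+  // immediate accepted by Hexagon compare instructions such as cmp.eq(Rs,#s10).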
+ return Imm >= -512 && Imm <= 511;
+}
+
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization. Targets which want to do tail call
+/// optimization should implement this function.
+bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
+ SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const {
+ const Function *CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF->getCallingConv();
+ bool CCMatch = CallerCC == CalleeCC;
+
+ // ***************************************************************************
+ // Look for obvious safe cases to perform tail call optimization that do not
+ // require ABI changes.
+ // ***************************************************************************
+
+ // If this is a tail call via a function pointer, then don't do it!
+ if (!(dyn_cast<GlobalAddressSDNode>(Callee))
+ && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
+ return false;
+ }
+
+ // Do not optimize if the calling conventions do not match.
+ if (!CCMatch)
+ return false;
+
+ // Do not tail call optimize vararg calls.
+ if (isVarArg)
+ return false;
+
+ // Also avoid tail call optimization if either caller or callee uses struct
+ // return semantics.
+ if (isCalleeStructRet || isCallerStructRet)
+ return false;
+
+ // In addition to the cases above, we also disable Tail Call Optimization if
+  // the calling convention requires that at least one outgoing argument be
+  // passed on the stack. We cannot check that here because at this point that
+ // information is not available.
+ return true;
+}
--- /dev/null
+//==-- HexagonISelLowering.h - Hexagon DAG Lowering Interface ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that Hexagon uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef Hexagon_ISELLOWERING_H
+#define Hexagon_ISELLOWERING_H
+
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "Hexagon.h"
+
+namespace llvm {
+ namespace HexagonISD {
+ enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ CONST32,
+ CONST32_GP, // For marking data present in GP.
+ SETCC,
+ ADJDYNALLOC,
+ ARGEXTEND,
+
+ CMPICC, // Compare two GPR operands, set icc.
+ CMPFCC, // Compare two FP operands, set fcc.
+ BRICC, // Branch to dest on icc condition
+ BRFCC, // Branch to dest on fcc condition
+ SELECT_ICC, // Select between two values using the current ICC flags.
+ SELECT_FCC, // Select between two values using the current FCC flags.
+
+ Hi, Lo, // Hi/Lo operations, typically on a global address.
+
+ FTOI, // FP to Int within a FP register.
+ ITOF, // Int to FP within a FP register.
+
+ CALL, // A call instruction.
+ RET_FLAG, // Return with a flag operand.
+ BR_JT, // Jump table.
+ BARRIER, // Memory barrier.
+ WrapperJT,
+ TC_RETURN
+ };
+ }
+
+ class HexagonTargetLowering : public TargetLowering {
+ int VarArgsFrameOffset; // Frame offset to start of varargs area.
+
+ bool CanReturnSmallStruct(const Function* CalleeFn,
+ unsigned& RetSize) const;
+
+ public:
+ HexagonTargetMachine &TM;
+ explicit HexagonTargetLowering(HexagonTargetMachine &targetmachine);
+
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets which want to do tail call
+ /// optimization should implement this function.
+ bool
+ IsEligibleForTailCallOptimization(SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const;
+
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+ virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+ SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerCall(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool &isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDValue Callee) const;
+
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const;
+
+ virtual MachineBasicBlock
+ *EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ virtual EVT getSetCCResultType(EVT VT) const {
+ return MVT::i1;
+ }
+
+ virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const;
+
+ std::pair<unsigned, const TargetRegisterClass*>
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ EVT VT) const;
+
+ // Intrinsics
+ virtual SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const;
+ /// isLegalAddressingMode - Return true if the addressing mode represented
+ /// by AM is legal for this target, for a load/store of the specified type.
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type.
+ /// TODO: Handle pre/postinc as well.
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ /// isLegalICmpImmediate - Return true if the specified immediate is legal
+ /// icmp immediate, that is the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ };
+} // end namespace llvm
+
+#endif // Hexagon_ISELLOWERING_H
--- /dev/null
+//=- HexagonImmediates.td - Hexagon immediate processing --*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// From IA64's InstrInfo file
+def s32Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s16Imm : Operand<i32> {
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s12Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s11Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s11_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s11_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s11_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s11_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s10Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s9Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s8Imm64 : Operand<i64> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s4Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s4_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s4_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s4_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def s4_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u64Imm : Operand<i64> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u32Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u16Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u16_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u16_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u16_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u11_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u10Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u9Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u7Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u6_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u6_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u6_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u6_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u5Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u4Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def u2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def n8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+def m6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printHexagonImmOperand";
+}
+
+//
+// Immediate predicates
+//
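+// The predicates below use the isInt<N>/isUInt<N> and isShiftedInt<N,S>/
+// isShiftedUInt<N,S> helpers from llvm/Support/MathExtras.h: isInt<N>(v) is
+// true when v fits in an N-bit signed field, and isShiftedInt<N,S>(v) is true
+// when v is a multiple of 2^S whose shifted-down value fits in N signed bits.
+// For example, isShiftedInt<11,2>(v) accepts multiples of 4 from -4096 to 4092.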
+def s32ImmPred : PatLeaf<(i32 imm), [{
+  // s32ImmPred predicate - True if the immediate fits in a 32-bit sign
+  // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<32>(v);
+}]>;
+
+def s32_24ImmPred : PatLeaf<(i32 imm), [{
+ // s32_24ImmPred predicate - True if the immediate fits in a 32-bit sign
+ // extended field that is a multiple of 0x1000000.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<32,24>(v);
+}]>;
+
+def s32_16s8ImmPred : PatLeaf<(i32 imm), [{
+  // s32_16s8ImmPred predicate - True if the immediate fits in a 24-bit sign
+  // extended field that is a multiple of 0x10000.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<24,16>(v);
+}]>;
+
+def s16ImmPred : PatLeaf<(i32 imm), [{
+ // immS16 predicate - True if the immediate fits in a 16-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<16>(v);
+}]>;
+
+
+def s13ImmPred : PatLeaf<(i32 imm), [{
+ // immS13 predicate - True if the immediate fits in a 13-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<13>(v);
+}]>;
+
+
+def s12ImmPred : PatLeaf<(i32 imm), [{
+  // s12ImmPred predicate - True if the immediate fits in a 12-bit sign
+  // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<12>(v);
+}]>;
+
+def s11_0ImmPred : PatLeaf<(i32 imm), [{
+  // s11_0ImmPred predicate - True if the immediate fits in an 11-bit sign
+  // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<11>(v);
+}]>;
+
+
+def s11_1ImmPred : PatLeaf<(i32 imm), [{
+  // s11_1ImmPred predicate - True if the immediate fits in an 11-bit sign
+  // extended field that is a multiple of 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,1>(v);
+}]>;
+
+
+def s11_2ImmPred : PatLeaf<(i32 imm), [{
+  // s11_2ImmPred predicate - True if the immediate fits in an 11-bit sign
+  // extended field that is a multiple of 4.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,2>(v);
+}]>;
+
+
+def s11_3ImmPred : PatLeaf<(i32 imm), [{
+  // s11_3ImmPred predicate - True if the immediate fits in an 11-bit sign
+  // extended field that is a multiple of 8.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,3>(v);
+}]>;
+
+
+def s10ImmPred : PatLeaf<(i32 imm), [{
+ // s10ImmPred predicate - True if the immediate fits in a 10-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<10>(v);
+}]>;
+
+
+def s9ImmPred : PatLeaf<(i32 imm), [{
+ // s9ImmPred predicate - True if the immediate fits in a 9-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<9>(v);
+}]>;
+
+
+def s8ImmPred : PatLeaf<(i32 imm), [{
+  // s8ImmPred predicate - True if the immediate fits in an 8-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<8>(v);
+}]>;
+
+
+def s8Imm64Pred : PatLeaf<(i64 imm), [{
+  // s8Imm64Pred predicate - True if the immediate fits in an 8-bit sign
+  // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<8>(v);
+}]>;
+
+
+def s6ImmPred : PatLeaf<(i32 imm), [{
+ // s6ImmPred predicate - True if the immediate fits in a 6-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<6>(v);
+}]>;
+
+
+def s4_0ImmPred : PatLeaf<(i32 imm), [{
+ // s4_0ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<4>(v);
+}]>;
+
+
+def s4_1ImmPred : PatLeaf<(i32 imm), [{
+ // s4_1ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+  // field that is a multiple of 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,1>(v);
+}]>;
+
+
+def s4_2ImmPred : PatLeaf<(i32 imm), [{
+ // s4_2ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field that is a multiple of 4.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,2>(v);
+}]>;
+
+
+def s4_3ImmPred : PatLeaf<(i32 imm), [{
+ // s4_3ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field that is a multiple of 8.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,3>(v);
+}]>;
+
+
+def u64ImmPred : PatLeaf<(i64 imm), [{
+  // u64ImmPred predicate - Always true: any 64-bit immediate is accepted.
+  // Adding "N ||" to suppress gcc unused warning.
+ return (N || true);
+}]>;
+
+def u32ImmPred : PatLeaf<(i32 imm), [{
+  // u32ImmPred predicate - True if the immediate fits in a 32-bit unsigned
+  // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<32>(v);
+}]>;
+
+def u16ImmPred : PatLeaf<(i32 imm), [{
+ // u16ImmPred predicate - True if the immediate fits in a 16-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<16>(v);
+}]>;
+
+def u16_s8ImmPred : PatLeaf<(i32 imm), [{
+  // u16_s8ImmPred predicate - True if the immediate fits in a 16-bit unsigned
+  // field that is a multiple of 0x100.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<16,8>(v);
+}]>;
+
+def u9ImmPred : PatLeaf<(i32 imm), [{
+ // u9ImmPred predicate - True if the immediate fits in a 9-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<9>(v);
+}]>;
+
+
+def u8ImmPred : PatLeaf<(i32 imm), [{
+  // u8ImmPred predicate - True if the immediate fits in an 8-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<8>(v);
+}]>;
+
+def u7ImmPred : PatLeaf<(i32 imm), [{
+  // u7ImmPred predicate - True if the immediate fits in a 7-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<7>(v);
+}]>;
+
+
+def u6ImmPred : PatLeaf<(i32 imm), [{
+  // u6ImmPred predicate - True if the immediate fits in a 6-bit unsigned