// If we can't even use the basic vector operations of
// AND,OR,XOR, we will have to scalarize the op.
- if (!TLI.isOperationLegalOrCustom(ISD::AND, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::XOR, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::OR, VT))
- return DAG.UnrollVectorOp(Op.getNode());
+ // Notice that the operation may be 'promoted', which means that it is
+ // 'bitcast' to another type that is handled.
+ if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Op.getNode());
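This is the case that motivates the change: on x86 with SSE2, the vector logic ops on the narrower integer vector types are not Legal or Custom but Promote, i.e. they are bitcast to v2i64 and selected there. A sketch of that target setup (along the lines of the X86 lowering code, not quoted verbatim):

// v4i32 AND has no native pattern of its own; it is promoted, meaning the
// operands are bitcast to v2i64 and the v2i64 pattern is used instead.
// getOperationAction(ISD::AND, MVT::v4i32) therefore returns Promote, which
// the new check above accepts, while the old isOperationLegalOrCustom()
// check rejected it and needlessly forced scalarization.
setOperationAction(ISD::AND, MVT::v4i32, Promote);
AddPromotedToType(ISD::AND, MVT::v4i32, MVT::v2i64);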
assert(VT.getSizeInBits() == Op.getOperand(1).getValueType().getSizeInBits()
&& "Invalid mask size");
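For reference, the expansion these checks guard blends the two operands with the mask using exactly those three ops; a minimal sketch of the rest of the function (assumed shape, not the verbatim implementation):

// VSELECT(Mask, Op1, Op2) -> (Op1 & Mask) | (Op2 & ~Mask), assuming each
// mask lane is all-ones or all-zeros.
DebugLoc DL = Op.getDebugLoc();
SDValue Mask = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
// Invert the mask by XOR-ing with an all-ones splat of the same type.
SDValue AllOnes = DAG.getConstant(
    APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()), VT);
SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);
Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
return DAG.getNode(ISD::OR, DL, VT, Op1, Op2);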
DebugLoc DL = Op.getDebugLoc();
// Make sure that the SINT_TO_FP and SRL instructions are available.
- if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::SRL, VT))
- return DAG.UnrollVectorOp(Op.getNode());
+ if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Op.getNode());
EVT SVT = VT.getScalarType();
assert((SVT.getSizeInBits() == 64 || SVT.getSizeInBits() == 32) &&
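Past this excerpt the function splits each element into two halves so that both are non-negative, converts each half with SINT_TO_FP, and recombines them; a minimal sketch of that tail (assumed shape, not the verbatim code):

// uint_to_fp(x) == sint_to_fp(x >> H) * 2^H + sint_to_fp(x & ((1 << H) - 1))
// with H = BW/2, so both halves fit the signed range and convert exactly.
unsigned BW = SVT.getSizeInBits();
SDValue HalfWord = DAG.getConstant(BW / 2, VT);
// Mask that keeps only the low half of each element.
uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFFULL : 0x0000FFFFULL;
SDValue HalfWordMask = DAG.getConstant(HWMask, VT);
// 2^(BW/2) as a floating-point splat, used to scale the high half back up.
SDValue TwoHW = DAG.getConstantFP(double(1ULL << (BW / 2)), Op.getValueType());
SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Op.getOperand(0), HalfWord);
SDValue LO = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), HalfWordMask);
SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TwoHW);
SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);
return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);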
-; RUN: llc < %s -march=x86 -mcpu=yonah -promote-elements -mattr=+sse2,-sse41 | FileCheck %s
-
-
-; currently (xor v4i32) is defined as illegal, so we scalarize the code.
+; RUN: llc < %s -march=x86 -mcpu=yonah -mattr=+sse2,-sse41 | FileCheck %s
+; CHECK: vsel_float
+; CHECK: pandn
+; CHECK: pand
+; CHECK: por
+; CHECK: ret
define void @vsel_float(<4 x float>* %v1, <4 x float>* %v2) {
%A = load <4 x float>* %v1
%B = load <4 x float>* %v2
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
store <4 x float> %vsel, <4 x float>* %v1
ret void
}
-; currently (xor v4i32) is defined as illegal, so we scalarize the code.
-
+; CHECK: vsel_i32
+; CHECK: pandn
+; CHECK: pand
+; CHECK: por
+; CHECK: ret
define void @vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) {
%A = load <4 x i32>* %v1
%B = load <4 x i32>* %v2