ATOMIC_LOAD_AND,
ATOMIC_LOAD_OR,
ATOMIC_LOAD_XOR,
+ ATOMIC_LOAD_NAND,
ATOMIC_LOAD_MIN,
ATOMIC_LOAD_MAX,
ATOMIC_LOAD_UMIN,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
GCCBuiltin<"__sync_fetch_and_xor">;
+def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty,
+ LLVMPointerType<LLVMMatchType<0>>,
+ LLVMMatchType<0>],
+ [IntrWriteArgMem]>,
+ GCCBuiltin<"__sync_fetch_and_nand">;
def int_atomic_load_min : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
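(Note: the GCCBuiltin annotation above wires the new intrinsic to GCC's __sync_fetch_and_nand. As a rough single-threaded model of the operation this patch implements, assuming the pre-GCC-4.4 reading of fetch-and-nand that the X86 expansion below encodes, where the stored value is ~old & val rather than ~(old & val); the function name is made up:)

#include <stdint.h>

// Hypothetical scalar model of the new operation: return the previous
// value and store (~old & val), mirroring the NOT-then-AND sequence
// that EmitAtomicBitwiseWithCustomInserter emits when invSrc is true.
uint32_t fetch_and_nand_model(uint32_t *ptr, uint32_t val) {
  uint32_t old = *ptr;  // t1 = load [ptr]
  *ptr = ~old & val;    // tt = NOT t1; t2 = AND tt, val; store t2
  return old;           // the intrinsic's result is the old value
}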
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
assert(( Opcode == ISD::ATOMIC_LAS || Opcode == ISD::ATOMIC_LSS
|| Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND
|| Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR
+ || Opcode == ISD::ATOMIC_LOAD_NAND
|| Opcode == ISD::ATOMIC_LOAD_MIN || Opcode == ISD::ATOMIC_LOAD_MAX
|| Opcode == ISD::ATOMIC_LOAD_UMIN || Opcode == ISD::ATOMIC_LOAD_UMAX)
&& "Invalid Atomic Op");
case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
+ case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
case Intrinsic::atomic_load_xor:
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
+ case Intrinsic::atomic_load_nand:
+ return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
case Intrinsic::atomic_load_min:
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
case Intrinsic::atomic_load_max:
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_nand : SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
MachineBasicBlock *MBB,
unsigned regOpc,
- unsigned immOpc) {
+ unsigned immOpc,
+ bool invSrc) {
// For the atomic bitwise operator, we generate
// thisMBB:
// newMBB:
MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), t1);
for (int i=0; i <= lastAddrIndx; ++i)
(*MIB).addOperand(*argOpers[i]);
-
+
+  // If the source must be inverted first (the NAND case), NOT the loaded
+  // value into a fresh virtual register; otherwise operate on the loaded
+  // value directly.
+  unsigned tt = t1;
+  if (invSrc) {
+    tt = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
+    MIB = BuildMI(newMBB, TII->get(X86::NOT32r), tt).addReg(t1);
+  }
+
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm())
&& "invalid operand");
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, TII->get(regOpc), t2);
  else
    MIB = BuildMI(newMBB, TII->get(immOpc), t2);
- MIB.addReg(t1);
+ MIB.addReg(tt);
(*MIB).addOperand(*argOpers[valArgIndx]);
-
+
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), X86::EAX);
MIB.addReg(t1);
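(To make the expansion concrete, here is a rough C++-level sketch of the loop the custom inserter builds around newMBB for the inverted-source case; an illustration only, since the real code emits MachineInstrs and loops back to newMBB via LCMPXCHG32, and fetch_nand32 is a made-up name:)

#include <stdint.h>

uint32_t fetch_nand32(volatile uint32_t *ptr, uint32_t val) {
  uint32_t old, t2;
  do {
    old = *ptr;        // MOV32rm: t1 = [ptr]
    t2  = ~old & val;  // NOT32r: tt = ~t1; AND32rr: t2 = tt & val
                       // MOV32rr: EAX = t1 (cmpxchg compares against EAX)
  } while (!__sync_bool_compare_and_swap(ptr, old, t2));  // LCMPXCHG32; JNE newMBB
  return old;          // result is the value loaded before the update
}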
case X86::ATOMXOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
X86::XOR32ri);
+ case X86::ATOMNAND32:
+ return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
+ X86::AND32ri, true);
case X86::ATOMMIN32:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
case X86::ATOMMAX32:
MachineInstr *BInstr,
MachineBasicBlock *BB,
unsigned regOpc,
- unsigned immOpc);
+ unsigned immOpc,
+ bool invSrc = false);
  /// Utility function to emit atomic min and max. It takes the min/max
  /// instruction to expand, the associated basic block, and the associated
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMAND32 PSUEDO!",
[(set GR32:$dst, (atomic_load_and addr:$ptr, GR32:$val))]>;
-}
-
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMOR32 PSUEDO!",
[(set GR32:$dst, (atomic_load_or addr:$ptr, GR32:$val))]>;
-}
-
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSUEDO!",
[(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
-}
+def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMXOR32 PSUEDO!",
+ [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"#ATOMMIN32 PSUEDO!",
[(set GR32:$dst, (atomic_load_min addr:$ptr, GR32:$val))]>;
-}
-
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMMAX32 PSUEDO!",
[(set GR32:$dst, (atomic_load_max addr:$ptr, GR32:$val))]>;
-}
-
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMIN32 PSUEDO!",
[(set GR32:$dst, (atomic_load_umin addr:$ptr, GR32:$val))]>;
-}
-
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomDAGSchedInserter = 1 in {
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMAX32 PSUEDO!",
[(set GR32:$dst, (atomic_load_umax addr:$ptr, GR32:$val))]>;
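(With the ATOMNAND32 pattern in place, a __sync_fetch_and_nand call should now survive instruction selection on x86 instead of hitting a "cannot select" error. A hypothetical smoke test, with the compiler invocation and function name assumed:)

// Hypothetical end-to-end check: llvm-gcc -O2 on x86 should lower this
// through int_atomic_load_nand -> ISD::ATOMIC_LOAD_NAND -> ATOMNAND32.
#include <stdint.h>

uint32_t nand_smoke(volatile uint32_t *p, uint32_t v) {
  return __sync_fetch_and_nand(p, v);
}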