From fbe910e7f432682457a7b3b9319f618dd66ddcd4 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Sun, 27 Nov 2011 06:56:53 +0000
Subject: [PATCH] remove asmparsing and documentation support for "volatile
 load", which was only produced by LLVM 2.9 and earlier.  LLVM 3.0 and later
 prefers "load volatile".

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145172 91177308-0d34-0410-b5e6-96231b3b80d8
---
 docs/LangRef.html          |  8 ++++----
 docs/ReleaseNotes.html     |  4 ++--
 lib/AsmParser/LLParser.cpp | 32 ++++++--------------------------
 lib/AsmParser/LLParser.h   |  4 ++--
 4 files changed, 14 insertions(+), 34 deletions(-)

diff --git a/docs/LangRef.html b/docs/LangRef.html
index a9ec80069ae..b133a532f1c 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -2583,7 +2583,7 @@ entry:
   store i32 %trap, i32* @g           ; Trap value conceptually stored to memory.
   %trap2 = load i32* @g              ; Returns a trap value, not just undef.

-  volatile store i32 %trap, i32* @g  ; External observation; undefined behavior.
+  store volatile i32 %trap, i32* @g  ; External observation; undefined behavior.

   %narrowaddr = bitcast i32* @g to i16*
   %wideaddr = bitcast i32* @g to i64*
@@ -2594,7 +2594,7 @@
   br i1 %cmp, label %true, label %end ; Branch to either destination.

 true:
-  volatile store i32 0, i32* @g      ; This is control-dependent on %cmp, so
+  store volatile i32 0, i32* @g      ; This is control-dependent on %cmp, so
                                      ; it has undefined behavior.
   br label %end

@@ -2604,7 +2604,7 @@ end:
                                      ; control-dependent on %cmp, so this
                                      ; always results in a trap value.

-  volatile store i32 0, i32* @g      ; This would depend on the store in %true
+  store volatile i32 0, i32* @g      ; This would depend on the store in %true
                                      ; if %cmp is true, or the store in %entry
                                      ; otherwise, so this is undefined behavior.

@@ -2617,7 +2617,7 @@ second_true:
   ret void

 second_end:
-  volatile store i32 0, i32* @g      ; This time, the instruction always depends
+  store volatile i32 0, i32* @g      ; This time, the instruction always depends
                                      ; on the store in %end. Also, it is
                                      ; control-equivalent to %end, so this is
                                      ; well-defined (again, ignoring earlier
diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html
index edfc1a0a078..7d82f5ea83b 100644
--- a/docs/ReleaseNotes.html
+++ b/docs/ReleaseNotes.html
@@ -776,14 +776,14 @@
    be used to verify some algorithms.
    New PackedVector, TinyPtrVector class (see Programmer's Manual)
    New nonlazybind function attribute.
    ARC language specific optimizer (Transforms/ObjCARC) a decent example of language-specific transformation.
-   LLVM 3.0 removes support for reading LLVM 2.8 and earlier files.
+   LLVM 3.0 removes support for reading LLVM 2.8 and earlier files. Aim to maintain compatibility all the way back to 3.0 "forever".
    New llvm.expect intrinsic.
    Table generated MC expansion logic for pseudo instructions that expand to
      multiple MC instructions through the PseudoInstExpansion class. (JimG)
    New llvm.fma intrinsic.
    Euro dev meeting and main one too.
-   New atomics instructions, "#i_fence" instruction, cmpxchg, atomicrmw too. What target support?
+   New atomics instructions, "#i_fence" instruction, cmpxchg, atomicrmw too. What target support? Also 'atomic load/store'. See Atomics.html
    X86: inline assembler supports .code32 and .code64.
    Exception handling rewrite: new landingpad and resume instruction. Unwind gone.
    LowerSetJmp pass removed, unused.
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index f01e9954018..212c9fb38d5 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -2948,19 +2948,11 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
   case lltok::kw_tail:    return ParseCall(Inst, PFS, true);
   // Memory.
   case lltok::kw_alloca:  return ParseAlloc(Inst, PFS);
-  case lltok::kw_load:    return ParseLoad(Inst, PFS, false);
-  case lltok::kw_store:   return ParseStore(Inst, PFS, false);
+  case lltok::kw_load:    return ParseLoad(Inst, PFS);
+  case lltok::kw_store:   return ParseStore(Inst, PFS);
   case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS);
   case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS);
   case lltok::kw_fence:   return ParseFence(Inst, PFS);
-  case lltok::kw_volatile:
-    // For compatibility; canonical location is after load
-    if (EatIfPresent(lltok::kw_load))
-      return ParseLoad(Inst, PFS, true);
-    else if (EatIfPresent(lltok::kw_store))
-      return ParseStore(Inst, PFS, true);
-    else
-      return TokError("expected 'load' or 'store'");
   case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
   case lltok::kw_extractvalue:  return ParseExtractValue(Inst, PFS);
   case lltok::kw_insertvalue:   return ParseInsertValue(Inst, PFS);
@@ -3684,10 +3676,7 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
 ///   ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
 ///   ::= 'load' 'atomic' 'volatile'? TypeAndValue
 ///       'singlethread'? AtomicOrdering (',' 'align' i32)?
-/// Compatibility:
-///   ::= 'volatile' 'load' TypeAndValue (',' 'align' i32)?
-int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
-                        bool isVolatile) {
+int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Val; LocTy Loc;
   unsigned Alignment = 0;
   bool AteExtraComma = false;
@@ -3696,15 +3685,12 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
   SynchronizationScope Scope = CrossThread;

   if (Lex.getKind() == lltok::kw_atomic) {
-    if (isVolatile)
-      return TokError("mixing atomic with old volatile placement");
     isAtomic = true;
     Lex.Lex();
   }

+  bool isVolatile = false;
   if (Lex.getKind() == lltok::kw_volatile) {
-    if (isVolatile)
-      return TokError("duplicate volatile before and after store");
     isVolatile = true;
     Lex.Lex();
   }
@@ -3731,10 +3717,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
 ///   ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
 ///   ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
 ///       'singlethread'? AtomicOrdering (',' 'align' i32)?
-/// Compatibility:
-///   ::= 'volatile' 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
-                         bool isVolatile) {
+int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Val, *Ptr; LocTy Loc, PtrLoc;
   unsigned Alignment = 0;
   bool AteExtraComma = false;
@@ -3743,15 +3726,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
   SynchronizationScope Scope = CrossThread;

   if (Lex.getKind() == lltok::kw_atomic) {
-    if (isVolatile)
-      return TokError("mixing atomic with old volatile placement");
     isAtomic = true;
     Lex.Lex();
   }

+  bool isVolatile = false;
   if (Lex.getKind() == lltok::kw_volatile) {
-    if (isVolatile)
-      return TokError("duplicate volatile before and after store");
     isVolatile = true;
     Lex.Lex();
   }
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index cbc3c23e863..c2537d7bda7 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -363,8 +363,8 @@ namespace llvm {
     bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
     bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
     int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
-    int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
-    int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+    int ParseLoad(Instruction *&I, PerFunctionState &PFS);
+    int ParseStore(Instruction *&I, PerFunctionState &PFS);
     int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
     int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
     int ParseFence(Instruction *&I, PerFunctionState &PFS);
--
2.34.1
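
Note (not part of the patch): a minimal sketch of the syntax change this commit enforces, written against a global @g like the one in the LangRef example above; the exact values and alignment are illustrative. The commented lines show the LLVM 2.9-and-earlier spelling that the parser no longer accepts; the rest show the canonical LLVM 3.0 spelling, including the atomic forms described by the grammar comments in ParseLoad/ParseStore:

  ; Old spelling, produced only by LLVM 2.9 and earlier; no longer parsed:
  ;   %old = volatile load i32* @g
  ;   volatile store i32 0, i32* @g

  ; Canonical LLVM 3.0 spelling; 'volatile' follows the opcode:
  %v = load volatile i32* @g
  store volatile i32 %v, i32* @g

  ; Atomic forms place 'volatile' after the 'atomic' keyword:
  %a = load atomic volatile i32* @g seq_cst, align 4
  store atomic volatile i32 %a, i32* @g seq_cst, align 4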