+define void @add_32r(i32* %p, i32 %v) {
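+; Check that an acquire load, add, and monotonic store to the same address are
+; folded into a single memory-operand addl: on x86 these orderings need no
+; fence or lock prefix, and no separate movl load or store should remain.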
+; X64-LABEL: add_32r:
+; X64-NOT: lock
+; X64: addl
+; X64-NOT: movl
+; X32-LABEL: add_32r:
+; X32-NOT: lock
+; X32: addl
+; X32-NOT: movl
+ %1 = load atomic i32, i32* %p acquire, align 4
+ %2 = add i32 %1, %v
+ store atomic i32 %2, i32* %p monotonic, align 4
+ ret void
+}
+
+; The following is a corner case where the load is added to itself. The pattern
+; matching should not fold this: the loaded value is used twice, so folding
+; would either duplicate the atomic load or need an unencodable
+; memory-to-memory addl. We only test the 32-bit add, but the same applies to
+; other sizes and operations.
+define void @add_32r_self(i32* %p) {
+; X64-LABEL: add_32r_self:
+; X64-NOT: lock
+; X64: movl (%[[M:[a-z]+]]), %[[R:[a-z]+]]
+; X64: addl %[[R]], %[[R]]
+; X64: movl %[[R]], (%[[M]])
+; X32-LABEL: add_32r_self:
+; X32-NOT: lock
+; X32: movl (%[[M:[a-z]+]]), %[[R:[a-z]+]]
+; X32: addl %[[R]], %[[R]]
+; X32: movl %[[R]], (%[[M]])
+ %1 = load atomic i32, i32* %p acquire, align 4
+ %2 = add i32 %1, %1
+ store atomic i32 %2, i32* %p monotonic, align 4
+ ret void
+}
+
+; The following is a corner case where the load's result is returned. The
+; optimizer isn't allowed to duplicate the load because it's atomic: two
+; copies of the load could observe different values.
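+; On X64, the loaded value must stay in %eax for the return, so the sum is
+; computed into a scratch register (via leal) and stored back separately.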
+define i32 @add_32r_ret_load(i32* %p, i32 %v) {
+; X64-LABEL: add_32r_ret_load:
+; X64-NOT: lock
+; X64: movl (%rdi), %eax
+; X64-NEXT: leal (%rsi,%rax), %ecx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: retq
+; X32-LABEL: add_32r_ret_load:
+; X32-NOT: lock
+; X32: movl 4(%esp), %[[P:[a-z]+]]
+; X32-NEXT: movl (%[[P]]),
+; X32-NOT: %[[P]]
+; More code here; we just don't want it to load from P again.
+; X32: movl %{{.*}}, (%[[P]])
+; X32-NEXT: retl
+ %1 = load atomic i32, i32* %p acquire, align 4
+ %2 = add i32 %1, %v
+ store atomic i32 %2, i32* %p monotonic, align 4
+ ret i32 %1
+}
+
+define void @add_64i(i64* %p) {
+; X64-LABEL: add_64i: