From b8cd3bb4751c78067022acacaa300b8f55db643d Mon Sep 17 00:00:00 2001
From: Hans Wennborg
Date: Wed, 11 Jan 2017 00:49:54 +0000
Subject: [PATCH] [X86] Don't run combineSetCCAtomicArith() when the cmp has
 multiple uses

We would miscompile the following:

    void g(int);
    int f(volatile long long *p) {
      bool b = __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) < 0;
      g(b ? 12 : 34);
      return b ? 56 : 78;
    }

into

    pushq   %rax
    lock            incq    (%rdi)
    movl    $12, %eax
    movl    $34, %edi
    cmovlel %eax, %edi
    callq   g(int)
    testq   %rax, %rax              <---- Bad.
    movl    $56, %ecx
    movl    $78, %eax
    cmovsl  %ecx, %eax
    popq    %rcx
    retq

because the code failed to take into account that the cmp has multiple
uses, replaced one of them, and left the other one comparing garbage.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@291630 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp      |  6 ++++++
 test/CodeGen/X86/atomic-eflags-reuse.ll | 16 ++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 82457f2fec3..fac72049587 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -29404,6 +29404,12 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();
 
+  // Can't replace the cmp if it has more uses than the one we're looking at.
+  // FIXME: We would like to be able to handle this, but would need to make sure
+  // all uses were updated.
+  if (!Cmp.hasOneUse())
+    return SDValue();
+
   // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
diff --git a/test/CodeGen/X86/atomic-eflags-reuse.ll b/test/CodeGen/X86/atomic-eflags-reuse.ll
index dc1814b55cd..9521a2afefc 100644
--- a/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -176,4 +176,20 @@ entry:
   ret i8 %tmp2
 }
 
+define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
+; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
+; CHECK-LABEL: test_add_1_cmov_cmov:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movl $1, %eax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+entry:
+  %add = atomicrmw add i64* %p, i64 1 seq_cst
+  %cmp = icmp slt i64 %add, 0
+  %s1 = select i1 %cmp, i8 12, i8 34
+  store i8 %s1, i8* %q
+  %s2 = select i1 %cmp, i8 56, i8 78
+  ret i8 %s2
+}
+
 attributes #0 = { nounwind }
-- 
2.49.0
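
Note on why the one-use restriction is the conservative fix: the combine folds
the compare into the EFLAGS produced by the atomic RMW, rewriting
(icmp slt x, 0) as (icmp sle (add x, 1), 0) per the comment in the hunk above,
so every user of the original compare must have its condition code flipped in
the same way. A minimal standalone C check of the underlying identity, written
here purely as an illustration (it is not part of the patch):

    #include <assert.h>
    #include <limits.h>
    #include <stdatomic.h>

    int main(void) {
      _Atomic long long v = -1;
      /* "old" is the pre-increment value: the x in (icmp slt x, 0). */
      long long old = atomic_fetch_add(&v, 1);
      /* The combine relies on (x < 0) == (x + 1 <= 0). On x86 the "le"
       * condition after "lock inc" evaluates the signed x + 1 <= 0 exactly,
       * overflow included, via ZF | (SF ^ OF); in C we must exclude
       * x == LLONG_MAX, where x + 1 overflows. */
      if (old < LLONG_MAX)
        assert((old < 0) == (old + 1 <= 0));
      return 0;
    }

In the miscompiled output above, only the first use was rewritten this way
(the cmovlel reading the flags of lock incq); the second was left expecting
the original compare, so it tested whatever happened to be in %rax after the
call to g, which is the stray testq %rax, %rax flagged in the commit message.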