}
if (OldCC == X86::COND_INVALID) return false;
}
+ X86::CondCode ReplacementCC = X86::COND_INVALID;
if (IsCmpZero) {
switch (OldCC) {
- default: break;
+ default:
+ return false;
case X86::COND_E:
+ ReplacementCC = NewCC;
break;
case X86::COND_NE:
- NewCC = GetOppositeBranchCondition(NewCC);
+ ReplacementCC = GetOppositeBranchCondition(NewCC);
break;
}
} else if (IsSwapped) {
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
// to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
// We swap the condition code and synthesize the new opcode.
- NewCC = getSwappedCondition(OldCC);
- if (NewCC == X86::COND_INVALID) return false;
+ ReplacementCC = getSwappedCondition(OldCC);
+ if (ReplacementCC == X86::COND_INVALID) return false;
}
- if ((ShouldUpdateCC || IsSwapped) && NewCC != OldCC) {
+ if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
// Synthesize the new opcode.
bool HasMemoryOperand = Instr.hasOneMemOperand();
unsigned NewOpc;
if (Instr.isBranch())
- NewOpc = GetCondBranchFromCond(NewCC);
+ NewOpc = GetCondBranchFromCond(ReplacementCC);
else if(OpcIsSET)
- NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
+ NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand);
else {
unsigned DstReg = Instr.getOperand(0).getReg();
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
- NewOpc = getCMovFromCond(NewCC, TRI->getRegSizeInBits(*DstRC)/8,
+ NewOpc = getCMovFromCond(ReplacementCC, TRI->getRegSizeInBits(*DstRC)/8,
HasMemoryOperand);
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=lzcnt | FileCheck %s
+
+; Make sure we emit opposite setcc instructions.
+define i64 @pr35399(i64, i8*, i8*) {
+; CHECK-LABEL: pr35399:
+; CHECK: # BB#0:
+; CHECK-NEXT: lzcntq %rdi, %rax
+; CHECK-NEXT: setae (%rsi)
+; CHECK-NEXT: setb (%rdx)
+; CHECK-NEXT: retq
+; Both the compare-against-zero and its negation should fold into flag reads
+; (setae/setb) of the single lzcntq above, with no separate cmp/test emitted.
+ %4 = tail call i64 @llvm.ctlz.i64(i64 %0, i1 false)
+ %5 = icmp ne i64 %0, 0
+ %6 = zext i1 %5 to i8
+ store i8 %6, i8* %1, align 1
+; The xor with true inverts the condition; the backend must synthesize the
+; opposite setcc for it rather than re-comparing (regression test for PR35399).
+ %7 = xor i1 %5, true
+ %8 = zext i1 %7 to i8
+ store i8 %8, i8* %2, align 1
+ ret i64 %4
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)