bool ComplexPatternFuncMutatesDAG() const override {
return true;
}
+
+ bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
+
+ /// Returns whether this is either a constant that fits in a signed
+ /// Width-bit immediate, or a relocatable reference to an absolute symbol
+ /// whose value is known to lie in the range [-2^Width .. 2^Width-1].
+ template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
+ if (auto *CN = dyn_cast<ConstantSDNode>(N))
+ return isInt<Width>(CN->getSExtValue());
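+ // Not a plain constant: fall back to checking for a reference to an
+ // absolute symbol whose value range is known to be small enough.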
+ return isSExtAbsoluteSymbolRef(Width, N);
+ }
};
}
return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}
+bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
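+ // Absolute symbol references reach here as an X86ISD::Wrapper around a
+ // GlobalAddress, possibly behind a TRUNCATE; anything else is rejected.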
+ if (N->getOpcode() == ISD::TRUNCATE)
+ N = N->getOperand(0).getNode();
+ if (N->getOpcode() != X86ISD::Wrapper)
+ return false;
+
+ auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
+ if (!GA)
+ return false;
+
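+ // getAbsoluteSymbolRange() returns the range recorded in !absolute_symbol
+ // metadata, or None for ordinary globals. Fold only if the entire range
+ // lies within [-2^Width, 2^Width - 1].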
+ Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
+ return CR && CR->getSignedMin().sge(-1ull << Width) &&
+ CR->getSignedMax().slt(1ull << Width);
+}
+
/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// or OF bits to be accurate.
static bool hasNoSignedComparisonUses(SDNode *N) {
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
+// sub reg, relocImm
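+// Like the plain-immediate patterns above, but these also match relocatable
+// immediates (e.g. addresses of absolute symbols with a known range), so the
+// symbol can be encoded in the immediate field and resolved by a relocation.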
+def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2),
+ (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;
+def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt32_su:$src2),
+ (SUB64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
(IMUL16rr GR16:$src1, GR16:$src2)>;
def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
+// FIXME: Ideally we would just replace the above i*immSExt* matchers with
+// relocImm-based matchers, but then FastISel would be unable to use them.
+def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
+ return isSExtRelocImm<8>(N);
+}]>;
+def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
+ return isSExtRelocImm<32>(N);
+}]>;
+
// If we have multiple users of an immediate, it's much smaller to reuse
// the register, rather than encode the immediate in every instruction.
// This has the risk of increasing register pressure from stretched live
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
+def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
+ return !shouldAvoidImmediateInstFormsForSize(N);
+}]>;
+def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
+ return !shouldAvoidImmediateInstFormsForSize(N);
+}]>;
+
// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// unsigned field.
def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
--- /dev/null
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -relocation-model=pic < %s | FileCheck %s
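+; Comparisons against absolute symbols whose !absolute_symbol range fits the
+; immediate field should be selected as immediate operands, both with the
+; default relocation model and with PIC.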
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@cmp8 = external hidden global i8, !absolute_symbol !0
+@cmp32 = external hidden global i8, !absolute_symbol !1
+
+declare void @f()
+
+define void @foo8(i64 %val) {
+ ; CHECK: cmpq $cmp8@ABS8, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp8 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
+define void @foo32(i64 %val) {
+ ; CHECK: cmpq $cmp32, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp32 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
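+; The !absolute_symbol ranges are half-open: cmp8 is known to lie in
+; [0, 128), so its address fits an 8-bit immediate (emitted with @ABS8);
+; cmp32 lies in [0, 2^31), fitting a sign-extended 32-bit immediate.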
+!0 = !{i64 0, i64 128}
+!1 = !{i64 0, i64 2147483648}