BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
FPOptions FPFeatures;
const Expr *E; // Entire expr, for error unsupported. May not be binop.
+
+ /// Check if the binop can result in integer overflow.
+ bool mayHaveIntegerOverflow() const {
+ // Without constant input, we can't rule out overflow.
+ const auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
+ const auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
+ if (!LHSCI || !RHSCI)
+ return true;
+
+ // Assume overflow is possible, unless we can prove otherwise.
+ bool Overflow = true;
+ const auto &LHSAP = LHSCI->getValue();
+ const auto &RHSAP = RHSCI->getValue();
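+ // The results of the *_ov calls below are discarded; only the Overflow
+ // out-parameter they set is of interest here.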
+ if (Opcode == BO_Add) {
+ if (Ty->hasSignedIntegerRepresentation())
+ (void)LHSAP.sadd_ov(RHSAP, Overflow);
+ else
+ (void)LHSAP.uadd_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Sub) {
+ if (Ty->hasSignedIntegerRepresentation())
+ (void)LHSAP.ssub_ov(RHSAP, Overflow);
+ else
+ (void)LHSAP.usub_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Mul) {
+ if (Ty->hasSignedIntegerRepresentation())
+ (void)LHSAP.smul_ov(RHSAP, Overflow);
+ else
+ (void)LHSAP.umul_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
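+ // Unsigned division and remainder cannot overflow, and a zero divisor is
+ // handled by mayHaveIntegerDivisionByZero(); the only overflowing case
+ // left is signed INT_MIN / -1 (and INT_MIN % -1).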
+ if (Ty->hasSignedIntegerRepresentation() && !RHSCI->isZero())
+ (void)LHSAP.sdiv_ov(RHSAP, Overflow);
+ else
+ return false;
+ }
+ return Overflow;
+ }
+
+ /// Check if the binop computes a division or a remainder.
+ bool isDivisionLikeOperation() const {
+ return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
+ Opcode == BO_RemAssign;
+ }
+
+ /// Check if the binop can result in an integer division by zero.
+ bool mayHaveIntegerDivisionByZero() const {
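+ // A non-constant divisor may be zero at run time, so be conservative
+ // unless a nonzero constant divisor is visible.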
+ if (isDivisionLikeOperation())
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
+ return CI->isZero();
+ return true;
+ }
+
+ /// Check if the binop can result in a float division by zero.
+ bool mayHaveFloatDivisionByZero() const {
+ if (isDivisionLikeOperation())
+ if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
+ return CFP->isZero();
+ return true;
+ }
};
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
"Expected a unary or binary operator");
+ // If the binop has constant inputs and we can prove there is no overflow,
+ // we can elide the overflow check.
+ if (!Op.mayHaveIntegerOverflow())
+ return true;
+
+ // If a unary op has a widened operand, the op cannot overflow.
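+ // (E.g. negating an int that was promoted from a narrower type cannot
+ // overflow, since the value stays well inside the int range.)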
if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
return IsWidenedIntegerOp(Ctx, UO->getSubExpr());
+ // We usually don't need overflow checks for binops with widened operands.
+ // Multiplication with promoted unsigned operands is a special case.
const auto *BO = cast<BinaryOperator>(Op.E);
auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
if (!OptionalLHSTy)
  return false;

auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
if (!OptionalRHSTy)
  return false;

QualType LHSTy = *OptionalLHSTy;
QualType RHSTy = *OptionalRHSTy;
- // We usually don't need overflow checks for binary operations with widened
- // operands. Multiplication with promoted unsigned operands is a special case.
+ // This is the simple case: binops without unsigned multiplication, and with
+ // widened operands. No overflow check is needed here.
if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
!LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
return true;
- // The overflow check can be skipped if either one of the unpromoted types
- // are less than half the size of the promoted type.
+ // For unsigned multiplication the overflow check can be elided if either one
+ // of the unpromoted types is less than half the size of the promoted type.
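+ // E.g. with 32-bit int, (unsigned char)a * (unsigned char)b is at most
+ // 255 * 255 and cannot overflow the promoted type, whereas unsigned short
+ // operands can produce up to 65535 * 65535, which does not fit.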
unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
(2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
const auto *BO = cast<BinaryOperator>(Ops.E);
if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
Ops.Ty->hasSignedIntegerRepresentation() &&
- !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS())) {
+ !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
+ Ops.mayHaveIntegerOverflow()) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
llvm::Value *IntMin =
CodeGenFunction::SanitizerScope SanScope(&CGF);
if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
- Ops.Ty->isIntegerType()) {
+ Ops.Ty->isIntegerType() &&
+ (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
} else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
- Ops.Ty->isRealFloatingType()) {
+ Ops.Ty->isRealFloatingType() &&
+ Ops.mayHaveFloatDivisionByZero()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
// Rem in C can't be a floating point type: C99 6.5.5p2.
if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
- Ops.Ty->isIntegerType()) {
+ Ops.Ty->isIntegerType() &&
+ (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
--- /dev/null
+// RUN: %clang_cc1 -x c -S -emit-llvm -o - -triple x86_64-apple-darwin10 %s \
+// RUN: -w -fsanitize=signed-integer-overflow,unsigned-integer-overflow,integer-divide-by-zero,float-divide-by-zero \
+// RUN: | FileCheck %s
+
+// CHECK-LABEL: define void @foo
+// CHECK-NOT: !nosanitize
+void foo(const int *p) {
+ // __builtin_prefetch expects its optional arguments to be constant integers.
+ // Check that ubsan does not instrument any safe arithmetic performed in
+ // operands to __builtin_prefetch. (A clang frontend check should reject
+ // unsafe arithmetic in these operands.)
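+ // If this arithmetic were instrumented, the lowered argument would no
+ // longer be a constant, and the llvm.prefetch intrinsic requires constant
+ // rw/locality arguments.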
+
+ __builtin_prefetch(p, 0 + 1, 0 + 3);
+ __builtin_prefetch(p, 1 - 0, 3 - 0);
+ __builtin_prefetch(p, 1 * 1, 1 * 3);
+ __builtin_prefetch(p, 1 / 1, 3 / 1);
+ __builtin_prefetch(p, 3 % 2, 3 % 1);
+
+ __builtin_prefetch(p, 0U + 1U, 0U + 3U);
+ __builtin_prefetch(p, 1U - 0U, 3U - 0U);
+ __builtin_prefetch(p, 1U * 1U, 1U * 3U);
+ __builtin_prefetch(p, 1U / 1U, 3U / 1U);
+ __builtin_prefetch(p, 3U % 2U, 3U % 1U);
+}
+
+// CHECK-LABEL: define void @ub_constant_arithmetic
+void ub_constant_arithmetic() {
+ // Check that we still instrument unsafe arithmetic, even if it is known to
+ // be unsafe at compile time.
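+ // A check is only elided when constant operands let the compiler prove the
+ // operation is safe; none of the expressions below qualify, so every check
+ // must remain.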
+
+ int INT_MIN = 0x80000000;
+ int INT_MAX = 0x7fffffff;
+
+ // CHECK: call void @__ubsan_handle_add_overflow
+ // CHECK: call void @__ubsan_handle_add_overflow
+ INT_MAX + 1;
+ INT_MAX + -1;
+
+ // CHECK: call void @__ubsan_handle_negate_overflow
+ // CHECK: call void @__ubsan_handle_sub_overflow
+ -INT_MIN;
+ -INT_MAX - 2;
+
+ // CHECK: call void @__ubsan_handle_mul_overflow
+ // CHECK: call void @__ubsan_handle_mul_overflow
+ INT_MAX * INT_MAX;
+ INT_MIN * INT_MIN;
+
+ // CHECK: call void @__ubsan_handle_divrem_overflow
+ // CHECK: call void @__ubsan_handle_divrem_overflow
+ 1 / 0;
+ INT_MIN / -1;
+
+ // CHECK: call void @__ubsan_handle_divrem_overflow
+ // CHECK: call void @__ubsan_handle_divrem_overflow
+ 1 % 0;
+ INT_MIN % -1;
+
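+ // Note: the float divide-by-zero check reports through the same
+ // __ubsan_handle_divrem_overflow entry point as the integer checks.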
+ // CHECK: call void @__ubsan_handle_divrem_overflow
+ 1.0 / 0.0;
+}