return UnknownVal();
}
-static bool isHigherOrderVoidPtr(QualType T, ASTContext &C) {
+static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
bool foundPointer = false;
while (1) {
    const PointerType *PT = T->getAsPointerType();
    if (!PT) {
      if (!foundPointer)
        return false;
+ // intptr_t* or intptr_t**, etc?
+ if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy))
+ return true;
+
QualType X = C.getCanonicalType(T).getUnqualifiedType();
return X == C.VoidTy;
    }
    foundPointer = true;
    T = PT->getPointeeType();
}
}
-
+
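
// For reference, a sketch (not part of the patch) of what isHigherOrderRawPtr
// accepts, assuming the usual case where intptr_t has the same width as void*:
//
//   void **    -> true   (chain of pointers ending in 'void')
//   void ***   -> true
//   intptr_t * -> true   (pointee is a pointer-width integer)
//   char *     -> false  (pointee is an integer narrower than 'void*')
//   int        -> false  (not a pointer at all)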
SVal BasicStoreManager::Retrieve(const GRState* state, Loc loc, QualType T) {
  if (isa<UnknownVal>(loc))
    return UnknownVal();
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
if (const TypedViewRegion *TR = dyn_cast<TypedViewRegion>(R)) {
- // Just support void**, void***, etc., for now. This is needed
- // to handle OSCompareAndSwapPtr().
+ // Just support void**, void***, intptr_t*, intptr_t**, etc., for now.
+ // This is needed to handle OSCompareAndSwapPtr() and friends.
ASTContext &Ctx = StateMgr.getContext();
QualType T = TR->getLValueType(Ctx);
- if (!isHigherOrderVoidPtr(T, Ctx))
+ if (!isHigherOrderRawPtr(T, Ctx))
return UnknownVal();
      // Otherwise, strip the views.
      R = TR->removeViews();
    }

case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+ // Special case: handle store of pointer values (Loc) to pointers via
+ // a cast to intXX_t*, void*, etc. This is needed to handle
+ // OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier.
+ if (isa<Loc>(V) || isa<nonloc::LocAsInteger>(V))
+ if (const TypedViewRegion *TR = dyn_cast<TypedViewRegion>(R)) {
+ ASTContext &C = StateMgr.getContext();
+ QualType T = TR->getLValueType(C);
+
+ if (isHigherOrderRawPtr(T, C)) {
+ R = TR->removeViews();
+
+ if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
+ // Only convert 'V' to a location if the underlying region type
+ // is a location as well.
+ // FIXME: We are allowing a store of an arbitrary location to
+ // a pointer. We may wish to flag a type error here if the types
+ // are incompatible. This may also cause lots of breakage
+ // elsewhere. Food for thought.
+ if (const TypedRegion *TyR = dyn_cast<TypedRegion>(R)) {
+ if (TyR->isBoundable(C) &&
+ Loc::IsLocType(TyR->getRValueType(C)))
+ V = X->getLoc();
+ }
+ }
+ }
+ }
+
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
return store;
}
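
// The store pattern the special case above targets, sketched with an
// illustrative (not exact) libkern-style signature:
//
//   void *p = ...;
//   OSCompareAndSwap64Barrier((int64_t) oldVal, (int64_t) p, (int64_t *) &p);
//
// Here '&p' reaches the binding logic as a TypedViewRegion with view type
// 'int64_t*', while the value being stored is really a pointer (a Loc, or a
// nonloc::LocAsInteger); stripping the views recovers the underlying pointer
// variable so the binding is not lost.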
SVal GRSimpleVals::EvalBinOp(GRExprEngine& Eng, BinaryOperator::Opcode Op,
- Loc L, NonLoc R) {
+ Loc L, NonLoc R) {
+
+ // Special case: 'R' is an integer that has the same width as a pointer and
+ // we are using the integer location in a comparison. Normally this cannot be
+ // triggered, but transfer functions like those for OSCompareAndSwap32Barrier
+ // can generate comparisons that trigger this code.
+ // FIXME: Are all locations guaranteed to have pointer width?
+ if (BinaryOperator::isEqualityOp(Op)) {
+ if (nonloc::ConcreteInt *RInt = dyn_cast<nonloc::ConcreteInt>(&R)) {
+ const llvm::APSInt &X = RInt->getValue();
+ ASTContext &C = Eng.getContext();
+ if (C.getTypeSize(C.VoidPtrTy) == X.getBitWidth())
+ return EvalBinOp(Eng, Op, L, loc::ConcreteInt(X));
+ }
+ }
+
// Delegate pointer arithmetic to store manager.
return Eng.getStoreManager().EvalBinOp(Op, L, R);
}
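
// A sketch of a comparison that reaches the special case above (illustrative):
// loading a pointer through a pointer-width integer view yields a Loc, which
// is then compared against an integer constant of the same width:
//
//   void *p = ...;
//   intptr_t *ip = (intptr_t *) &p;
//   if (*ip == 0)   // Loc '==' pointer-width nonloc::ConcreteInt
//     ...
//
// Re-wrapping the integer as loc::ConcreteInt routes the comparison through
// the normal Loc-Loc equality logic.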