// Convert the array size into a canonical width matching the pointer size for
// the target.
llvm::APInt ArySize(ArySizeIn);
- ArySize.zextOrTrunc(Target.getPointerWidth(EltTy.getAddressSpace()));
+ ArySize =
+ ArySize.zextOrTrunc(Target.getPointerWidth(EltTy.getAddressSpace()));
llvm::FoldingSetNodeID ID;
ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, EltTypeQuals);
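// Every hunk in this patch follows the same mechanical pattern: the
// APInt/APSInt resizing operations (zext, sext, trunc, extend, extOrTrunc,
// zextOrTrunc) now return the resized value instead of mutating in place, so
// each call site assigns the result back or folds it into an initializer.
// A minimal standalone sketch of the new shape (hypothetical helper name):
#include "llvm/ADT/APInt.h"
static llvm::APInt canonicalizeToWidth(llvm::APInt Size, unsigned PtrWidth) {
  // Value-returning form; the old style mutated Size in place and the call's
  // result was ignored.
  Size = Size.zextOrTrunc(PtrWidth);
  return Size;
}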
return I1 == I2;
if (I1.getBitWidth() > I2.getBitWidth())
- return I1 == llvm::APInt(I2).zext(I1.getBitWidth());
+ return I1 == I2.zext(I1.getBitWidth());
- return llvm::APInt(I1).zext(I2.getBitWidth()) == I2;
+ return I1.zext(I2.getBitWidth()) == I2;
}
/// \brief Determine if two APSInts have the same value, zero- or sign-extending
// Check for a bit-width mismatch.
if (I1.getBitWidth() > I2.getBitWidth())
- return IsSameValue(I1, llvm::APSInt(I2).extend(I1.getBitWidth()));
+ return IsSameValue(I1, I2.extend(I1.getBitWidth()));
else if (I2.getBitWidth() > I1.getBitWidth())
- return IsSameValue(llvm::APSInt(I1).extend(I2.getBitWidth()), I2);
+ return IsSameValue(I1.extend(I2.getBitWidth()), I2);
// We have a signedness mismatch. Turn the signed value into an unsigned
// value.
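// A self-contained sketch of a width- and signedness-insensitive APSInt
// comparison in this spirit (simplified stand-in with hypothetical names,
// not the exact tree code): equalize the widths first, then resolve a
// signed/unsigned mismatch before comparing.
#include "llvm/ADT/APSInt.h"
static bool sameExtendedValue(llvm::APSInt A, llvm::APSInt B) {
  // Widen the narrower operand; extend() sign- or zero-extends according to
  // the operand's own signedness.
  if (A.getBitWidth() < B.getBitWidth())
    A = A.extend(B.getBitWidth());
  else if (B.getBitWidth() < A.getBitWidth())
    B = B.extend(A.getBitWidth());
  if (A.isSigned() != B.isSigned()) {
    // A negative signed value can never equal an unsigned value; otherwise
    // the signed value is non-negative and can safely be treated as unsigned.
    if ((A.isSigned() && A.isNegative()) || (B.isSigned() && B.isNegative()))
      return false;
    A.setIsUnsigned(true);
    B.setIsUnsigned(true);
  }
  return A == B;
}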
APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
- Result.extOrTrunc(DestWidth);
+ Result = Result.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerType());
return Result;
}
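// A self-contained sketch of this kind of integral conversion (hypothetical
// helper, not the exact tree code): resize to the destination width, then
// adopt the destination type's signedness.
#include "llvm/ADT/APSInt.h"
static llvm::APSInt castIntToWidth(const llvm::APSInt &Value,
                                   unsigned DestWidth, bool DestIsUnsigned) {
  // extOrTrunc extends according to the *source* signedness (sign-extend for
  // signed, zero-extend for unsigned) and truncates when DestWidth is smaller.
  llvm::APSInt Result = Value.extOrTrunc(DestWidth);
  Result.setIsUnsigned(DestIsUnsigned);
  return Result;
}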
break;
if (Value.isInt()) {
- Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
+ Value.getInt() = Value.getInt().extOrTrunc(
+ (unsigned)Info.Ctx.getTypeSize(E->getType()));
Result.Base = 0;
Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
return true;
llvm::SmallVector<APValue, 4> Elts;
for (unsigned i = 0; i != NElts; ++i) {
- APSInt Tmp = Init;
- Tmp.extOrTrunc(EltWidth);
+ APSInt Tmp = Init.extOrTrunc(EltWidth);
if (EltTy->isIntegerType())
Elts.push_back(APValue(Tmp));
const llvm::APInt &NumElements) {
llvm::APSInt SizeExtended(NumElements, true);
unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
- SizeExtended.extend(std::max(SizeTypeBits, SizeExtended.getBitWidth()) * 2);
+ SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
+ SizeExtended.getBitWidth()) * 2);
uint64_t ElementSize
= Context.getTypeSizeInChars(ElementType).getQuantity();
// Convert the adjustment.
Adjustment.setIsUnsigned(isSymUnsigned);
- Adjustment.extOrTrunc(bitwidth);
+ Adjustment = Adjustment.extOrTrunc(bitwidth);
// Convert the right-hand side integer.
llvm::APSInt ConvertedInt(Int, isSymUnsigned);
- ConvertedInt.extOrTrunc(bitwidth);
+ ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);
switch (op) {
default:
llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
- i.extOrTrunc(Context.getTypeSize(castTy));
+ i = i.extOrTrunc(Context.getTypeSize(castTy));
if (isLocType)
return makeIntLocVal(i);
llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
- i.extOrTrunc(BitWidth);
+ i = i.extOrTrunc(BitWidth);
return makeIntVal(i);
}
// Transform the integer into a location and compare.
llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
i.setIsUnsigned(true);
- i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy));
+ i = i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy));
return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
}
default:
// Convert the bitwidth of rightI. This should deal with overflow
// since we are dealing with concrete values.
- rightI.extOrTrunc(leftI.getBitWidth());
+ rightI = rightI.extOrTrunc(leftI.getBitWidth());
// Offset the increment by the pointer size.
llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
unsigned SizeWidth = NEC.getBitWidth();
// Determine if there is an overflow here by doing an extended multiply.
- NEC.zext(SizeWidth*2);
+ NEC = NEC.zext(SizeWidth*2);
llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
SC *= NEC;
// overflow's already happened because SizeWithoutCookie isn't
// used if the allocator returns null or throws, as it should
// always do on an overflow.
- llvm::APInt SWC = SC;
- SWC.trunc(SizeWidth);
+ llvm::APInt SWC = SC.trunc(SizeWidth);
SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
// Add the cookie size.
}
if (SC.countLeadingZeros() >= SizeWidth) {
- SC.trunc(SizeWidth);
+ SC = SC.trunc(SizeWidth);
Size = llvm::ConstantInt::get(SizeTy, SC);
} else {
// On overflow, produce a -1 so operator new throws.
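// A self-contained sketch of the overflow check used above (hypothetical
// names, simplified from the real code): multiply at twice the width and
// treat any bit set in the upper half as an overflow.
#include "llvm/ADT/APInt.h"
#include <cstdint>
static bool allocationOverflows(const llvm::APInt &NumElements,
                                uint64_t ElementSize) {
  unsigned Width = NumElements.getBitWidth();
  llvm::APInt Product = NumElements.zext(Width * 2);
  Product *= llvm::APInt(Width * 2, ElementSize);
  // If the high half is all zeroes, the product fits in the original width.
  return Product.countLeadingZeros() < Width;
}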
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
if (FieldSize > FieldValue.getBitWidth())
- FieldValue.zext(FieldSize);
+ FieldValue = FieldValue.zext(FieldSize);
// Truncate the size of FieldValue to the bit field size.
if (FieldSize < FieldValue.getBitWidth())
- FieldValue.trunc(FieldSize);
+ FieldValue = FieldValue.trunc(FieldSize);
if (FieldOffset < NextFieldOffsetInBytes * 8) {
// Either part of the field or the entire field can go into the previous
if (CGM.getTargetData().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining high bits.
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
} else {
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining low bits.
FieldValue = FieldValue.lshr(BitsInPreviousByte);
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
}
}
- Tmp.zext(8);
+ Tmp = Tmp.zext(8);
if (CGM.getTargetData().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
if (CGM.getTargetData().isBigEndian()) {
// We want the high bits.
- Tmp = FieldValue;
- Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
- Tmp.trunc(8);
+ Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
} else {
// We want the low bits.
- Tmp = FieldValue;
- Tmp.trunc(8);
+ Tmp = FieldValue.trunc(8);
FieldValue = FieldValue.lshr(8);
}
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
NextFieldOffsetInBytes++;
- FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
assert(FieldValue.getBitWidth() > 0 &&
if (CGM.getTargetData().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
- FieldValue.zext(8);
- FieldValue = FieldValue << (8 - BitWidth);
+ FieldValue = FieldValue.zext(8) << (8 - BitWidth);
} else
- FieldValue.zext(8);
+ FieldValue = FieldValue.zext(8);
}
// Append the last element.
return IntRange(value.getMinSignedBits(), false);
if (value.getBitWidth() > MaxWidth)
- value.trunc(MaxWidth);
+ value = value.trunc(MaxWidth);
// isNonNegative() just checks the sign bit without considering
// signedness.
if (OriginalWidth <= FieldWidth)
return false;
- llvm::APSInt TruncatedValue = Value;
- TruncatedValue.trunc(FieldWidth);
+ llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
// It's fairly common to write values into signed bitfields
// that, if sign-extended, would end up becoming a different
// value. We don't want to warn about that.
if (Value.isSigned() && Value.isNegative())
- TruncatedValue.sext(OriginalWidth);
+ TruncatedValue = TruncatedValue.sext(OriginalWidth);
else
- TruncatedValue.zext(OriginalWidth);
+ TruncatedValue = TruncatedValue.zext(OriginalWidth);
if (Value == TruncatedValue)
return false;
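// A self-contained sketch of this fits-in-bit-field test (hypothetical
// helper, simplified from the diagnostic code above): truncate to the field
// width, re-extend, and see whether the original value survives the round
// trip.
#include "llvm/ADT/APSInt.h"
static bool fitsInBitField(const llvm::APSInt &Value, unsigned FieldWidth) {
  unsigned OriginalWidth = Value.getBitWidth();
  if (OriginalWidth <= FieldWidth)
    return true;
  llvm::APSInt Truncated = Value.trunc(FieldWidth);
  // Re-extend with the sign of the original so that, e.g., -1 stored in a
  // signed bit-field still compares equal; otherwise zero-extend.
  if (Value.isSigned() && Value.isNegative())
    Truncated = Truncated.sext(OriginalWidth);
  else
    Truncated = Truncated.zext(OriginalWidth);
  return Value == Truncated;
}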
llvm::APSInt ValueInRange = Value;
ValueInRange.setIsSigned(!Range.NonNegative);
- ValueInRange.trunc(Range.Width);
+ ValueInRange = ValueInRange.trunc(Range.Width);
return ValueInRange.toString(10);
}
// There is no integral type large enough to represent this
// value. Complain, then allow the value to wrap around.
EnumVal = LastEnumConst->getInitVal();
- EnumVal.zext(EnumVal.getBitWidth() * 2);
+ EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
++EnumVal;
if (Enum->isFixed())
// When the underlying type is fixed, this is ill-formed.
// value, then increment.
EnumVal = LastEnumConst->getInitVal();
EnumVal.setIsSigned(EltTy->isSignedIntegerType());
- EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
++EnumVal;
// If we're not in C++, diagnose the overflow of enumerator values,
if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
- EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
EnumVal.setIsSigned(EltTy->isSignedIntegerType());
}
}
// Adjust the APSInt value.
- InitVal.extOrTrunc(NewWidth);
+ InitVal = InitVal.extOrTrunc(NewWidth);
InitVal.setIsSigned(NewSign);
ECD->setInitVal(InitVal);
Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc);
// Create the comparison against the array bound.
- llvm::APInt Upper = ArrayTy->getSize();
- Upper.zextOrTrunc(S.Context.getTypeSize(SizeType));
+ llvm::APInt Upper
+ = ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
Expr *Comparison
= new (S.Context) BinaryOperator(IterationVarRef,
IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
= Context.getAsConstantArrayType(FieldType);
Array;
Array = Context.getAsConstantArrayType(Array->getElementType())) {
- llvm::APInt ArraySize = Array->getSize();
- ArraySize.zextOrTrunc(Size.getBitWidth());
+ llvm::APInt ArraySize
+ = Array->getSize().zextOrTrunc(Size.getBitWidth());
Size *= ArraySize;
}
}
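// A self-contained sketch of this dimension accumulation (hypothetical
// helper; the code above walks the real ConstantArrayType chain instead):
// widen every dimension to a common bit width before multiplying.
#include "llvm/ADT/APInt.h"
#include <vector>
static llvm::APInt totalElementCount(const std::vector<llvm::APInt> &Dims,
                                     unsigned Width) {
  llvm::APInt Count(Width, 1);
  for (const llvm::APInt &Dim : Dims)
    Count *= Dim.zextOrTrunc(Width); // operands of *= must share a width
  return Count;
}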
if (ResultVal.getBitWidth() != Width)
- ResultVal.trunc(Width);
+ ResultVal = ResultVal.trunc(Width);
}
Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation());
}
if (const ConstantArrayType *CAT =
SemaRef.Context.getAsConstantArrayType(DeclType)) {
maxElements = CAT->getSize();
- elementIndex.extOrTrunc(maxElements.getBitWidth());
+ elementIndex = elementIndex.extOrTrunc(maxElements.getBitWidth());
elementIndex.setIsUnsigned(maxElements.isUnsigned());
maxElementsKnown = true;
}
}
if (elementIndex.getBitWidth() > maxElements.getBitWidth())
- maxElements.extend(elementIndex.getBitWidth());
+ maxElements = maxElements.extend(elementIndex.getBitWidth());
else if (elementIndex.getBitWidth() < maxElements.getBitWidth())
- elementIndex.extend(maxElements.getBitWidth());
+ elementIndex = elementIndex.extend(maxElements.getBitWidth());
elementIndex.setIsUnsigned(maxElements.isUnsigned());
// If the array is of incomplete type, keep track of the number of
if (isa<ConstantArrayType>(AT)) {
llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
- DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedStartIndex
+ = DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
- DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
if (DesignatedEndIndex >= MaxElements) {
SemaRef.Diag(IndexExpr->getSourceRange().getBegin(),
} else {
// Make sure the bit-widths and signedness match.
if (DesignatedStartIndex.getBitWidth() > DesignatedEndIndex.getBitWidth())
- DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
else if (DesignatedStartIndex.getBitWidth() <
DesignatedEndIndex.getBitWidth())
- DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
+ DesignatedStartIndex
+ = DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
DesignatedStartIndex.setIsUnsigned(true);
DesignatedEndIndex.setIsUnsigned(true);
}
if (StartDependent || EndDependent) {
// Nothing to compute.
} else if (StartValue.getBitWidth() > EndValue.getBitWidth())
- EndValue.extend(StartValue.getBitWidth());
+ EndValue = EndValue.extend(StartValue.getBitWidth());
else if (StartValue.getBitWidth() < EndValue.getBitWidth())
- StartValue.extend(EndValue.getBitWidth());
+ StartValue = StartValue.extend(EndValue.getBitWidth());
if (!StartDependent && !EndDependent && EndValue < StartValue) {
Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
// Perform a conversion to the promoted condition type if needed.
if (NewWidth > Val.getBitWidth()) {
// If this is an extension, just do it.
- Val.extend(NewWidth);
+ Val = Val.extend(NewWidth);
Val.setIsSigned(NewSign);
// If the input was signed and negative and the output is
} else if (NewWidth < Val.getBitWidth()) {
// If this is a truncation, check for overflow.
llvm::APSInt ConvVal(Val);
- ConvVal.trunc(NewWidth);
+ ConvVal = ConvVal.trunc(NewWidth);
ConvVal.setIsSigned(NewSign);
- ConvVal.extend(Val.getBitWidth());
+ ConvVal = ConvVal.extend(Val.getBitWidth());
ConvVal.setIsSigned(Val.isSigned());
if (ConvVal != Val)
Diag(Loc, DiagID) << Val.toString(10) << ConvVal.toString(10);
// Regardless of whether a diagnostic was emitted, really do the
// truncation.
- Val.trunc(NewWidth);
+ Val = Val.trunc(NewWidth);
Val.setIsSigned(NewSign);
} else if (NewSign != Val.isSigned()) {
// Convert the sign to match the sign of the condition. This can cause
static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
if (Val.getBitWidth() < BitWidth)
- Val.extend(BitWidth);
+ Val = Val.extend(BitWidth);
else if (Val.getBitWidth() > BitWidth)
- Val.trunc(BitWidth);
+ Val = Val.trunc(BitWidth);
Val.setIsSigned(IsSigned);
}
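// An illustrative call site for a helper with AdjustAPSInt's shape
// (hypothetical names, not the exact tree code): normalize two case values
// to the switch condition's width and signedness, then compare them directly.
#include "llvm/ADT/APSInt.h"
static bool isEmptyCaseRange(llvm::APSInt Lo, llvm::APSInt Hi,
                             unsigned CondWidth, bool CondIsSigned) {
  Lo = Lo.extOrTrunc(CondWidth);
  Lo.setIsSigned(CondIsSigned);
  Hi = Hi.extOrTrunc(CondWidth);
  Hi.setIsSigned(CondIsSigned);
  // With matching width and signedness, APSInt's operator< is well-defined.
  return Hi < Lo;
}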
// based on the template parameter's type.
unsigned AllowedBits = Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
- Value.extOrTrunc(AllowedBits);
+ Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerType());
// Complain if an unsigned parameter received a negative value.
/// necessary to compare their values regardless of underlying type.
static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
if (Y.getBitWidth() > X.getBitWidth())
- X.extend(Y.getBitWidth());
+ X = X.extend(Y.getBitWidth());
else if (Y.getBitWidth() < X.getBitWidth())
- Y.extend(X.getBitWidth());
+ Y = Y.extend(X.getBitWidth());
// If there is a signedness mismatch, correct it.
if (X.isSigned() != Y.isSigned()) {