// We're running this loop once for each value queried, resulting in a
// runtime of ~O(#assumes * #values).
- assert(isa<IntrinsicInst>(I) &&
- dyn_cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::assume &&
+ assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
"must be an assume intrinsic");
-
+
Value *Arg = I->getArgOperand(0);
if (Arg == V && isValidAssumeForContext(I, Q)) {
N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
} else {
- N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
- ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
- : APInt();
- N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
- ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
- : APInt();
+ N0IsConst = isa<ConstantSDNode>(N0);
+ if (N0IsConst)
+ ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
+ N1IsConst = isa<ConstantSDNode>(N1);
+ if (N1IsConst)
+ ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
}
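Every hunk in this patch leans on the same contract from llvm/Support/Casting.h: isa<> answers a yes/no question, cast<> asserts that the type matches and never returns null, and dyn_cast<> only earns its keep when the null result is actually checked. A minimal sketch of that contract against a hypothetical Shape/Circle hierarchy (names invented for illustration, not part of the patch):

#include "llvm/Support/Casting.h"
using llvm::cast;
using llvm::dyn_cast;
using llvm::isa;

// Hypothetical hierarchy with the LLVM-style RTTI hook Casting.h expects.
struct Shape {
  enum Kind { CircleKind, SquareKind } K;
  Shape(Kind K) : K(K) {}
};
struct Circle : Shape {
  double Radius = 1.0;
  Circle() : Shape(CircleKind) {}
  static bool classof(const Shape *S) { return S->K == CircleKind; }
};

double area(Shape *S) {
  if (!isa<Circle>(S))               // yes/no answer; the pointer is unused
    return 0.0;
  Circle *C = cast<Circle>(S);       // type already proven; asserts, never null
  return 3.14159 * C->Radius * C->Radius;
}

double areaOrZero(Shape *S) {
  if (auto *C = dyn_cast<Circle>(S)) // test and bind in one step
    return 3.14159 * C->Radius * C->Radius;
  return 0.0;                        // the null case is genuinely handled
}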
// fold (mul c1, c2) -> c1*c2
// type.
if (V->getOperand(0).getValueType() != NVT)
return SDValue();
- unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned Idx = N->getConstantOperandVal(1);
unsigned NumElems = NVT.getVectorNumElements();
assert((Idx % NumElems) == 0 &&
"IDX in concat is not a multiple of the result vector length.");
// Add a leading constant argument with the Flags and the calling convention
// masked together
CallingConv::ID CallConv = CS.getCallingConv();
- int Flags = dyn_cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
+ int Flags = cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
assert(Flags == 0 && "not expected to be used");
Ops.push_back(DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
Ops.push_back(
#define IMPLEMENT_VECTOR_FCMP(OP) \
case Type::VectorTyID: \
- if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
} else { \
IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
if (TY->isVectorTy()) { \
- if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
+ if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
} else { \
MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
- if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].FloatVal ==
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
- if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].FloatVal !=
// Macros to choose appropriate TY: float or double and run operation
// execution
#define FLOAT_VECTOR_OP(OP) { \
- if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
else { \
- if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
else { \
dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
case Instruction::FRem:
- if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].FloatVal =
fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
else {
- if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].DoubleVal =
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
/// to CurRec's name.
Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
Init *Name, const std::string &Scoper) {
- RecTy *Type = dyn_cast<TypedInit>(Name)->getType();
+ RecTy *Type = cast<TypedInit>(Name)->getType();
BinOpInit *NewName =
BinOpInit::get(BinOpInit::STRCONCAT,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
+ int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
unsigned Scale = Log2_32(Size);
  // Skip the immediate if it can be selected by load/store addressing mode.
  // Also skip the immediate if it can be encoded by a single ADD (SUB is also
}
case ISD::FrameIndex: {
- int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
+ int FI = cast<FrameIndexSDNode>(Node)->getIndex();
EVT VT = Node->getValueType(0);
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
unsigned Opc = BPF::MOV_rr;
// is Big Endian.
unsigned OpIdx = NElts - i - 1;
SDValue Operand = BVN->getOperand(OpIdx);
- if (dyn_cast<ConstantSDNode>(Operand))
+ if (isa<ConstantSDNode>(Operand))
// This operand is already in ConstVal.
continue;
bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
const GlobalValue *GV = dyn_cast<GlobalValue>(V);
- if (GV && isa<Function>(GV) && dyn_cast<Function>(GV)->isIntrinsic())
+ if (GV && isa<Function>(GV) && cast<Function>(GV)->isIntrinsic())
return false;
if (!GV)
return false;
case Type::IntegerTyID: {
const Type *ETy = CPV->getType();
if (ETy == Type::getInt8Ty(CPV->getContext())) {
- unsigned char c =
- (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+ unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
ptr = &c;
aggBuffer->addBytes(ptr, 1, Bytes);
} else if (ETy == Type::getInt16Ty(CPV->getContext())) {
- short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+ short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
ptr = (unsigned char *)&int16;
aggBuffer->addBytes(ptr, 2, Bytes);
} else if (ETy == Type::getInt32Ty(CPV->getContext())) {
const SDNode *left = N0.getOperand(0).getNode();
const SDNode *right = N0.getOperand(1).getNode();
- if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))
+ if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
opIsLive = true;
if (!opIsLive)
// srcAddr and dstAddr are expected to be pointer types,
// so no check is made here.
- unsigned srcAS = dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace();
- unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+ unsigned srcAS = cast<PointerType>(srcAddr->getType())->getAddressSpace();
+ unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
// Cast pointers to (char *)
srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS));
origBB->getTerminator()->setSuccessor(0, loopBB);
IRBuilder<> builder(origBB, origBB->getTerminator());
- unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+ unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
// Cast pointer to the type of value getting stored
dstAddr =
unsigned NOps = N->getNumOperands();
for (unsigned i = 0; i < NOps; i++) {
// XXX: Why is this here?
- if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+ if (isa<RegisterSDNode>(N->getOperand(i))) {
IsRegSeq = false;
break;
}
def mskor_global : PatFrag<(ops node:$val, node:$ptr),
(AMDGPUstore_mskor node:$val, node:$ptr), [{
- return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
(AMDGPUstore_mskor node:$val, node:$ptr), [{
- return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;
class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
}
SwizzleRemap.clear();
BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
}
Value *SamplerId = I.getArgOperand(2);
unsigned TextureType =
- dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
unsigned SrcSelect[4] = { 0, 1, 2, 3 };
unsigned CT[4] = {1, 1, 1, 1};
Value *SamplerId = I.getArgOperand(5);
unsigned TextureType =
- dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+ cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
unsigned SrcSelect[4] = { 0, 1, 2, 3 };
unsigned CT[4] = {1, 1, 1, 1};
// Now we have only mask extension
assert(InVT.getVectorElementType() == MVT::i1);
SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
- const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
}
SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
- const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+ const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
- unsigned HintVal;
- if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
- (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
- llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+ unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
+ assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
SDValue Chain = Op.getOperand(0);
SDValue Mask = Op.getOperand(2);
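Note that both the old llvm_unreachable path and the new assert compile away in release (NDEBUG) builds, so for valid intrinsic calls this is not a behavior change; the new form simply states the invariant once instead of performing the dyn_cast twice.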
break;
case 'G':
case 'C':
- if (dyn_cast<ConstantFP>(CallOperandVal)) {
+ if (isa<ConstantFP>(CallOperandVal)) {
weight = CW_Constant;
}
break;
def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 16;
return false;
}]>;
def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 32;
return false;
}]>;
def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedLoadSDNode>(N))
- return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
+ if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+ return Load->getAlignment() >= 64;
return false;
}]>;
def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_load node:$src1, node:$src2, node:$src3), [{
- return (dyn_cast<MaskedLoadSDNode>(N) != 0);
+ return isa<MaskedLoadSDNode>(N);
}]>;
def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 16;
return false;
}]>;
def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 32;
return false;
}]>;
def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- if (dyn_cast<MaskedStoreSDNode>(N))
- return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
+ if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+ return Store->getAlignment() >= 64;
return false;
}]>;
def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
- return (dyn_cast<MaskedStoreSDNode>(N) != 0);
+ return isa<MaskedStoreSDNode>(N);
}]>;
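The same combine-the-check-and-the-cast idiom works inside TableGen PatFrag predicates, since their code blocks are pasted verbatim into the generated instruction-selector source. Roughly what comes out for the first fragment above (a sketch; the generated name and wrapper are hypothetical):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Illustrative shape of the predicate TableGen generates for
// masked_load_aligned128; the real emitted naming differs.
static bool isMaskedLoadAligned16(const SDNode *N) {
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 16;
  return false;
}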
// path is rarely taken. This seems to be the case for SPEC benchmarks.
TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
- assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+ assert(cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
IRBuilder<> IRB(I);
CallSite CS(I);
Value *Callee = CS.getCalledValue();
- if (dyn_cast<InlineAsm>(Callee)) continue;
+ if (isa<InlineAsm>(Callee)) continue;
GlobalVariable *CalleeCache = new GlobalVariable(
*F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
case Instruction::ICmp:
case Instruction::FCmp: {
// Check that all of the compares have the same predicate.
- CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+ CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
for (unsigned i = 1, e = VL.size(); i < e; ++i) {
CmpInst *Cmp = cast<CmpInst>(VL[i]);
if (Value *V = alreadyVectorized(E->Scalars))
return V;
- CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+ CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
Value *V;
if (Opcode == Instruction::FCmp)
V = Builder.CreateFCmp(P0, L, R);
outs() << indent(Indent) << "}";
} else if (yaml::AliasNode *an = dyn_cast<yaml::AliasNode>(n)){
outs() << "*" << an->getName();
- } else if (dyn_cast<yaml::NullNode>(n)) {
+ } else if (isa<yaml::NullNode>(n)) {
outs() << prettyTag(n) << " null";
}
}