case TSK_ExplicitInstantiationDefinition:
if (!DumpExplicitInst)
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
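
For context: LLVM_FALLTHROUGH is defined in llvm/include/llvm/Support/Compiler.h. A minimal sketch of the definition this patch relies on (the in-tree version additionally guards against __has_cpp_attribute itself being undefined):

  #if __has_cpp_attribute(clang::fallthrough)
  #define LLVM_FALLTHROUGH [[clang::fallthrough]]
  #else
  #define LLVM_FALLTHROUGH
  #endif
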
if (DCE->getTypeAsWritten()->isReferenceType() &&
DCE->getCastKind() == CK_Dynamic)
return true;
- } // Fall through.
+ }
+ LLVM_FALLTHROUGH;
case ImplicitCastExprClass:
case CStyleCastExprClass:
case CXXStaticCastExprClass:
switch (Kind) {
case LCK_StarThis:
Bits |= Capture_ByCopy;
- // Fall through
+ LLVM_FALLTHROUGH;
case LCK_This:
assert(!Var && "'this' capture cannot have a variable!");
Bits |= Capture_This;
break;
case LCK_ByCopy:
Bits |= Capture_ByCopy;
- // Fall through
+ LLVM_FALLTHROUGH;
case LCK_ByRef:
assert(Var && "capture must have a variable!");
break;
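
A minimal, hypothetical example of what these annotations buy: compiled with clang++ -Wimplicit-fallthrough (a C++-only warning at the time of this patch), an unannotated fallthrough is diagnosed, while LLVM_FALLTHROUGH both documents and silences it:

  int bitsFor(int Kind) {
    int Bits = 0;
    switch (Kind) {
    case 0:
      Bits |= 1;
      LLVM_FALLTHROUGH; // intentional: case 1's bit is wanted as well
    case 1:             // without the annotation, clang warns about this edge
      Bits |= 2;
      break;
    default:
      break;
    }
    return Bits;
  }
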
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strchr:
case Builtin::BI__builtin_wcschr:
case Builtin::BI__builtin_memchr:
Desired))
return ZeroInitialization(E);
StopAtNull = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BImemchr:
case Builtin::BI__builtin_memchr:
case Builtin::BI__builtin_char_memchr:
case Builtin::BIwcschr:
case Builtin::BI__builtin_wcschr:
StopAtNull = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BIwmemchr:
case Builtin::BI__builtin_wmemchr:
// wcschr and wmemchr are given a wchar_t to look for. Just use it.
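
The StopAtNull flag these fallthroughs accumulate mirrors the library semantics being modeled: the strchr/wcschr family stops searching at the terminator, while the memchr/wmemchr family examines exactly n elements. A standalone illustration (hypothetical buffer, standard library calls):

  #include <cassert>
  #include <cstring>
  int main() {
    const char Buf[] = {'a', 'b', '\0', 'c'};
    assert(std::strchr(Buf, 'c') == nullptr);             // stops at the NUL
    assert(std::memchr(Buf, 'c', sizeof Buf) == Buf + 3); // scans all 4 bytes
  }
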
case BuiltinType::Dependent:
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
};
+ break;
case Type::Enum:
return LangOpts.CPlusPlus ? enumeral_type_class : integer_type_class;
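
The added break looks redundant, since every path through the inner switch returns or hits llvm_unreachable, but if control ever did fall off the inner switch (say, an enumerator value the cases don't list), it would drop straight into Type::Enum. A hypothetical sketch of the shape, with the break making the no-fallthrough intent explicit:

  enum Inner { I0, I1 };
  int classify(int Outer, Inner In) {
    switch (Outer) {
    case 0:
      switch (In) {
      case I0: return 10;
      case I1: return 11;
      }
      break; // not reachable in practice; keeps case 0 out of case 1
    case 1:
      return 20;
    }
    return -1;
  }
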
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strlen:
case Builtin::BI__builtin_wcslen: {
// As an extension, we support __builtin_strlen() as a constant expression,
<< (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- // Fall through.
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_strcmp:
case Builtin::BI__builtin_wcscmp:
case Builtin::BI__builtin_strncmp:
if (!MD->isStatic())
Arity++;
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXLiteralOperatorName:
mangleOperatorName(Name, Arity);
LeftMost = Cur;
break;
}
- /* Fall through */
+ LLVM_FALLTHROUGH;
case FunctionNoProto:
case ConstantArray:
case DependentSizedArray:
case Type::VariableArray:
case Type::DependentSizedArray:
NeedARCStrongQualifier = true;
- // Fall through
+ LLVM_FALLTHROUGH;
case Type::Adjusted:
case Type::Decayed:
case AtomicExpr::AO__atomic_add_fetch:
PostOp = llvm::Instruction::Add;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
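
The PostOp pattern running through these cases encodes the relationship between the *_fetch_op and *_op_fetch builtins: the underlying RMW instruction returns the old value, so the op-then-fetch forms re-apply the operation to that result (and nand additionally applies the NOT that is special-cased below). The same idea in portable C++, illustrative only:

  #include <atomic>
  // __atomic_add_fetch expressed via the fetch-first primitive.
  int addFetch(std::atomic<int> &Mem, int Val) {
    int Old = Mem.fetch_add(Val); // atomic RMW; yields the value before the add
    return Old + Val;             // PostOp = Add, applied to the returned value
  }
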
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
// T __atomic_fetch_add_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_add_fetch:
PostOp = llvm::Instruction::Add;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_and_fetch:
PostOp = llvm::Instruction::And;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
// T __atomic_fetch_nand_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
}
- // fall through
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Tys), Ops);
}
- // fall through
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_s32_f32:
case NEON::BI__builtin_neon_vcvtd_s64_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
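
These NEON builtin cases all share one shape: the unsigned variant sets usgn and falls into the signed variant's body, which then selects the operation from the flag. A hypothetical scalar reduction of the pattern:

  enum CvtKind { CvtU32, CvtS32 };
  long long convert(CvtKind K, float F) {
    bool usgn = false;
    switch (K) {
    case CvtU32:
      usgn = true;
      LLVM_FALLTHROUGH; // shared body below handles both signednesses
    case CvtS32:
      return usgn ? (long long)(unsigned)F : (long long)(int)F;
    }
    return 0;
  }
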
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
RetAttrs.addAttribute(llvm::Attribute::SExt);
else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs.addAttribute(llvm::Attribute::ZExt);
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
else
Attrs.addAttribute(llvm::Attribute::ZExt);
}
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
Attrs.addAttribute(llvm::Attribute::Nest);
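
The Extend -> Direct fallthroughs in the call-lowering code reflect that ABIArgInfo::Extend is the Direct treatment plus a recorded sign- or zero-extension attribute. On targets whose ABI promotes small integers (e.g. x86-64 SysV), the effect is visible in the IR prototype; an abbreviated sketch:

  // C:        short f(void);   unsigned short g(void);
  // LLVM IR:  declare signext i16 @f()
  //           declare zeroext i16 @g()
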
case ObjCRuntime::GNUstep:
if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
return EHPersonality::GNUstep_ObjC;
- // fallthrough
+ LLVM_FALLTHROUGH;
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
if (L.SjLjExceptions)
return Visit(E->getSubExpr());
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case CK_NoOp:
case CK_UserDefinedConversion:
case CXXConstructExpr::CK_VirtualBase:
ForVirtualBase = true;
- // fall-through
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_NonVirtualBase:
Type = Ctor_Base;
// Do nothing: Patterns can come with cursor kinds!
break;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case RK_Declaration: {
// Set the availability based on attributes.
BuildScopeInformation(Var, ParentScope);
++StmtsToSkip;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Stmt::GotoStmtClass:
// Remember both what scope a goto is in as well as the fact that we have
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
IsAddSub = true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
return IntRange(R.Width, /*NonNegative*/ true);
}
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case BO_ShlAssign:
return IntRange::forValueOfType(C, GetExprType(E));
Results.AddResult(Result("mutable"));
Results.AddResult(Result("virtual"));
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_ObjCInterface:
case Sema::PCC_ObjCImplementation:
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_Class:
if (SemaRef.getLangOpts().CPlusPlus) {
Results.AddResult(Result(Builder.TakeString()));
}
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
// that has an in-class initializer, so we type-check this like
// a declaration.
//
- // Fall through
+ LLVM_FALLTHROUGH;
case VarDecl::DeclarationOnly:
// It's only a declaration.
PastFunctionChunk = true;
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case DeclaratorChunk::Array:
NeedsTypedef = true;
extendRight(After, Chunk.getSourceRange());
if (!Args[1]->isDefaultArgument())
return false;
- // fall through
+ LLVM_FALLTHROUGH;
case 1:
return !Args[0]->isDefaultArgument();
}
E->getLocStart(), nullptr,
PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
<< Ty << CT);
- // Fall through.
+ LLVM_FALLTHROUGH;
case VAK_Valid:
if (Ty->isRecordType()) {
// This is unlikely to be what the user intended. If the class has a
valueKind = VK_RValue;
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Decl::ImplicitParam:
case Decl::ParmVar: {
valueKind = VK_LValue;
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Decl::CXXConversion:
case Decl::CXXDestructor:
case IMA_Field_Uneval_Context:
Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
<< R.getLookupNameInfo().getName();
- // Fall through.
+ LLVM_FALLTHROUGH;
case IMA_Static:
case IMA_Abstract:
case IMA_Mixed_StaticContext:
case OMF_init:
if (Method)
checkInitMethod(Method, ReceiverType);
+ break;
case OMF_None:
case OMF_alloc:
if (Entity->getParent())
return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
Entity);
- // Fall through.
+ LLVM_FALLTHROUGH;
case InitializedEntity::EK_Delegating:
// We can reach this case for aggregate initialization in a constructor:
// struct A { int &&r; };
<< Args[0]->getSourceRange();
break;
}
- // Intentional fallthrough
+ LLVM_FALLTHROUGH;
case FK_NonConstLValueReferenceBindingToUnrelated:
S.Diag(Kind.getLocation(),
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
- // fall through 'OR_Deleted' case.
+ LLVM_FALLTHROUGH;
case OR_Deleted:
// We'll complain below about a non-integral condition type.
break;
case OO_Plus: // '+' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryPlusPointerOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_Minus: // '-' is either unary or binary
if (Args.size() == 1) {
case OO_EqualEqual:
case OO_ExclaimEqual:
OpBuilder.addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_Less:
case OO_Greater:
case OO_Equal:
OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_PlusEqual:
case OO_MinusEqual:
OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
- // Fall through.
+ LLVM_FALLTHROUGH;
case OO_StarEqual:
case OO_SlashEqual:
case Type::InjectedClassName:
T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
- // fall through
+ LLVM_FALLTHROUGH;
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec
case TSK_ExplicitInstantiationDefinition:
// We only need an instantiation if the pending instantiation *is* the
// explicit instantiation.
- if (Var != Var->getMostRecentDecl()) continue;
+ if (Var != Var->getMostRecentDecl())
+ continue;
+ break;
case TSK_ImplicitInstantiation:
break;
}
if (onlyBlockPointers)
continue;
- // fallthrough
+ LLVM_FALLTHROUGH;
case DeclaratorChunk::BlockPointer:
result = &ptrChunk;
}
}
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
case DeclSpec::TST_int: {
if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
switch (DS.getTypeSpecWidth()) {
case Declarator::PrototypeContext:
case Declarator::TrailingReturnContext:
isFunctionOrMethod = true;
- // fallthrough
+ LLVM_FALLTHROUGH;
case Declarator::MemberContext:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
break;
}
- // fallthrough
+ LLVM_FALLTHROUGH;
case Declarator::FileContext:
case Declarator::KNRTypeListContext: {
case CAMN_InnerPointers:
if (NumPointersRemaining == 0)
break;
- // Fallthrough.
+ LLVM_FALLTHROUGH;
case CAMN_Yes:
checkNullabilityConsistency(S, pointerKind, pointerLoc, pointerEndLoc);
break;
case 'h':
Float = true;
- // Fall through
+ LLVM_FALLTHROUGH;
case 's':
ElementBitwidth = 16;
break;
case 'f':
Float = true;
- // Fall through
+ LLVM_FALLTHROUGH;
case 'i':
ElementBitwidth = 32;
break;
case 'd':
Float = true;
- // Fall through
+ LLVM_FALLTHROUGH;
case 'l':
ElementBitwidth = 64;
break;
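
The NeonEmitter modifier letters pair a float flag with an element width, which is why 'h', 'f', and 'd' set Float and then fall through to share the width assignment with 's', 'i', and 'l'. Restated as one hypothetical pure function:

  struct ElemInfo { bool Float; unsigned Bits; };
  ElemInfo decodeModifier(char M) {
    switch (M) {
    case 'h': return {true, 16};  // half
    case 's': return {false, 16}; // short
    case 'f': return {true, 32};  // float
    case 'i': return {false, 32}; // int
    case 'd': return {true, 64};  // double
    case 'l': return {false, 64}; // long
    default:  return {false, 0};  // remaining modifiers handled elsewhere
    }
  }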