return llvm::isBytewiseValue(Init);
}
+static llvm::Constant *patternFor(CodeGenModule &CGM, llvm::Type *Ty) {
+ // The following value is a guaranteed unmappable pointer value and has a
+ // repeated byte-pattern which makes it easier to synthesize. We use it for
+ // pointers as well as integers so that aggregates are likely to be
+ // initialized with this repeated value.
+ constexpr uint64_t LargeValue = 0xAAAAAAAAAAAAAAAAull;
+ // For 32-bit platforms it's a bit trickier because, across systems, only the
+ // zero page can reasonably be expected to be unmapped, and even then we need
+ // a very low address. We use a smaller value, and that value sadly doesn't
+ // have a repeated byte-pattern. We don't use it for integers.
+ constexpr uint32_t SmallValue = 0x000000AA;
+ // Floating-point values are initialized as NaNs because they propagate. Using
+ // a repeated byte pattern means that it will be easier to initialize
+ // all-floating-point aggregates and arrays with memset. Further, aggregates
+ // which mix integral and a few floats might also initialize with memset
+ // followed by a handful of stores for the floats. Using fairly unique NaNs
+ // also means they'll be easier to distinguish in a crash.
+ constexpr bool NegativeNaN = true;
+ constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
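+  // Illustrative resulting values (these match the FileCheck expectations in
+  // the tests below):
+  //   i32    -> 0xAAAAAAAA (-1431655766)
+  //   i64    -> 0xAAAAAAAAAAAAAAAA (-6148914691236517206)
+  //   i8*    -> inttoptr (i64 0xAAAAAAAAAAAAAAAA) on 64-bit targets,
+  //             inttoptr (i32 0x000000AA) on 32-bit targets
+  //   double -> negative quiet NaN with an all-ones payload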
+ if (Ty->isIntOrIntVectorTy()) {
+ unsigned BitWidth = cast<llvm::IntegerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getBitWidth();
+ if (BitWidth <= 64)
+ return llvm::ConstantInt::get(Ty, LargeValue);
+ return llvm::ConstantInt::get(
+ Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, LargeValue)));
+ }
+ if (Ty->isPtrOrPtrVectorTy()) {
+ auto *PtrTy = cast<llvm::PointerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+ unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
+ PtrTy->getAddressSpace());
+ llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
+ uint64_t IntValue;
+ switch (PtrWidth) {
+ default:
+ llvm_unreachable("pattern initialization of unsupported pointer width");
+ case 64:
+ IntValue = LargeValue;
+ break;
+ case 32:
+ IntValue = SmallValue;
+ break;
+ }
+ auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
+ return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
+ }
+ if (Ty->isFPOrFPVectorTy()) {
+ unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
+ (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getFltSemantics());
+ llvm::APInt Payload(64, NaNPayload);
+ if (BitWidth >= 64)
+ Payload = llvm::APInt::getSplat(BitWidth, Payload);
+ return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
+ }
+ if (Ty->isArrayTy()) {
+ // Note: this doesn't touch tail padding (at the end of an object, before
+ // the next array object). It is instead handled by replaceUndef.
+ auto *ArrTy = cast<llvm::ArrayType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Element(
+ ArrTy->getNumElements(), patternFor(CGM, ArrTy->getElementType()));
+ return llvm::ConstantArray::get(ArrTy, Element);
+ }
+
+ // Note: this doesn't touch struct padding. It will initialize as much union
+ // padding as is required for the largest type in the union. Padding is
+ // instead handled by replaceUndef. Stores to structs with volatile members
+ // don't have a volatile qualifier when initialized according to C++. This is
+ // fine because stack-based volatiles don't really have volatile semantics
+ // anyways, and the initialization shouldn't be observable.
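+  // For example, `struct padded { char c; int i; }` yields the constant
+  // `{ i8 -86, i32 -1431655766 }`; the padding bytes after `c` are handled
+  // separately, as noted above.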
+ auto *StructTy = cast<llvm::StructType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
+ for (unsigned El = 0; El != Struct.size(); ++El)
+ Struct[El] = patternFor(CGM, StructTy->getElementType(El));
+ return llvm::ConstantStruct::get(StructTy, Struct);
+}
+
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant) {
+ auto *Ty = constant->getType();
+ bool isScalar = Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy() ||
+ Ty->isFPOrFPVectorTy();
+ if (isScalar) {
+ Builder.CreateStore(constant, Loc, isVolatile);
+ return;
+ }
+
auto *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
auto *IntPtrTy = CGM.getDataLayout().getIntPtrType(CGM.getLLVMContext());
// If the initializer is all or mostly the same, codegen with bzero / memset
// then do a few stores afterward.
- uint64_t ConstantSize =
- CGM.getDataLayout().getTypeAllocSize(constant->getType());
+ uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
auto *SizeVal = llvm::ConstantInt::get(IntPtrTy, ConstantSize);
if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                     isVolatile);
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
- Loc = Builder.CreateBitCast(
- Loc, constant->getType()->getPointerTo(Loc.getAddressSpace()));
+ Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
}
return;
SizeVal, isVolatile);
}
+static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
+ Address Loc, bool isVolatile,
+ CGBuilderTy &Builder) {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *constant = llvm::Constant::getNullValue(ElTy);
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+}
+
+static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
+ Address Loc, bool isVolatile,
+ CGBuilderTy &Builder) {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *constant = patternFor(CGM, ElTy);
+ assert(!isa<llvm::UndefValue>(constant));
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+}
+
+static bool containsUndef(llvm::Constant *constant) {
+ auto *Ty = constant->getType();
+ if (isa<llvm::UndefValue>(constant))
+ return true;
+ if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
+ for (llvm::Use &Op : constant->operands())
+ if (containsUndef(cast<llvm::Constant>(Op)))
+ return true;
+ return false;
+}
+
+static llvm::Constant *replaceUndef(llvm::Constant *constant) {
+ // FIXME: when doing pattern initialization, replace undef with 0xAA instead.
+ // FIXME: also replace padding between values by creating a new struct type
+ // which has no padding.
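+  // For example, an aggregate constant such as `{ i32 42, i32 undef }` is
+  // rewritten to `{ i32 42, i32 0 }`.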
+ auto *Ty = constant->getType();
+ if (isa<llvm::UndefValue>(constant))
+ return llvm::Constant::getNullValue(Ty);
+ if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
+ return constant;
+ if (!containsUndef(constant))
+ return constant;
+ llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
+ for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
+ auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
+ Values[Op] = replaceUndef(OpValue);
+ }
+ if (Ty->isStructTy())
+ return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
+ if (Ty->isArrayTy())
+ return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
+ assert(Ty->isVectorTy());
+ return llvm::ConstantVector::get(Values);
+}
+
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
QualType type = D.getType();
+ bool isVolatile = type.isVolatileQualified();
+
// If this local has an initializer, emit it now.
const Expr *Init = D.getInit();
return;
}
- if (isTrivialInitializer(Init))
- return;
-
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
- bool capturedByInit = emission.IsEscapingByRef && isCapturedBy(D, Init);
+ bool capturedByInit =
+ Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
Address Loc =
- capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+ capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+
+ // Note: constexpr already initializes everything correctly.
+ LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
+ (D.isConstexpr()
+ ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+ : (D.getAttr<UninitializedAttr>()
+ ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+ : getContext().getLangOpts().getTrivialAutoVarInit()));
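+  // The initialization kind is selected by -ftrivial-auto-var-init= (see the
+  // RUN lines in the tests below); constexpr variables and variables carrying
+  // UninitializedAttr opt out.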
+
+ auto initializeWhatIsTechnicallyUninitialized = [&]() {
+ if (trivialAutoVarInit ==
+ LangOptions::TrivialAutoVarInitKind::Uninitialized)
+ return;
+
+ CharUnits Size = getContext().getTypeSizeInChars(type);
+ if (!Size.isZero()) {
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled above");
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Pattern:
+ emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ }
+ return;
+ }
+
+ // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
+ // them, so emit a memcpy with the VLA size to initialize each element.
+ // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
+ // will catch that code, but there exists code which generates zero-sized
+ // VLAs. Be nice and initialize whatever they requested.
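+  // For example, `int vla[n]` with pattern initialization emits a
+  // vla-setup.loop / vla-init.loop pair that memcpys the element pattern into
+  // each element, guarded by a zero-size check (see below).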
+ const VariableArrayType *VlaType =
+ dyn_cast_or_null<VariableArrayType>(getContext().getAsArrayType(type));
+ if (!VlaType)
+ return;
+ auto VlaSize = getVLASize(VlaType);
+ auto SizeVal = VlaSize.NumElts;
+ CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled above");
+
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ isVolatile);
+ break;
+
+ case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *Constant = patternFor(CGM, ElTy);
+ CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
+ llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
+ llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
+ llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
+ SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
+ "vla.iszerosized");
+ Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
+ EmitBlock(SetupBB);
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ llvm::Value *BaseSizeInChars =
+ llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
+ Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
+ llvm::Value *End =
+ Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
+ llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
+ EmitBlock(LoopBB);
+ llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
+ Cur->addIncoming(Begin.getPointer(), OriginBB);
+ CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
+ Builder.CreateMemCpy(
+ Address(Cur, CurAlign),
+ createUnnamedGlobalFrom(CGM, D, Builder, Constant, ConstantAlign),
+ BaseSizeInChars, isVolatile);
+ llvm::Value *Next =
+ Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
+ llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
+ Builder.CreateCondBr(Done, ContBB, LoopBB);
+ Cur->addIncoming(Next, LoopBB);
+ EmitBlock(ContBB);
+ } break;
+ }
+ };
+
+ if (isTrivialInitializer(Init)) {
+ initializeWhatIsTechnicallyUninitialized();
+ return;
+ }
llvm::Constant *constant = nullptr;
if (emission.IsConstantAggregate || D.isConstexpr()) {
assert(!capturedByInit && "constant init contains a capturing block?");
constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
+ if (constant && trivialAutoVarInit !=
+ LangOptions::TrivialAutoVarInitKind::Uninitialized)
+ constant = replaceUndef(constant);
}
if (!constant) {
+ initializeWhatIsTechnicallyUninitialized();
LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
- // If this is a simple aggregate initialization, we can optimize it
- // in various ways.
- bool isVolatile = type.isVolatileQualified();
-
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fblocks %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=pattern %s -emit-llvm -o - | FileCheck %s -check-prefix=PATTERN
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=zero %s -emit-llvm -o - | FileCheck %s -check-prefix=ZERO
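+//
+// PATTERN checks initialization with the repeated 0xAA byte pattern (and
+// negative all-ones-payload NaNs for floating-point types); ZERO checks
+// zero initialization.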
template<typename T> void used(T &) noexcept;
used(custom); \
}
+// None of the synthesized globals should contain `undef`.
+// PATTERN-NOT: undef
+// ZERO-NOT: undef
+
+// PATTERN: @__const.test_empty_uninit.uninit = private unnamed_addr constant %struct.empty { i8 -86 }, align 1
struct empty {};
+// PATTERN: @__const.test_small_uninit.uninit = private unnamed_addr constant %struct.small { i8 -86 }, align 1
+// PATTERN: @__const.test_small_custom.custom = private unnamed_addr constant %struct.small { i8 42 }, align 1
+// ZERO: @__const.test_small_custom.custom = private unnamed_addr constant %struct.small { i8 42 }, align 1
struct small { char c; };
+// PATTERN: @__const.test_smallinit_uninit.uninit = private unnamed_addr constant %struct.smallinit { i8 -86 }, align 1
+// PATTERN: @__const.test_smallinit_braces.braces = private unnamed_addr constant %struct.smallinit { i8 -86 }, align 1
+// PATTERN: @__const.test_smallinit_custom.custom = private unnamed_addr constant %struct.smallinit { i8 -86 }, align 1
struct smallinit { char c = 42; };
+// PATTERN: @__const.test_smallpartinit_uninit.uninit = private unnamed_addr constant %struct.smallpartinit { i8 -86, i8 -86 }, align 1
+// PATTERN: @__const.test_smallpartinit_braces.braces = private unnamed_addr constant %struct.smallpartinit { i8 -86, i8 -86 }, align 1
+// PATTERN: @__const.test_smallpartinit_custom.custom = private unnamed_addr constant %struct.smallpartinit { i8 -86, i8 -86 }, align 1
struct smallpartinit { char c = 42, d; };
+// PATTERN: @__const.test_nullinit_uninit.uninit = private unnamed_addr constant %struct.nullinit { i8* inttoptr (i64 -6148914691236517206 to i8*) }, align 8
+// PATTERN: @__const.test_nullinit_braces.braces = private unnamed_addr constant %struct.nullinit { i8* inttoptr (i64 -6148914691236517206 to i8*) }, align 8
+// PATTERN: @__const.test_nullinit_custom.custom = private unnamed_addr constant %struct.nullinit { i8* inttoptr (i64 -6148914691236517206 to i8*) }, align 8
struct nullinit { char* null = nullptr; };
+// PATTERN: @__const.test_padded_uninit.uninit = private unnamed_addr constant %struct.padded { i8 -86, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_padded_custom.custom = private unnamed_addr constant %struct.padded { i8 42, i32 13371337 }, align 4
+// ZERO: @__const.test_padded_custom.custom = private unnamed_addr constant %struct.padded { i8 42, i32 13371337 }, align 4
struct padded { char c; int i; };
+// PATTERN: @__const.test_paddednullinit_uninit.uninit = private unnamed_addr constant %struct.paddednullinit { i8 -86, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_paddednullinit_braces.braces = private unnamed_addr constant %struct.paddednullinit { i8 -86, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_paddednullinit_custom.custom = private unnamed_addr constant %struct.paddednullinit { i8 -86, i32 -1431655766 }, align 4
struct paddednullinit { char c = 0; int i = 0; };
+// PATTERN: @__const.test_bitfield_uninit.uninit = private unnamed_addr constant %struct.bitfield { i8 -86, [3 x i8] c"\AA\AA\AA" }, align 4
+// PATTERN: @__const.test_bitfield_custom.custom = private unnamed_addr constant %struct.bitfield { i8 20, [3 x i8] zeroinitializer }, align 4
+// ZERO: @__const.test_bitfield_custom.custom = private unnamed_addr constant %struct.bitfield { i8 20, [3 x i8] zeroinitializer }, align 4
struct bitfield { int i : 4; int j : 2; };
+// PATTERN: @__const.test_bitfieldaligned_uninit.uninit = private unnamed_addr constant %struct.bitfieldaligned { i8 -86, [3 x i8] c"\AA\AA\AA", i8 -86, [3 x i8] c"\AA\AA\AA" }, align 4
+// PATTERN: @__const.test_bitfieldaligned_custom.custom = private unnamed_addr constant %struct.bitfieldaligned { i8 4, [3 x i8] zeroinitializer, i8 1, [3 x i8] zeroinitializer }, align 4
+// ZERO: @__const.test_bitfieldaligned_custom.custom = private unnamed_addr constant %struct.bitfieldaligned { i8 4, [3 x i8] zeroinitializer, i8 1, [3 x i8] zeroinitializer }, align 4
struct bitfieldaligned { int i : 4; int : 0; int j : 2; };
struct big { unsigned a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z; };
+// PATTERN: @__const.test_arraytail_uninit.uninit = private unnamed_addr constant %struct.arraytail { i32 -1431655766, [0 x i32] zeroinitializer }, align 4
+// PATTERN: @__const.test_arraytail_custom.custom = private unnamed_addr constant %struct.arraytail { i32 57005, [0 x i32] zeroinitializer }, align 4
+// ZERO: @__const.test_arraytail_custom.custom = private unnamed_addr constant %struct.arraytail { i32 57005, [0 x i32] zeroinitializer }, align 4
struct arraytail { int i; int arr[]; };
+// PATTERN: @__const.test_int1_uninit.uninit = private unnamed_addr constant [1 x i32] [i32 -1431655766], align 4
+// PATTERN: @__const.test_int1_custom.custom = private unnamed_addr constant [1 x i32] [i32 858993459], align 4
+// ZERO: @__const.test_int1_custom.custom = private unnamed_addr constant [1 x i32] [i32 858993459], align 4
+// PATTERN: @__const.test_bool4_uninit.uninit = private unnamed_addr constant [4 x i8] c"\AA\AA\AA\AA", align 1
+// PATTERN: @__const.test_bool4_custom.custom = private unnamed_addr constant [4 x i8] c"\01\01\01\01", align 1
+// ZERO: @__const.test_bool4_custom.custom = private unnamed_addr constant [4 x i8] c"\01\01\01\01", align 1
+// PATTERN: @__const.test_intptr4_uninit.uninit = private unnamed_addr constant [4 x i32*] [i32* inttoptr (i64 -6148914691236517206 to i32*), i32* inttoptr (i64 -6148914691236517206 to i32*), i32* inttoptr (i64 -6148914691236517206 to i32*), i32* inttoptr (i64 -6148914691236517206 to i32*)], align 16
+// PATTERN: @__const.test_intptr4_custom.custom = private unnamed_addr constant [4 x i32*] [i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*)], align 16
+// ZERO: @__const.test_intptr4_custom.custom = private unnamed_addr constant [4 x i32*] [i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*), i32* inttoptr (i64 572662306 to i32*)], align 16
+// PATTERN: @__const.test_tailpad4_uninit.uninit = private unnamed_addr constant [4 x %struct.tailpad] [%struct.tailpad { i16 -21846, i8 -86 }, %struct.tailpad { i16 -21846, i8 -86 }, %struct.tailpad { i16 -21846, i8 -86 }, %struct.tailpad { i16 -21846, i8 -86 }], align 16
+// PATTERN: @__const.test_tailpad4_custom.custom = private unnamed_addr constant [4 x %struct.tailpad] [%struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }], align 16
+// ZERO: @__const.test_tailpad4_custom.custom = private unnamed_addr constant [4 x %struct.tailpad] [%struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }, %struct.tailpad { i16 257, i8 1 }], align 16
struct tailpad { short s; char c; };
+// PATTERN: @__const.test_atomicnotlockfree_uninit.uninit = private unnamed_addr constant %struct.notlockfree { [4 x i64] [i64 -6148914691236517206, i64 -6148914691236517206, i64 -6148914691236517206, i64 -6148914691236517206] }, align 8
struct notlockfree { long long a[4]; };
+// PATTERN: @__const.test_atomicpadded_uninit.uninit = private unnamed_addr constant %struct.padded { i8 -86, i32 -1431655766 }, align 8
+// PATTERN: @__const.test_atomictailpad_uninit.uninit = private unnamed_addr constant %struct.tailpad { i16 -21846, i8 -86 }, align 4
+// PATTERN: @__const.test_complexfloat_uninit.uninit = private unnamed_addr constant { float, float } { float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000 }, align 4
+// PATTERN: @__const.test_complexfloat_braces.braces = private unnamed_addr constant { float, float } { float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000 }, align 4
+// PATTERN: @__const.test_complexfloat_custom.custom = private unnamed_addr constant { float, float } { float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000 }, align 4
+// PATTERN: @__const.test_complexdouble_uninit.uninit = private unnamed_addr constant { double, double } { double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF }, align 8
+// PATTERN: @__const.test_complexdouble_braces.braces = private unnamed_addr constant { double, double } { double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF }, align 8
+// PATTERN: @__const.test_complexdouble_custom.custom = private unnamed_addr constant { double, double } { double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF }, align 8
+// PATTERN: @__const.test_semivolatile_uninit.uninit = private unnamed_addr constant %struct.semivolatile { i32 -1431655766, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_semivolatile_custom.custom = private unnamed_addr constant %struct.semivolatile { i32 1145324612, i32 1145324612 }, align 4
struct semivolatile { int i; volatile int vi; };
+// PATTERN: @__const.test_semivolatileinit_uninit.uninit = private unnamed_addr constant %struct.semivolatileinit { i32 -1431655766, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_semivolatileinit_braces.braces = private unnamed_addr constant %struct.semivolatileinit { i32 -1431655766, i32 -1431655766 }, align 4
+// PATTERN: @__const.test_semivolatileinit_custom.custom = private unnamed_addr constant %struct.semivolatileinit { i32 -1431655766, i32 -1431655766 }, align 4
+// ZERO: @__const.test_semivolatile_custom.custom = private unnamed_addr constant %struct.semivolatile { i32 1145324612, i32 1145324612 }, align 4
struct semivolatileinit { int i = 0x11111111; volatile int vi = 0x11111111; };
+// PATTERN: @__const.test_base_uninit.uninit = private unnamed_addr constant %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) }, align 8
+// PATTERN: @__const.test_base_braces.braces = private unnamed_addr constant %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) }, align 8
struct base { virtual ~base(); };
+// PATTERN: @__const.test_derived_uninit.uninit = private unnamed_addr constant %struct.derived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) } }, align 8
+// PATTERN: @__const.test_derived_braces.braces = private unnamed_addr constant %struct.derived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) } }, align 8
struct derived : public base {};
+// PATTERN: @__const.test_virtualderived_uninit.uninit = private unnamed_addr constant %struct.virtualderived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) }, %struct.derived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) } } }, align 8
+// PATTERN: @__const.test_virtualderived_braces.braces = private unnamed_addr constant %struct.virtualderived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) }, %struct.derived { %struct.base { i32 (...)** inttoptr (i64 -6148914691236517206 to i32 (...)**) } } }, align 8
struct virtualderived : public virtual base, public virtual derived {};
+// PATTERN: @__const.test_matching_uninit.uninit = private unnamed_addr constant %union.matching { i32 -1431655766 }, align 4
+// PATTERN: @__const.test_matching_custom.custom = private unnamed_addr constant { float } { float 6.145500e+04 }, align 4
union matching { int i; float f; };
+// PATTERN: @__const.test_matchingreverse_uninit.uninit = private unnamed_addr constant %union.matchingreverse { float 0xFFFFFFFFE0000000 }, align 4
+// PATTERN: @__const.test_matchingreverse_custom.custom = private unnamed_addr constant { i32 } { i32 61455 }, align 4
+// ZERO: @__const.test_matching_custom.custom = private unnamed_addr constant { float } { float 6.145500e+04 }, align 4
union matchingreverse { float f; int i; };
+// PATTERN: @__const.test_unmatched_uninit.uninit = private unnamed_addr constant %union.unmatched { i32 -1431655766 }, align 4
+// PATTERN: @__const.test_unmatched_custom.custom = private unnamed_addr constant %union.unmatched { i32 1001242351 }, align 4
+// ZERO: @__const.test_matchingreverse_custom.custom = private unnamed_addr constant { i32 } { i32 61455 }, align 4
union unmatched { char c; int i; };
+// PATTERN: @__const.test_unmatchedreverse_uninit.uninit = private unnamed_addr constant %union.unmatchedreverse { i32 -1431655766 }, align 4
+// PATTERN: @__const.test_unmatchedreverse_custom.custom = private unnamed_addr constant { i8, [3 x i8] } { i8 42, [3 x i8] zeroinitializer }, align 4
+// ZERO: @__const.test_unmatched_custom.custom = private unnamed_addr constant %union.unmatched { i32 1001242351 }, align 4
union unmatchedreverse { int i; char c; };
+// PATTERN: @__const.test_unmatchedfp_uninit.uninit = private unnamed_addr constant %union.unmatchedfp { double 0xFFFFFFFFFFFFFFFF }, align 8
+// PATTERN: @__const.test_unmatchedfp_custom.custom = private unnamed_addr constant %union.unmatchedfp { double 0x400921FB54442D18 }, align 8
+// ZERO: @__const.test_unmatchedreverse_custom.custom = private unnamed_addr constant { i8, [3 x i8] } { i8 42, [3 x i8] zeroinitializer }, align 4
+// ZERO: @__const.test_unmatchedfp_custom.custom = private unnamed_addr constant %union.unmatchedfp { double 0x400921FB54442D18 }, align 8
union unmatchedfp { float f; double d; };
enum emptyenum {};
enum smallenum { VALUE };
// CHECK-LABEL: @test_char_uninit()
// CHECK: %uninit = alloca i8, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_char_uninit()
+// PATTERN: store i8 -86, i8* %uninit, align 1
+// ZERO-LABEL: @test_char_uninit()
+// ZERO: store i8 0, i8* %uninit, align 1
TEST_BRACES(char, char);
// CHECK-LABEL: @test_char_braces()
// CHECK-LABEL: @test_uchar_uninit()
// CHECK: %uninit = alloca i8, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_uchar_uninit()
+// PATTERN: store i8 -86, i8* %uninit, align 1
+// ZERO-LABEL: @test_uchar_uninit()
+// ZERO: store i8 0, i8* %uninit, align 1
TEST_BRACES(uchar, unsigned char);
// CHECK-LABEL: @test_uchar_braces()
// CHECK-LABEL: @test_schar_uninit()
// CHECK: %uninit = alloca i8, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_schar_uninit()
+// PATTERN: store i8 -86, i8* %uninit, align 1
+// ZERO-LABEL: @test_schar_uninit()
+// ZERO: store i8 0, i8* %uninit, align 1
TEST_BRACES(schar, signed char);
// CHECK-LABEL: @test_schar_braces()
// CHECK-LABEL: @test_wchar_t_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_wchar_t_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align
+// ZERO-LABEL: @test_wchar_t_uninit()
+// ZERO: store i32 0, i32* %uninit, align
TEST_BRACES(wchar_t, wchar_t);
// CHECK-LABEL: @test_wchar_t_braces()
// CHECK-LABEL: @test_short_uninit()
// CHECK: %uninit = alloca i16, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_short_uninit()
+// PATTERN: store i16 -21846, i16* %uninit, align
+// ZERO-LABEL: @test_short_uninit()
+// ZERO: store i16 0, i16* %uninit, align
TEST_BRACES(short, short);
// CHECK-LABEL: @test_short_braces()
// CHECK-LABEL: @test_ushort_uninit()
// CHECK: %uninit = alloca i16, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_ushort_uninit()
+// PATTERN: store i16 -21846, i16* %uninit, align
+// ZERO-LABEL: @test_ushort_uninit()
+// ZERO: store i16 0, i16* %uninit, align
TEST_BRACES(ushort, unsigned short);
// CHECK-LABEL: @test_ushort_braces()
// CHECK-LABEL: @test_int_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_int_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align
+// ZERO-LABEL: @test_int_uninit()
+// ZERO: store i32 0, i32* %uninit, align
TEST_BRACES(int, int);
// CHECK-LABEL: @test_int_braces()
// CHECK-LABEL: @test_unsigned_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_unsigned_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align
+// ZERO-LABEL: @test_unsigned_uninit()
+// ZERO: store i32 0, i32* %uninit, align
TEST_BRACES(unsigned, unsigned);
// CHECK-LABEL: @test_unsigned_braces()
// CHECK-LABEL: @test_long_uninit()
// CHECK: %uninit = alloca i64, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_long_uninit()
+// PATTERN: store i64 -6148914691236517206, i64* %uninit, align
+// ZERO-LABEL: @test_long_uninit()
+// ZERO: store i64 0, i64* %uninit, align
TEST_BRACES(long, long);
// CHECK-LABEL: @test_long_braces()
// CHECK-LABEL: @test_ulong_uninit()
// CHECK: %uninit = alloca i64, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_ulong_uninit()
+// PATTERN: store i64 -6148914691236517206, i64* %uninit, align
+// ZERO-LABEL: @test_ulong_uninit()
+// ZERO: store i64 0, i64* %uninit, align
TEST_BRACES(ulong, unsigned long);
// CHECK-LABEL: @test_ulong_braces()
// CHECK-LABEL: @test_longlong_uninit()
// CHECK: %uninit = alloca i64, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_longlong_uninit()
+// PATTERN: store i64 -6148914691236517206, i64* %uninit, align
+// ZERO-LABEL: @test_longlong_uninit()
+// ZERO: store i64 0, i64* %uninit, align
TEST_BRACES(longlong, long long);
// CHECK-LABEL: @test_longlong_braces()
// CHECK-LABEL: @test_ulonglong_uninit()
// CHECK: %uninit = alloca i64, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_ulonglong_uninit()
+// PATTERN: store i64 -6148914691236517206, i64* %uninit, align
+// ZERO-LABEL: @test_ulonglong_uninit()
+// ZERO: store i64 0, i64* %uninit, align
TEST_BRACES(ulonglong, unsigned long long);
// CHECK-LABEL: @test_ulonglong_braces()
// CHECK-LABEL: @test_int128_uninit()
// CHECK: %uninit = alloca i128, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_int128_uninit()
+// PATTERN: store i128 -113427455640312821154458202477256070486, i128* %uninit, align
+// ZERO-LABEL: @test_int128_uninit()
+// ZERO: store i128 0, i128* %uninit, align
TEST_BRACES(int128, __int128);
// CHECK-LABEL: @test_int128_braces()
// CHECK-LABEL: @test_uint128_uninit()
// CHECK: %uninit = alloca i128, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_uint128_uninit()
+// PATTERN: store i128 -113427455640312821154458202477256070486, i128* %uninit, align
+// ZERO-LABEL: @test_uint128_uninit()
+// ZERO: store i128 0, i128* %uninit, align
TEST_BRACES(uint128, unsigned __int128);
// CHECK-LABEL: @test_uint128_braces()
// CHECK-LABEL: @test_fp16_uninit()
// CHECK: %uninit = alloca half, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_fp16_uninit()
+// PATTERN: store half 0xHFFFF, half* %uninit, align
+// ZERO-LABEL: @test_fp16_uninit()
+// ZERO: store half 0xH0000, half* %uninit, align
TEST_BRACES(fp16, __fp16);
// CHECK-LABEL: @test_fp16_braces()
// CHECK-LABEL: @test_float_uninit()
// CHECK: %uninit = alloca float, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_float_uninit()
+// PATTERN: store float 0xFFFFFFFFE0000000, float* %uninit, align
+// ZERO-LABEL: @test_float_uninit()
+// ZERO: store float 0.000000e+00, float* %uninit, align
TEST_BRACES(float, float);
// CHECK-LABEL: @test_float_braces()
// CHECK-LABEL: @test_double_uninit()
// CHECK: %uninit = alloca double, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_double_uninit()
+// PATTERN: store double 0xFFFFFFFFFFFFFFFF, double* %uninit, align
+// ZERO-LABEL: @test_double_uninit()
+// ZERO: store double 0.000000e+00, double* %uninit, align
TEST_BRACES(double, double);
// CHECK-LABEL: @test_double_braces()
// CHECK-LABEL: @test_longdouble_uninit()
// CHECK: %uninit = alloca x86_fp80, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_longdouble_uninit()
+// PATTERN: store x86_fp80 0xKFFFFFFFFFFFFFFFFFFFF, x86_fp80* %uninit, align
+// ZERO-LABEL: @test_longdouble_uninit()
+// ZERO: store x86_fp80 0xK00000000000000000000, x86_fp80* %uninit, align
TEST_BRACES(longdouble, long double);
// CHECK-LABEL: @test_longdouble_braces()
// CHECK-LABEL: @test_intptr_uninit()
// CHECK: %uninit = alloca i32*, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_intptr_uninit()
+// PATTERN: store i32* inttoptr (i64 -6148914691236517206 to i32*), i32** %uninit, align
+// ZERO-LABEL: @test_intptr_uninit()
+// ZERO: store i32* null, i32** %uninit, align
TEST_BRACES(intptr, int*);
// CHECK-LABEL: @test_intptr_braces()
// CHECK-LABEL: @test_intptrptr_uninit()
// CHECK: %uninit = alloca i32**, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_intptrptr_uninit()
+// PATTERN: store i32** inttoptr (i64 -6148914691236517206 to i32**), i32*** %uninit, align
+// ZERO-LABEL: @test_intptrptr_uninit()
+// ZERO: store i32** null, i32*** %uninit, align
TEST_BRACES(intptrptr, int**);
// CHECK-LABEL: @test_intptrptr_braces()
// CHECK-LABEL: @test_function_uninit()
// CHECK: %uninit = alloca void ()*, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_function_uninit()
+// PATTERN: store void ()* inttoptr (i64 -6148914691236517206 to void ()*), void ()** %uninit, align
+// ZERO-LABEL: @test_function_uninit()
+// ZERO: store void ()* null, void ()** %uninit, align
TEST_BRACES(function, void(*)());
// CHECK-LABEL: @test_function_braces()
// CHECK-LABEL: @test_bool_uninit()
// CHECK: %uninit = alloca i8, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_bool_uninit()
+// PATTERN: store i8 -86, i8* %uninit, align 1
+// ZERO-LABEL: @test_bool_uninit()
+// ZERO: store i8 0, i8* %uninit, align 1
TEST_BRACES(bool, bool);
// CHECK-LABEL: @test_bool_braces()
// CHECK-LABEL: @test_empty_uninit()
// CHECK: %uninit = alloca %struct.empty, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_empty_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_empty_uninit.uninit
+// ZERO-LABEL: @test_empty_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(empty, empty);
// CHECK-LABEL: @test_empty_braces()
// CHECK-LABEL: @test_small_uninit()
// CHECK: %uninit = alloca %struct.small, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_small_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_small_uninit.uninit
+// ZERO-LABEL: @test_small_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(small, small);
// CHECK-LABEL: @test_small_braces()
// CHECK-NEXT: call void @llvm.memset{{.*}}(i8* align [[ALIGN]] %{{.*}}, i8 0, i64 1, i1 false)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%braces)
- TEST_CUSTOM(small, small, { 42 });
+TEST_CUSTOM(small, small, { 42 });
// CHECK-LABEL: @test_small_custom()
// CHECK: %custom = alloca %struct.small, align
// CHECK-NEXT: bitcast
// CHECK: %uninit = alloca %struct.smallpartinit, align
// CHECK-NEXT: call void @{{.*}}smallpartinit{{.*}}%uninit)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_smallpartinit_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_smallpartinit_uninit.uninit
+// ZERO-LABEL: @test_smallpartinit_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(smallpartinit, smallpartinit);
// CHECK-LABEL: @test_smallpartinit_braces()
// CHECK-LABEL: @test_padded_uninit()
// CHECK: %uninit = alloca %struct.padded, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_padded_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_padded_uninit.uninit
+// ZERO-LABEL: @test_padded_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(padded, padded);
// CHECK-LABEL: @test_padded_braces()
// CHECK: %uninit = alloca %struct.paddednullinit, align
// CHECK-NEXT: call void @{{.*}}paddednullinit{{.*}}%uninit)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_paddednullinit_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_paddednullinit_uninit.uninit
+// ZERO-LABEL: @test_paddednullinit_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(paddednullinit, paddednullinit);
// CHECK-LABEL: @test_paddednullinit_braces()
// CHECK-LABEL: @test_bitfield_uninit()
// CHECK: %uninit = alloca %struct.bitfield, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_bitfield_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_bitfield_uninit.uninit
+// ZERO-LABEL: @test_bitfield_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(bitfield, bitfield);
// CHECK-LABEL: @test_bitfield_braces()
// CHECK-LABEL: @test_bitfieldaligned_uninit()
// CHECK: %uninit = alloca %struct.bitfieldaligned, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_bitfieldaligned_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_bitfieldaligned_uninit.uninit
+// ZERO-LABEL: @test_bitfieldaligned_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(bitfieldaligned, bitfieldaligned);
// CHECK-LABEL: @test_bitfieldaligned_braces()
// CHECK-LABEL: @test_big_uninit()
// CHECK: %uninit = alloca %struct.big, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_big_uninit()
+// PATTERN: call void @llvm.memset{{.*}}, i8 -86,
+// ZERO-LABEL: @test_big_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(big, big);
// CHECK-LABEL: @test_big_braces()
// CHECK-LABEL: @test_arraytail_uninit()
// CHECK: %uninit = alloca %struct.arraytail, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_arraytail_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_arraytail_uninit.uninit
+// ZERO-LABEL: @test_arraytail_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(arraytail, arraytail);
// CHECK-LABEL: @test_arraytail_braces()
// CHECK-LABEL: @test_int0_uninit()
// CHECK: %uninit = alloca [0 x i32], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_int0_uninit()
+// PATTERN: %uninit = alloca [0 x i32], align
+// PATTERN-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// ZERO-LABEL: @test_int0_uninit()
+// ZERO: %uninit = alloca [0 x i32], align
+// ZERO-NEXT: call void @{{.*}}used{{.*}}%uninit)
TEST_BRACES(int0, int[0]);
// CHECK-LABEL: @test_int0_braces()
// CHECK-LABEL: @test_int1_uninit()
// CHECK: %uninit = alloca [1 x i32], align [[ALIGN:[0-9]*]]
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_int1_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_int1_uninit.uninit
+// ZERO-LABEL: @test_int1_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(int1, int[1]);
// CHECK-LABEL: @test_int1_braces()
// CHECK-LABEL: @test_int64_uninit()
// CHECK: %uninit = alloca [64 x i32], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_int64_uninit()
+// PATTERN: call void @llvm.memset{{.*}}, i8 -86,
+// ZERO-LABEL: @test_int64_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(int64, int[64]);
// CHECK-LABEL: @test_int64_braces()
// CHECK-LABEL: @test_bool4_uninit()
// CHECK: %uninit = alloca [4 x i8], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_bool4_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_bool4_uninit.uninit
+// ZERO-LABEL: @test_bool4_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(bool4, bool[4]);
// CHECK-LABEL: @test_bool4_braces()
// CHECK-LABEL: @test_intptr4_uninit()
// CHECK: %uninit = alloca [4 x i32*], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_intptr4_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_intptr4_uninit.uninit
+// ZERO-LABEL: @test_intptr4_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(intptr4, int*[4]);
// CHECK-LABEL: @test_intptr4_braces()
// CHECK-LABEL: @test_tailpad4_uninit()
// CHECK: %uninit = alloca [4 x %struct.tailpad], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_tailpad4_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_tailpad4_uninit.uninit
+// ZERO-LABEL: @test_tailpad4_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(tailpad4, tailpad[4]);
// CHECK-LABEL: @test_tailpad4_braces()
// CHECK-LABEL: @test_tailpad9_uninit()
// CHECK: %uninit = alloca [9 x %struct.tailpad], align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_tailpad9_uninit()
+// PATTERN: call void @llvm.memset{{.*}}, i8 -86,
+// ZERO-LABEL: @test_tailpad9_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(tailpad9, tailpad[9]);
// CHECK-LABEL: @test_tailpad9_braces()
// CHECK-LABEL: @test_atomicbool_uninit()
// CHECK: %uninit = alloca i8, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomicbool_uninit()
+// PATTERN: store i8 -86, i8* %uninit, align 1
+// ZERO-LABEL: @test_atomicbool_uninit()
+// ZERO: store i8 0, i8* %uninit, align 1
TEST_UNINIT(atomicint, _Atomic(int));
// CHECK-LABEL: @test_atomicint_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomicint_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align 4
+// ZERO-LABEL: @test_atomicint_uninit()
+// ZERO: store i32 0, i32* %uninit, align 4
TEST_UNINIT(atomicdouble, _Atomic(double));
// CHECK-LABEL: @test_atomicdouble_uninit()
// CHECK: %uninit = alloca double, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomicdouble_uninit()
+// PATTERN: store double 0xFFFFFFFFFFFFFFFF, double* %uninit, align 8
+// ZERO-LABEL: @test_atomicdouble_uninit()
+// ZERO: store double 0.000000e+00, double* %uninit, align 8
TEST_UNINIT(atomicnotlockfree, _Atomic(notlockfree));
// CHECK-LABEL: @test_atomicnotlockfree_uninit()
// CHECK: %uninit = alloca %struct.notlockfree, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomicnotlockfree_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_atomicnotlockfree_uninit.uninit
+// ZERO-LABEL: @test_atomicnotlockfree_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_UNINIT(atomicpadded, _Atomic(padded));
// CHECK-LABEL: @test_atomicpadded_uninit()
// CHECK: %uninit = alloca %struct.padded, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomicpadded_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_atomicpadded_uninit.uninit
+// ZERO-LABEL: @test_atomicpadded_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_UNINIT(atomictailpad, _Atomic(tailpad));
// CHECK-LABEL: @test_atomictailpad_uninit()
// CHECK: %uninit = alloca %struct.tailpad, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_atomictailpad_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_atomictailpad_uninit.uninit
+// ZERO-LABEL: @test_atomictailpad_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_UNINIT(complexfloat, _Complex float);
// CHECK-LABEL: @test_complexfloat_uninit()
// CHECK: %uninit = alloca { float, float }, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_complexfloat_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_complexfloat_uninit.uninit
+// ZERO-LABEL: @test_complexfloat_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(complexfloat, _Complex float);
// CHECK-LABEL: @test_complexfloat_braces()
// CHECK-LABEL: @test_complexdouble_uninit()
// CHECK: %uninit = alloca { double, double }, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_complexdouble_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_complexdouble_uninit.uninit
+// ZERO-LABEL: @test_complexdouble_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(complexdouble, _Complex double);
// CHECK-LABEL: @test_complexdouble_braces()
// CHECK-LABEL: @test_volatileint_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_volatileint_uninit()
+// PATTERN: store volatile i32 -1431655766, i32* %uninit, align 4
+// ZERO-LABEL: @test_volatileint_uninit()
+// ZERO: store volatile i32 0, i32* %uninit, align 4
TEST_BRACES(volatileint, volatile int);
// CHECK-LABEL: @test_volatileint_braces()
// CHECK-LABEL: @test_semivolatile_uninit()
// CHECK: %uninit = alloca %struct.semivolatile, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_semivolatile_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_semivolatile_uninit.uninit
+// ZERO-LABEL: @test_semivolatile_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(semivolatile, semivolatile);
// CHECK-LABEL: @test_semivolatile_braces()
// CHECK: %uninit = alloca %struct.base, align
// CHECK-NEXT: call void @{{.*}}base{{.*}}%uninit)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_base_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_base_uninit.uninit
+// ZERO-LABEL: @test_base_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(base, base);
// CHECK-LABEL: @test_base_braces()
// CHECK: %uninit = alloca %struct.derived, align
// CHECK-NEXT: call void @{{.*}}derived{{.*}}%uninit)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_derived_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_derived_uninit.uninit
+// ZERO-LABEL: @test_derived_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(derived, derived);
// CHECK-LABEL: @test_derived_braces()
// CHECK: %uninit = alloca %struct.virtualderived, align
// CHECK-NEXT: call void @{{.*}}virtualderived{{.*}}%uninit)
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_virtualderived_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_virtualderived_uninit.uninit
+// ZERO-LABEL: @test_virtualderived_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(virtualderived, virtualderived);
// CHECK-LABEL: @test_virtualderived_braces()
// CHECK-LABEL: @test_matching_uninit()
// CHECK: %uninit = alloca %union.matching, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_matching_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_matching_uninit.uninit
+// ZERO-LABEL: @test_matching_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(matching, matching);
// CHECK-LABEL: @test_matching_braces()
// CHECK-LABEL: @test_matchingreverse_uninit()
// CHECK: %uninit = alloca %union.matchingreverse, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_matchingreverse_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_matchingreverse_uninit.uninit
+// ZERO-LABEL: @test_matchingreverse_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(matchingreverse, matchingreverse);
// CHECK-LABEL: @test_matchingreverse_braces()
// CHECK-LABEL: @test_unmatched_uninit()
// CHECK: %uninit = alloca %union.unmatched, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_unmatched_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_unmatched_uninit.uninit
+// ZERO-LABEL: @test_unmatched_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(unmatched, unmatched);
// CHECK-LABEL: @test_unmatched_braces()
// CHECK-LABEL: @test_unmatchedreverse_uninit()
// CHECK: %uninit = alloca %union.unmatchedreverse, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_unmatchedreverse_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_unmatchedreverse_uninit.uninit
+// ZERO-LABEL: @test_unmatchedreverse_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(unmatchedreverse, unmatchedreverse);
// CHECK-LABEL: @test_unmatchedreverse_braces()
// CHECK-LABEL: @test_unmatchedfp_uninit()
// CHECK: %uninit = alloca %union.unmatchedfp, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_unmatchedfp_uninit()
+// PATTERN: call void @llvm.memcpy{{.*}} @__const.test_unmatchedfp_uninit.uninit
+// ZERO-LABEL: @test_unmatchedfp_uninit()
+// ZERO: call void @llvm.memset{{.*}}, i8 0,
TEST_BRACES(unmatchedfp, unmatchedfp);
// CHECK-LABEL: @test_unmatchedfp_braces()
// CHECK-LABEL: @test_emptyenum_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_emptyenum_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align 4
+// ZERO-LABEL: @test_emptyenum_uninit()
+// ZERO: store i32 0, i32* %uninit, align 4
TEST_BRACES(emptyenum, emptyenum);
// CHECK-LABEL: @test_emptyenum_braces()
// CHECK-LABEL: @test_smallenum_uninit()
// CHECK: %uninit = alloca i32, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_smallenum_uninit()
+// PATTERN: store i32 -1431655766, i32* %uninit, align 4
+// ZERO-LABEL: @test_smallenum_uninit()
+// ZERO: store i32 0, i32* %uninit, align 4
TEST_BRACES(smallenum, smallenum);
// CHECK-LABEL: @test_smallenum_braces()
// CHECK-LABEL: @test_intvec16_uninit()
// CHECK: %uninit = alloca <4 x i32>, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_intvec16_uninit()
+// PATTERN: store <4 x i32> <i32 -1431655766, i32 -1431655766, i32 -1431655766, i32 -1431655766>, <4 x i32>* %uninit, align 16
+// ZERO-LABEL: @test_intvec16_uninit()
+// ZERO: store <4 x i32> zeroinitializer, <4 x i32>* %uninit, align 16
TEST_BRACES(intvec16, int __attribute__((vector_size(16))));
// CHECK-LABEL: @test_intvec16_braces()
// CHECK-NEXT: store <4 x i32> zeroinitializer, <4 x i32>* %braces, align [[ALIGN]]
// CHECK-NEXT: call void @{{.*}}used{{.*}}%braces)
- TEST_CUSTOM(intvec16, int __attribute__((vector_size(16))), { 0x44444444, 0x44444444, 0x44444444, 0x44444444 });
+TEST_CUSTOM(intvec16, int __attribute__((vector_size(16))), { 0x44444444, 0x44444444, 0x44444444, 0x44444444 });
// CHECK-LABEL: @test_intvec16_custom()
// CHECK: %custom = alloca <4 x i32>, align [[ALIGN:[0-9]*]]
// CHECK-NEXT: store <4 x i32> <i32 1145324612, i32 1145324612, i32 1145324612, i32 1145324612>, <4 x i32>* %custom, align [[ALIGN]]
// CHECK-LABEL: @test_longlongvec32_uninit()
// CHECK: %uninit = alloca <4 x i64>, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_longlongvec32_uninit()
+// PATTERN: store <4 x i64> <i64 -6148914691236517206, i64 -6148914691236517206, i64 -6148914691236517206, i64 -6148914691236517206>, <4 x i64>* %uninit, align 32
+// ZERO-LABEL: @test_longlongvec32_uninit()
+// ZERO: store <4 x i64> zeroinitializer, <4 x i64>* %uninit, align 32
TEST_BRACES(longlongvec32, long long __attribute__((vector_size(32))));
// CHECK-LABEL: @test_longlongvec32_braces()
// CHECK-LABEL: @test_floatvec16_uninit()
// CHECK: %uninit = alloca <4 x float>, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_floatvec16_uninit()
+// PATTERN: store <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, <4 x float>* %uninit, align 16
+// ZERO-LABEL: @test_floatvec16_uninit()
+// ZERO: store <4 x float> zeroinitializer, <4 x float>* %uninit, align 16
TEST_BRACES(floatvec16, float __attribute__((vector_size(16))));
// CHECK-LABEL: @test_floatvec16_braces()
// CHECK-LABEL: @test_doublevec32_uninit()
// CHECK: %uninit = alloca <4 x double>, align
// CHECK-NEXT: call void @{{.*}}used{{.*}}%uninit)
+// PATTERN-LABEL: @test_doublevec32_uninit()
+// PATTERN: store <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>, <4 x double>* %uninit, align 32
+// ZERO-LABEL: @test_doublevec32_uninit()
+// ZERO: store <4 x double> zeroinitializer, <4 x double>* %uninit, align 32
TEST_BRACES(doublevec32, double __attribute__((vector_size(32))));
// CHECK-LABEL: @test_doublevec32_braces()