/// bit-fields.
bool isZeroLengthBitField(const ASTContext &Ctx) const;
+ /// Determine if this field is a subobject of zero size, that is, either a
+ /// zero-length bit-field or a field of empty class type with the
+ /// [[no_unique_address]] attribute.
+ bool isZeroSize(const ASTContext &Ctx) const;
+
/// Get the kind of (C++11) default member initializer that this field has.
InClassInitStyle getInClassInitStyle() const {
InitStorageKind storageKind = InitStorage.getInt();
/// True when this class is a POD-type.
unsigned PlainOldData : 1;
- /// true when this class is empty for traits purposes,
- /// i.e. has no data members other than 0-width bit-fields, has no
- /// virtual function/base, and doesn't inherit from a non-empty
- /// class. Doesn't take union-ness into account.
+ /// True when this class is empty for traits purposes, that is:
+ /// * has no data members other than 0-width bit-fields and empty fields
+ /// marked [[no_unique_address]]
+ /// * has no virtual function/base, and
+ /// * doesn't inherit from a non-empty class.
+ /// Doesn't take union-ness into account.
unsigned Empty : 1;
/// True when this class is polymorphic, i.e., has at
}
def DocCatFunction : DocumentationCategory<"Function Attributes">;
def DocCatVariable : DocumentationCategory<"Variable Attributes">;
+// New documentation category for attributes that appertain to non-static
+// data members (fields), such as [[no_unique_address]].
+def DocCatField : DocumentationCategory<"Field Attributes">;
def DocCatType : DocumentationCategory<"Type Attributes">;
def DocCatStmt : DocumentationCategory<"Statement Attributes">;
def DocCatDecl : DocumentationCategory<"Declaration Attributes">;
// Specifies Operating Systems for which the target applies, based off the
// OSType enumeration in Triple.h
list<string> OSes;
- // Specifies the C++ ABIs for which the target applies, based off the
- // TargetCXXABI::Kind in TargetCXXABI.h.
- list<string> CXXABIs;
// Specifies Object Formats for which the target applies, based off the
// ObjectFormatType enumeration in Triple.h
list<string> ObjectFormats;
+ // A custom predicate, written as an expression evaluated in a context
+ // with the following declarations in scope:
+ // const clang::TargetInfo &Target;
+ // const llvm::Triple &T = Target.getTriple();
+ code CustomCode = [{}];
}
class TargetArch<list<string> arches> : TargetSpec {
def TargetWindows : TargetArch<["x86", "x86_64", "arm", "thumb", "aarch64"]> {
let OSes = ["Win32"];
}
+// Matches any target whose C++ ABI is in the Itanium family. Expressed via
+// the CustomCode predicate (evaluated against clang::TargetInfo) rather than
+// an architecture/ABI string list.
+def TargetItaniumCXXABI : TargetSpec {
+ let CustomCode = [{ Target.getCXXABI().isItaniumFamily() }];
+}
+// The Microsoft ABI check likewise moves from the removed CXXABIs list to a
+// CustomCode predicate.
def TargetMicrosoftCXXABI : TargetArch<["x86", "x86_64", "arm", "thumb", "aarch64"]> {
- let CXXABIs = ["Microsoft"];
+ let CustomCode = [{ Target.getCXXABI().isMicrosoft() }];
}
def TargetELF : TargetSpec {
 let ObjectFormats = ["ELF"];
 let ASTNode = 0;
}
+// C++20 [[no_unique_address]] (P0840R2). Restricted to Itanium-family ABIs
+// for now (the FIXME in FieldDecl::isZeroSize notes the MS ABI behavior is
+// not yet known); only valid on non-bit-field data members, with the C++20
+// feature-test value 201803.
+def NoUniqueAddress : InheritableAttr, TargetSpecificAttr<TargetItaniumCXXABI> {
+ let Spellings = [CXX11<"", "no_unique_address", 201803>];
+ let Subjects = SubjectList<[NonBitField], ErrorDiag>;
+ let Documentation = [NoUniqueAddressDocs];
+}
+
def ReturnsTwice : InheritableAttr {
let Spellings = [GCC<"returns_twice">];
let Subjects = SubjectList<[Function]>;
}];
}
+// User-facing documentation for [[no_unique_address]], rendered under the
+// "Field Attributes" category of the attribute reference. The Content block
+// below is the published text and must not be edited casually.
+def NoUniqueAddressDocs : Documentation {
+ let Category = DocCatField;
+ let Content = [{
+The ``no_unique_address`` attribute allows tail padding in a non-static data
+member to overlap other members of the enclosing class (and in the special
+case when the type is empty, permits it to fully overlap other members).
+The field is laid out as if a base class were encountered at the corresponding
+point within the class (except that it does not share a vptr with the enclosing
+object).
+
+Example usage:
+
+.. code-block:: c++
+
+ template<typename T, typename Alloc> struct my_vector {
+ T *p;
+ [[no_unique_address]] Alloc alloc;
+ // ...
+ };
+ static_assert(sizeof(my_vector<int, std::allocator<int>>) == sizeof(int*));
+
+``[[no_unique_address]]`` is a standard C++20 attribute. Clang supports its use
+in C++11 onwards.
+ }];
+}
+
def ObjCRequiresSuperDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
getBitWidthValue(Ctx) == 0;
}
+// Returns true if this field is a zero-size subobject: either a zero-length
+// bit-field, or a field of empty class type marked [[no_unique_address]]
+// (per the C++2a [intro.object]p7 rules quoted inline below).
+bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
+ if (isZeroLengthBitField(Ctx))
+ return true;
+
+ // C++2a [intro.object]p7:
+ // An object has nonzero size if it
+ // -- is not a potentially-overlapping subobject, or
+ if (!hasAttr<NoUniqueAddressAttr>())
+ return false;
+
+ // -- is not of class type, or
+ const auto *RT = getType()->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (!RD) {
+ assert(isInvalidDecl() && "valid field has incomplete type");
+ return false;
+ }
+
+ // -- [has] virtual member functions or virtual base classes, or
+ // -- has subobjects of nonzero size or bit-fields of nonzero length
+ // The attribute only appertains in C++, so the record is a CXXRecordDecl
+ // and isEmpty() already accounts for the bullets above.
+ const auto *CXXRD = cast<CXXRecordDecl>(RD);
+ if (!CXXRD->isEmpty())
+ return false;
+
+ // Otherwise, [...] the circumstances under which the object has zero size
+ // are implementation-defined.
+ // FIXME: This might be Itanium ABI specific; we don't yet know what the MS
+ // ABI will do.
+ return true;
+}
+
unsigned FieldDecl::getFieldIndex() const {
const FieldDecl *Canonical = getCanonicalDecl();
if (Canonical != this)
// that sure looks like a wording bug.
// -- If X is a non-union class type with a non-static data member
- // [recurse to] the first non-static data member of X
+ // [recurse to each field] that is either of zero size or is the
+ // first non-static data member of X
// -- If X is a union type, [recurse to union members]
+ bool IsFirstField = true;
for (auto *FD : X->fields()) {
// FIXME: Should we really care about the type of the first non-static
// data member of a non-union if there are preceding unnamed bit-fields?
if (FD->isUnnamedBitfield())
continue;
+ if (!IsFirstField && !FD->isZeroSize(Ctx))
+ continue;
+
// -- If X is n array type, [visit the element type]
QualType T = Ctx.getBaseElementType(FD->getType());
if (auto *RD = T->getAsCXXRecordDecl())
return true;
if (!X->isUnion())
- break;
+ IsFirstField = false;
}
}
if (T->isReferenceType())
data().DefaultedMoveAssignmentIsDeleted = true;
+ // Bitfields of length 0 are also zero-sized, but we already bailed out for
+ // those because they are always unnamed.
+ bool IsZeroSize = Field->isZeroSize(Context);
+
if (const auto *RecordTy = T->getAs<RecordType>()) {
auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
if (FieldRec->getDefinition()) {
// A standard-layout class is a class that:
// [...]
// -- has no element of the set M(S) of types as a base class.
- if (data().IsStandardLayout && (isUnion() || IsFirstField) &&
+ if (data().IsStandardLayout &&
+ (isUnion() || IsFirstField || IsZeroSize) &&
hasSubobjectAtOffsetZeroOfEmptyBaseType(Context, FieldRec))
data().IsStandardLayout = false;
}
// C++14 [meta.unary.prop]p4:
- // T is a class type [...] with [...] no non-static data members
- data().Empty = false;
+ // T is a class type [...] with [...] no non-static data members other
+ // than subobjects of zero size
+ if (data().Empty && !IsZeroSize)
+ data().Empty = false;
}
// Handle using declarations of conversion functions.
CharUnits Offset, bool PlacingEmptyBase);
void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
- const CXXRecordDecl *Class,
- CharUnits Offset);
- void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
+ const CXXRecordDecl *Class, CharUnits Offset,
+ bool PlacingOverlappingField);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset,
+ bool PlacingOverlappingField);
/// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
/// subobjects beyond the given offset.
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingEmptyBase);
}
}
return false;
// We are able to place the member variable at this offset.
- // Make sure to update the empty base subobject map.
- UpdateEmptyFieldSubobjects(FD, Offset);
+ // Make sure to update the empty field subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset, FD->hasAttr<NoUniqueAddressAttr>());
return true;
}
-void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
- const CXXRecordDecl *Class,
- CharUnits Offset) {
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(
+ const CXXRecordDecl *RD, const CXXRecordDecl *Class, CharUnits Offset,
+ bool PlacingOverlappingField) {
// We know that the only empty subobjects that can conflict with empty
- // field subobjects are subobjects of empty bases that can be placed at offset
- // zero. Because of this, we only need to keep track of empty field
- // subobjects with offsets less than the size of the largest empty
- // subobject for our class.
- if (Offset >= SizeOfLargestEmptySubobject)
+ // field subobjects are subobjects of empty bases and potentially-overlapping
+ // fields that can be placed at offset zero. Because of this, we only need to
+ // keep track of empty field subobjects with offsets less than the size of
+ // the largest empty subobject for our class.
+ //
+ // (Proof: we will only consider placing a subobject at offset zero or at
+ // >= the current dsize. The only cases where the earlier subobject can be
+ // placed beyond the end of dsize is if it's an empty base or a
+ // potentially-overlapping field.)
+ if (!PlacingOverlappingField && Offset >= SizeOfLargestEmptySubobject)
return;
AddSubobjectAtOffset(RD, Offset);
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
- UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset,
+ PlacingOverlappingField);
}
if (RD == Class) {
const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
- UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset,
+ PlacingOverlappingField);
}
}
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(*I, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingOverlappingField);
}
}
-void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
- CharUnits Offset) {
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(
+ const FieldDecl *FD, CharUnits Offset, bool PlacingOverlappingField) {
QualType T = FD->getType();
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
- UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ UpdateEmptyFieldSubobjects(RD, RD, Offset, PlacingOverlappingField);
return;
}
// offset zero. Because of this, we only need to keep track of empty field
// subobjects with offsets less than the size of the largest empty
// subobject for our class.
- if (ElementOffset >= SizeOfLargestEmptySubobject)
+ if (!PlacingOverlappingField &&
+ ElementOffset >= SizeOfLargestEmptySubobject)
return;
- UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset,
+ PlacingOverlappingField);
ElementOffset += Layout.getSize();
}
}
CharUnits NonVirtualSize;
CharUnits NonVirtualAlignment;
+ /// If we've laid out a field but not included its tail padding in Size yet,
+ /// this is the size up to the end of that field.
+ CharUnits PaddedFieldSize;
+
/// PrimaryBase - the primary base class (if one exists) of the class
/// we're laying out.
const CXXRecordDecl *PrimaryBase;
UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
MaxFieldAlignment(CharUnits::Zero()), DataSize(0),
NonVirtualSize(CharUnits::Zero()),
- NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr),
+ NonVirtualAlignment(CharUnits::One()),
+ PaddedFieldSize(CharUnits::Zero()), PrimaryBase(nullptr),
PrimaryBaseIsVirtual(false), HasOwnVFPtr(false),
HasPackedField(false), FirstNearlyEmptyVBase(nullptr) {}
// Round up the current record size to pointer alignment.
setSize(getSize().alignTo(BaseAlign));
- setDataSize(getSize());
// Update the alignment.
UpdateAlignment(BaseAlign, UnpackedBaseAlign);
// Query the external layout to see if it provides an offset.
bool HasExternalLayout = false;
if (UseExternalLayout) {
+ // FIXME: This appears to be reversed.
if (Base->IsVirtual)
HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
else
// We start laying out ivars not at the end of the superclass
// structure, but at the next byte following the last field.
- setSize(SL.getDataSize());
- setDataSize(getSize());
+ setDataSize(SL.getDataSize());
+ setSize(getDataSize());
}
InitializeLayout(D);
UnfilledBitsInLastUnit = 0;
LastBitfieldTypeSize = 0;
+ auto *FieldClass = D->getType()->getAsCXXRecordDecl();
+ bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
+ bool IsOverlappingEmptyField = PotentiallyOverlapping && FieldClass->isEmpty();
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
- CharUnits FieldOffset =
- IsUnion ? CharUnits::Zero() : getDataSize();
+
+ CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField)
+ ? CharUnits::Zero()
+ : getDataSize();
CharUnits FieldSize;
CharUnits FieldAlign;
+ // The amount of this class's dsize occupied by the field.
+ // This is equal to FieldSize unless we're permitted to pack
+ // into the field's tail padding.
+ CharUnits EffectiveFieldSize;
if (D->getType()->isIncompleteArrayType()) {
// This is a flexible array member; we can't directly
// query getTypeInfo about these, so we figure it out here.
// Flexible array members don't have any size, but they
// have to be aligned appropriately for their element type.
- FieldSize = CharUnits::Zero();
+ EffectiveFieldSize = FieldSize = CharUnits::Zero();
const ArrayType* ATy = Context.getAsArrayType(D->getType());
FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
} else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType());
- FieldSize =
+ EffectiveFieldSize = FieldSize =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
FieldAlign =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
} else {
std::pair<CharUnits, CharUnits> FieldInfo =
Context.getTypeInfoInChars(D->getType());
- FieldSize = FieldInfo.first;
+ EffectiveFieldSize = FieldSize = FieldInfo.first;
FieldAlign = FieldInfo.second;
+ // A potentially-overlapping field occupies its dsize or nvsize, whichever
+ // is larger.
+ if (PotentiallyOverlapping) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass);
+ EffectiveFieldSize =
+ std::max(Layout.getNonVirtualSize(), Layout.getDataSize());
+ }
+
if (IsMsStruct) {
// If MS bitfield layout is required, figure out what type is being
// laid out and align the field to the width of that type.
// Check if we can place the field at this offset.
while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
// We couldn't place the field at the offset. Try again at a new offset.
- FieldOffset += FieldAlign;
+ // We try offset 0 (for an empty field) and then dsize(C) onwards.
+ if (FieldOffset == CharUnits::Zero() &&
+ getDataSize() != CharUnits::Zero())
+ FieldOffset = getDataSize().alignTo(FieldAlign);
+ else
+ FieldOffset += FieldAlign;
}
}
}
if (FieldSize % ASanAlignment)
ExtraSizeForAsan +=
ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
- FieldSize += ExtraSizeForAsan;
+ EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan;
}
// Reserve space for this field.
- uint64_t FieldSizeInBits = Context.toBits(FieldSize);
- if (IsUnion)
- setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
- else
- setDataSize(FieldOffset + FieldSize);
+ if (!IsOverlappingEmptyField) {
+ uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize);
+ if (IsUnion)
+ setDataSize(std::max(getDataSizeInBits(), EffectiveFieldSizeInBits));
+ else
+ setDataSize(FieldOffset + EffectiveFieldSize);
- // Update the size.
- setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize);
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ } else {
+ setSize(std::max(getSizeInBits(),
+ (uint64_t)Context.toBits(FieldOffset + FieldSize)));
+ }
// Remember max struct/class alignment.
UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign);
setSize(CharUnits::One());
}
+ // If we have any remaining field tail padding, include that in the overall
+ // size.
+ setSize(std::max(getSizeInBits(), (uint64_t)Context.toBits(PaddedFieldSize)));
+
// Finally, round the size of the record up to the alignment of the
// record itself.
uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit;
return EmitLValueForField(LambdaLV, Field);
}
+/// Get the address of a zero-sized field within a record. The resulting
+/// address doesn't necessarily have the right type.
+static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
+ const FieldDecl *Field) {
+ // Zero-size fields are not given an LLVM struct slot, so their address is
+ // computed as a raw byte offset from the start of the record instead of
+ // via a struct GEP.
+ CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
+ CGF.getContext().getFieldOffset(Field));
+ if (Offset.isZero())
+ return Base;
+ Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
+ return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
+}
+
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
const FieldDecl *field) {
+ if (field->isZeroSize(CGF.getContext()))
+ return emitAddrOfZeroSizeField(CGF, base, field);
+
const RecordDecl *rec = field->getParent();
unsigned idx =
return LV;
}
+/// Determine whether initializing the given field may write into storage
+/// that overlaps some other already-initialized subobject. Only record-type
+/// fields marked [[no_unique_address]] can overlap.
+AggValueSlot::Overlap_t
+CodeGenFunction::overlapForFieldInit(const FieldDecl *FD) {
+ if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
+ return AggValueSlot::DoesNotOverlap;
+
+ // If the field lies entirely within the enclosing class's nvsize, its tail
+ // padding cannot overlap any already-initialized object. (The only subobjects
+ // with greater addresses that might already be initialized are vbases.)
+ const RecordDecl *ClassRD = FD->getParent();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
+ if (Layout.getFieldOffset(FD->getFieldIndex()) +
+ getContext().getTypeSize(FD->getType()) <=
+ (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
+ return AggValueSlot::DoesNotOverlap;
+
+ // The tail padding may contain values we need to preserve.
+ return AggValueSlot::MayOverlap;
+}
+
AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit(
const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
- // Virtual bases are initialized first, in address order, so there's never
- // any overlap during their initialization.
- //
- // FIXME: Under P0840, this is no longer true: the tail padding of a vbase
- // of a field could be reused by a vbase of a containing class.
+ // If the most-derived object is a field declared with [[no_unique_address]],
+ // the tail padding of any virtual base could be reused for other subobjects
+ // of that field's class.
if (IsVirtual)
- return AggValueSlot::DoesNotOverlap;
+ return AggValueSlot::MayOverlap;
// If the base class is laid out entirely within the nvsize of the derived
// class, its tail padding cannot yet be initialized, so we can issue
++FieldNo;
// If this is a union, skip all the fields that aren't being initialized.
- if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field)
+ if (RD->isUnion() &&
+ !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
continue;
- // Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield())
+ // Don't emit anonymous bitfields or zero-sized fields.
+ if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
continue;
// Get the initializer. A struct can include fields without initializers,
if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
AllowOverwrite))
return false;
+ // After emitting a non-empty field with [[no_unique_address]], we may
+ // need to overwrite its tail padding.
+ if (Field->hasAttr<NoUniqueAddressAttr>())
+ AllowOverwrite = true;
} else {
// Otherwise we have a bitfield.
if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
unsigned FieldNo = 0;
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+ bool AllowOverwrite = false;
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
// If this is a union, skip all the fields that aren't being initialized.
if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field))
continue;
- // Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield())
+ // Don't emit anonymous bitfields or zero-sized fields.
+ if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
continue;
// Emit the value of the initializer.
if (!Field->isBitField()) {
// Handle non-bitfield members.
if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
- EltInit))
+ EltInit, AllowOverwrite))
return false;
+ // After emitting a non-empty field with [[no_unique_address]], we may
+ // need to overwrite its tail padding.
+ if (Field->hasAttr<NoUniqueAddressAttr>())
+ AllowOverwrite = true;
} else {
// Otherwise we have a bitfield.
if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
- cast<llvm::ConstantInt>(EltInit)))
+ cast<llvm::ConstantInt>(EltInit), AllowOverwrite))
return false;
}
}
for (const auto *Field : record->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField()) {
+ if (!Field->isBitField() && !Field->isZeroSize(CGM.getContext())) {
unsigned fieldIndex = layout.getLLVMFieldNo(Field);
elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
}
for (const auto *Field : RD->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField()) {
+ if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
unsigned FieldIndex = RL.getLLVMFieldNo(Field);
RecordLayout[FieldIndex] = Field;
}
void CGRecordLowering::accumulateFields() {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end();
- Field != FieldEnd;)
+ Field != FieldEnd;) {
if (Field->isBitField()) {
RecordDecl::field_iterator Start = Field;
// Iterate to gather the list of bitfields.
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
accumulateBitFields(Start, Field);
- } else {
+ } else if (!Field->isZeroSize(Context)) {
Members.push_back(MemberInfo(
bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
getStorageType(*Field), *Field));
++Field;
+ } else {
+ ++Field;
}
+ }
}
void
if (!Member->Data && Member->Kind != MemberInfo::Scissor)
continue;
if (Member->Offset < Tail) {
- assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
+ assert(Prior->Kind == MemberInfo::Field &&
"Only storage fields have tail padding!");
- Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
- cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+ if (!Prior->FD || Prior->FD->isBitField())
+ Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
+ cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
+ else {
+ assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
+ "should not have reused this field's tail padding");
+ Prior->Data = getByteArrayType(
+ Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
+ }
}
if (Member->Data)
Prior = Member;
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
+ // Ignore zero-sized fields.
+ if (FD->isZeroSize(getContext()))
+ continue;
+
// For non-bit-fields, just check that the LLVM struct offset matches the
// AST offset.
if (!FD->isBitField()) {
if (!FD->getDeclName())
continue;
- // Don't inspect zero-length bitfields.
- if (FD->isZeroLengthBitField(getContext()))
- continue;
-
const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
}
/// Determine whether a field initialization may overlap some other object.
- AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
- // FIXME: These cases can result in overlap as a result of P0840R0's
- // [[no_unique_address]] attribute. We can still infer NoOverlap in the
- // presence of that attribute if the field is within the nvsize of its
- // containing class, because non-virtual subobjects are initialized in
- // address order.
- return AggValueSlot::DoesNotOverlap;
- }
+ AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD);
/// Determine whether a base class initialization may overlap some other
/// object.
case ParsedAttr::AT_Deprecated:
case ParsedAttr::AT_FallThrough:
case ParsedAttr::AT_CXX11NoReturn:
+ case ParsedAttr::AT_NoUniqueAddress:
return true;
case ParsedAttr::AT_WarnUnusedResult:
return !ScopeName && AttrName->getName().equals("nodiscard");
case ParsedAttr::AT_NoSplitStack:
handleSimpleAttribute<NoSplitStackAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_NoUniqueAddress:
+ handleSimpleAttribute<NoUniqueAddressAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_NonNull:
if (auto *PVD = dyn_cast<ParmVarDecl>(D))
handleNonNullAttrParameter(S, PVD, AL);
--- /dev/null
+// RUN: %clang_cc1 -std=c++2a %s -emit-llvm -o - -triple x86_64-linux-gnu | FileCheck %s
+
+// IRGen test for [[no_unique_address]]: checks that global initializers can
+// reuse tail padding of overlapping fields, that empty fields collapse into
+// overlapping storage, that pointer-to-member offsets still distinguish
+// multiple fields of the same empty type, and that taking the address of a
+// zero-size field yields a byte-offset GEP rather than a struct GEP.
+
+struct A { ~A(); int n; char c[3]; };
+struct B { [[no_unique_address]] A a; char k; };
+// CHECK-DAG: @b = global { i32, [3 x i8], i8 } { i32 1, [3 x i8] c"\02\03\04", i8 5 }
+B b = {1, 2, 3, 4, 5};
+
+struct C : A {};
+struct D : C {};
+struct E { int e; [[no_unique_address]] D d; char k; };
+// CHECK-DAG: @e = global { i32, i32, [3 x i8], i8 } { i32 1, i32 2, [3 x i8] c"\03\04\05", i8 6 }
+E e = {1, 2, 3, 4, 5, 6};
+
+struct Empty1 {};
+struct Empty2 {};
+struct Empty3 {};
+struct HasEmpty {
+ [[no_unique_address]] Empty1 e1;
+ int a;
+ [[no_unique_address]] Empty2 e2;
+ int b;
+ [[no_unique_address]] Empty3 e3;
+};
+// CHECK-DAG: @he = global %{{[^ ]*}} { i32 1, i32 2 }
+HasEmpty he = {{}, 1, {}, 2, {}};
+
+// Distinct fields of the same empty type must still have distinct addresses,
+// so e2/e3 are pushed to successive offsets.
+struct HasEmptyDuplicates {
+ [[no_unique_address]] Empty1 e1; // +0
+ int a;
+ [[no_unique_address]] Empty1 e2; // +4
+ int b;
+ [[no_unique_address]] Empty1 e3; // +8
+};
+// CHECK-DAG: @off1 = global i64 0
+Empty1 HasEmptyDuplicates::*off1 = &HasEmptyDuplicates::e1;
+// CHECK-DAG: @off2 = global i64 4
+Empty1 HasEmptyDuplicates::*off2 = &HasEmptyDuplicates::e2;
+// CHECK-DAG: @off3 = global i64 8
+Empty1 HasEmptyDuplicates::*off3 = &HasEmptyDuplicates::e3;
+
+// CHECK-DAG: @hed = global %{{[^ ]*}} { i32 1, i32 2, [4 x i8] undef }
+HasEmptyDuplicates hed = {{}, 1, {}, 2, {}};
+
+struct __attribute__((packed, aligned(2))) PackedAndPadded {
+ ~PackedAndPadded();
+ char c;
+ int n;
+};
+struct WithPackedAndPadded {
+ [[no_unique_address]] PackedAndPadded pap;
+ char d;
+};
+// CHECK-DAG: @wpap = global <{ i8, i32, i8 }> <{ i8 1, i32 2, i8 3 }>
+WithPackedAndPadded wpap = {1, 2, 3};
+
+struct FieldOverlap {
+ [[no_unique_address]] Empty1 e1, e2, e3, e4;
+ int n;
+};
+static_assert(sizeof(FieldOverlap) == 4);
+// CHECK-DAG: @fo = global %{{[^ ]*}} { i32 1234 }
+FieldOverlap fo = {{}, {}, {}, {}, 1234};
+
+// CHECK-DAG: @e1 = constant %[[E1:[^ ]*]]* bitcast (%[[FO:[^ ]*]]* @fo to %[[E1]]*)
+Empty1 &e1 = fo.e1;
+// CHECK-DAG: @e2 = constant %[[E1]]* bitcast (i8* getelementptr (i8, i8* bitcast (%[[FO]]* @fo to i8*), i64 1) to %[[E1]]*)
+Empty1 &e2 = fo.e2;
+
+// CHECK-LABEL: accessE1
+// CHECK: %[[RET:.*]] = bitcast %[[FO]]* %{{.*}} to %[[E1]]*
+// CHECK: ret %[[E1]]* %[[RET]]
+Empty1 &accessE1(FieldOverlap &fo) { return fo.e1; }
+
+// CHECK-LABEL: accessE2
+// CHECK: %[[AS_I8:.*]] = bitcast %[[FO]]* %{{.*}} to i8*
+// CHECK: %[[ADJUSTED:.*]] = getelementptr inbounds i8, i8* %[[AS_I8]], i64 1
+// CHECK: %[[RET:.*]] = bitcast i8* %[[ADJUSTED]] to %[[E1]]*
+// CHECK: ret %[[E1]]* %[[RET]]
+Empty1 &accessE2(FieldOverlap &fo) { return fo.e2; }
// CHECK: store i32 {{.*}} @_ZTVN16InitWithinNVSize1CE
// CHECK: store i8
}
+
+namespace NoUniqueAddr {
+ struct A { char c; A(const A&); };
+ struct B { int n; char c[3]; ~B(); };
+ struct C : virtual A { B b; };
+ struct D : virtual A { [[no_unique_address]] B b; };
+ struct E : virtual A { [[no_unique_address]] B b; char x; };
+ static_assert(sizeof(C) == sizeof(void*) + 8 + alignof(void*));
+ static_assert(sizeof(D) == sizeof(void*) + 8);
+ static_assert(sizeof(E) == sizeof(void*) + 8 + alignof(void*));
+
+ // CHECK: define {{.*}} @_ZN12NoUniqueAddr1CC1EOS0_
+ // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_(
+ // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1CE
+ // Copy the full size of B.
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
+ C f(C c) { return c; }
+
+ // CHECK: define {{.*}} @_ZN12NoUniqueAddr1DC1EOS0_
+ // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_(
+ // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1DE
+ // Copy just the data size of B, to avoid overwriting the A base class.
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 7, i1 false)
+ D f(D d) { return d; }
+
+ // CHECK: define {{.*}} @_ZN12NoUniqueAddr1EC1EOS0_
+ // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_(
+ // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1EE
+ // We can copy the full size of B here. (As it happens, we fold the copy of 'x' into
+ // this memcpy, so we're copying 8 bytes either way.)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
+ E f(E e) { return e; }
+
+ struct F : virtual A {
+ F(const F &o) : A(o), b(o.b) {}
+ [[no_unique_address]] B b;
+ };
+
+ // CHECK: define {{.*}} @_ZN12NoUniqueAddr1FC1ERKS0_
+ // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_(
+ // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1FE
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 7, i1 false)
+ F f(F x) { return x; }
+}
--- /dev/null
+// RUN: %clang_cc1 -std=c++2a -fsyntax-only -triple x86_64-linux-gnu -fdump-record-layouts %s | FileCheck %s
+
+namespace Empty {
+ struct A {};
+ struct B { [[no_unique_address]] A a; char b; };
+ static_assert(sizeof(B) == 1);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::B
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: 0 | char b
+ // CHECK-NEXT: | [sizeof=1, dsize=1, align=1,
+ // CHECK-NEXT: | nvsize=1, nvalign=1]
+
+ struct C {};
+ struct D {
+ [[no_unique_address]] A a;
+ [[no_unique_address]] C c;
+ char d;
+ };
+ static_assert(sizeof(D) == 1);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::D
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: 0 | struct Empty::C c (empty)
+ // CHECK-NEXT: 0 | char d
+ // CHECK-NEXT: | [sizeof=1, dsize=1, align=1,
+ // CHECK-NEXT: | nvsize=1, nvalign=1]
+
+ struct E {
+ [[no_unique_address]] A a1;
+ [[no_unique_address]] A a2;
+ char e;
+ };
+ static_assert(sizeof(E) == 2);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::E
+ // CHECK-NEXT: 0 | struct Empty::A a1 (empty)
+ // CHECK-NEXT: 1 | struct Empty::A a2 (empty)
+ // CHECK-NEXT: 0 | char e
+ // CHECK-NEXT: | [sizeof=2, dsize=2, align=1,
+ // CHECK-NEXT: | nvsize=2, nvalign=1]
+
+ struct F {
+ ~F();
+ [[no_unique_address]] A a1;
+ [[no_unique_address]] A a2;
+ char f;
+ };
+ static_assert(sizeof(F) == 2);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::F
+ // CHECK-NEXT: 0 | struct Empty::A a1 (empty)
+ // CHECK-NEXT: 1 | struct Empty::A a2 (empty)
+ // CHECK-NEXT: 0 | char f
+ // CHECK-NEXT: | [sizeof=2, dsize=1, align=1,
+ // CHECK-NEXT: | nvsize=2, nvalign=1]
+
+ struct G { [[no_unique_address]] A a; ~G(); };
+ static_assert(sizeof(G) == 1);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::G
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: | [sizeof=1, dsize=0, align=1,
+ // CHECK-NEXT: | nvsize=1, nvalign=1]
+
+ struct H { [[no_unique_address]] A a, b; ~H(); };
+ static_assert(sizeof(H) == 2);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::H
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: 1 | struct Empty::A b (empty)
+ // CHECK-NEXT: | [sizeof=2, dsize=0, align=1,
+ // CHECK-NEXT: | nvsize=2, nvalign=1]
+
+ struct OversizedEmpty : A {
+ ~OversizedEmpty();
+ [[no_unique_address]] A a;
+ };
+ static_assert(sizeof(OversizedEmpty) == 2);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::OversizedEmpty
+ // CHECK-NEXT: 0 | struct Empty::A (base) (empty)
+ // CHECK-NEXT: 1 | struct Empty::A a (empty)
+ // CHECK-NEXT: | [sizeof=2, dsize=0, align=1,
+ // CHECK-NEXT: | nvsize=2, nvalign=1]
+
+ struct HasOversizedEmpty {
+ [[no_unique_address]] OversizedEmpty m;
+ };
+ static_assert(sizeof(HasOversizedEmpty) == 2);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::HasOversizedEmpty
+ // CHECK-NEXT: 0 | struct Empty::OversizedEmpty m (empty)
+ // CHECK-NEXT: 0 | struct Empty::A (base) (empty)
+ // CHECK-NEXT: 1 | struct Empty::A a (empty)
+ // CHECK-NEXT: | [sizeof=2, dsize=0, align=1,
+ // CHECK-NEXT: | nvsize=2, nvalign=1]
+
+ struct EmptyWithNonzeroDSize {
+ [[no_unique_address]] A a;
+ int x;
+ [[no_unique_address]] A b;
+ int y;
+ [[no_unique_address]] A c;
+ };
+ static_assert(sizeof(EmptyWithNonzeroDSize) == 12);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::EmptyWithNonzeroDSize
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: 0 | int x
+ // CHECK-NEXT: 4 | struct Empty::A b (empty)
+ // CHECK-NEXT: 4 | int y
+ // CHECK-NEXT: 8 | struct Empty::A c (empty)
+ // CHECK-NEXT: | [sizeof=12, dsize=12, align=4,
+ // CHECK-NEXT: | nvsize=12, nvalign=4]
+
+ struct EmptyWithNonzeroDSizeNonPOD {
+ ~EmptyWithNonzeroDSizeNonPOD();
+ [[no_unique_address]] A a;
+ int x;
+ [[no_unique_address]] A b;
+ int y;
+ [[no_unique_address]] A c;
+ };
+ static_assert(sizeof(EmptyWithNonzeroDSizeNonPOD) == 12);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct Empty::EmptyWithNonzeroDSizeNonPOD
+ // CHECK-NEXT: 0 | struct Empty::A a (empty)
+ // CHECK-NEXT: 0 | int x
+ // CHECK-NEXT: 4 | struct Empty::A b (empty)
+ // CHECK-NEXT: 4 | int y
+ // CHECK-NEXT: 8 | struct Empty::A c (empty)
+ // CHECK-NEXT: | [sizeof=12, dsize=8, align=4,
+ // CHECK-NEXT: | nvsize=9, nvalign=4]
+}
+
+namespace POD {
+ // Cannot reuse tail padding of a POD type.
+ struct A { int n; char c[3]; };
+ struct B { [[no_unique_address]] A a; char d; };
+ static_assert(sizeof(B) == 12);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct POD::B
+ // CHECK-NEXT: 0 | struct POD::A a
+ // CHECK-NEXT: 0 | int n
+ // CHECK-NEXT: 4 | char [3] c
+ // CHECK-NEXT: 8 | char d
+ // CHECK-NEXT: | [sizeof=12, dsize=12, align=4,
+ // CHECK-NEXT: | nvsize=12, nvalign=4]
+}
+
+namespace NonPOD {
+ struct A { int n; char c[3]; ~A(); };
+ struct B { [[no_unique_address]] A a; char d; };
+ static_assert(sizeof(B) == 8);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct NonPOD::B
+ // CHECK-NEXT: 0 | struct NonPOD::A a
+ // CHECK-NEXT: 0 | int n
+ // CHECK-NEXT: 4 | char [3] c
+ // CHECK-NEXT: 7 | char d
+ // CHECK-NEXT: | [sizeof=8, dsize=8, align=4,
+ // CHECK-NEXT: | nvsize=8, nvalign=4]
+}
+
+namespace NVSizeGreaterThanDSize {
+ // The nvsize of an object includes the complete size of its empty subobjects
+ // (although it's unclear why). Ensure this corner case is handled properly.
+ struct alignas(8) A { ~A(); }; // dsize 0, nvsize 0, size 8
+ struct B : A { char c; }; // dsize 1, nvsize 8, size 8
+ static_assert(sizeof(B) == 8);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct NVSizeGreaterThanDSize::B
+ // CHECK-NEXT: 0 | struct NVSizeGreaterThanDSize::A (base) (empty)
+ // CHECK-NEXT: 0 | char c
+ // CHECK-NEXT: | [sizeof=8, dsize=1, align=8,
+ // CHECK-NEXT: | nvsize=8, nvalign=8]
+
+ struct V { int n; };
+
+ // V is at offset 16, not offset 12, because B's tail padding is strangely not
+ // usable for virtual bases.
+ struct C : B, virtual V {};
+ static_assert(sizeof(C) == 24);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct NVSizeGreaterThanDSize::C
+ // CHECK-NEXT: 0 | (C vtable pointer)
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::B (base)
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A (base) (empty)
+ // CHECK-NEXT: 8 | char c
+ // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::V (virtual base)
+ // CHECK-NEXT: 16 | int n
+ // CHECK-NEXT: | [sizeof=24, dsize=20, align=8,
+ // CHECK-NEXT: | nvsize=16, nvalign=8]
+
+ struct D : virtual V {
+ [[no_unique_address]] B b;
+ };
+ static_assert(sizeof(D) == 24);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct NVSizeGreaterThanDSize::D
+ // CHECK-NEXT: 0 | (D vtable pointer)
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::B b
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A (base) (empty)
+ // CHECK-NEXT: 8 | char c
+ // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::V (virtual base)
+ // CHECK-NEXT: 16 | int n
+ // CHECK-NEXT: | [sizeof=24, dsize=20, align=8,
+ // CHECK-NEXT: | nvsize=16, nvalign=8]
+
+ struct X : virtual A { [[no_unique_address]] A a; };
+ struct E : virtual A {
+ [[no_unique_address]] A a;
+ // Here, we arrange for X to hang over the end of the nvsize of E. This
+ // should force the A vbase to be laid out at offset 24, not 16.
+ [[no_unique_address]] X x;
+ };
+ static_assert(sizeof(E) == 32);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct NVSizeGreaterThanDSize::E
+ // CHECK-NEXT: 0 | (E vtable pointer)
+ // CHECK-NEXT: 0 | struct NVSizeGreaterThanDSize::A a (empty)
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::X x
+ // CHECK-NEXT: 8 | (X vtable pointer)
+ // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A a (empty)
+ // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::A (virtual base) (empty)
+ // CHECK-NEXT: 24 | struct NVSizeGreaterThanDSize::A (virtual base) (empty)
+ // CHECK-NEXT: | [sizeof=32, dsize=16, align=8,
+ // CHECK-NEXT: | nvsize=16, nvalign=8]
+}
+
+namespace RepeatedVBase {
+ struct alignas(16) A { ~A(); };
+ struct B : A {};
+ struct X : virtual A, virtual B {};
+ struct Y { [[no_unique_address]] X x; char c; };
+ static_assert(sizeof(Y) == 32);
+
+ // CHECK:*** Dumping AST Record Layout
+ // CHECK: 0 | struct RepeatedVBase::Y
+ // CHECK-NEXT: 0 | struct RepeatedVBase::X x
+ // CHECK-NEXT: 0 | (X vtable pointer)
+ // CHECK-NEXT: 0 | struct RepeatedVBase::A (virtual base) (empty)
+ // CHECK-NEXT: 16 | struct RepeatedVBase::B (virtual base) (empty)
+ // CHECK-NEXT: 16 | struct RepeatedVBase::A (base) (empty)
+ // CHECK-NEXT: 8 | char c
+ // CHECK-NEXT: | [sizeof=32, dsize=9, align=16,
+ // CHECK-NEXT: | nvsize=9, nvalign=16]
+}
--- /dev/null
+// RUN: %clang_cc1 -std=c++2a %s -verify -triple x86_64-linux-gnu
+// RUN: %clang_cc1 -std=c++2a %s -verify=unsupported -triple x86_64-windows
+
+[[no_unique_address]] int a; // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+[[no_unique_address]] void f(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+struct [[no_unique_address]] S { // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+ [[no_unique_address]] int a; // unsupported-warning {{unknown}}
+ [[no_unique_address]] void f(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+ [[no_unique_address]] static int sa; // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+ [[no_unique_address]] static void sf(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+ [[no_unique_address]] int b : 3; // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}}
+
+ [[no_unique_address, no_unique_address]] int duplicated; // expected-error {{cannot appear multiple times}}
+ // unsupported-error@-1 {{cannot appear multiple times}} unsupported-warning@-1 2{{unknown}}
+ [[no_unique_address]] [[no_unique_address]] int duplicated2; // unsupported-warning 2{{unknown}}
+ [[no_unique_address()]] int arglist; // expected-error {{cannot have an argument list}} unsupported-warning {{unknown}}
+
+ int [[no_unique_address]] c; // expected-error {{cannot be applied to types}} unsupported-error {{cannot be applied to types}}
+};
// Helper function for GenerateTargetSpecificAttrChecks that alters the 'Test'
// parameter with only a single check type, if applicable.
-static void GenerateTargetSpecificAttrCheck(const Record *R, std::string &Test,
+static bool GenerateTargetSpecificAttrCheck(const Record *R, std::string &Test,
std::string *FnName,
StringRef ListName,
StringRef CheckAgainst,
*FnName += Part;
}
Test += ")";
+ return true;
}
+ return false;
}
// Generate a conditional expression to check if the current target satisfies
// those checks to the Test string. If the FnName string pointer is non-null,
// append a unique suffix to distinguish this set of target checks from other
// TargetSpecificAttr records.
-static void GenerateTargetSpecificAttrChecks(const Record *R,
+static bool GenerateTargetSpecificAttrChecks(const Record *R,
std::vector<StringRef> &Arches,
std::string &Test,
std::string *FnName) {
+ bool AnyTargetChecks = false;
+
// It is assumed that there will be an llvm::Triple object
// named "T" and a TargetInfo object named "Target" within
// scope that can be used to determine whether the attribute exists in
// differently because GenerateTargetRequirements needs to combine the list
// with ParseKind.
if (!Arches.empty()) {
+ AnyTargetChecks = true;
Test += " && (";
for (auto I = Arches.begin(), E = Arches.end(); I != E; ++I) {
StringRef Part = *I;
}
// If the attribute is specific to particular OSes, check those.
- GenerateTargetSpecificAttrCheck(R, Test, FnName, "OSes", "T.getOS()",
- "llvm::Triple::");
+ AnyTargetChecks |= GenerateTargetSpecificAttrCheck(
+ R, Test, FnName, "OSes", "T.getOS()", "llvm::Triple::");
- // If one or more CXX ABIs are specified, check those as well.
- GenerateTargetSpecificAttrCheck(R, Test, FnName, "CXXABIs",
- "Target.getCXXABI().getKind()",
- "TargetCXXABI::");
// If one or more object formats is specified, check those.
- GenerateTargetSpecificAttrCheck(R, Test, FnName, "ObjectFormats",
- "T.getObjectFormat()", "llvm::Triple::");
+ AnyTargetChecks |=
+ GenerateTargetSpecificAttrCheck(R, Test, FnName, "ObjectFormats",
+ "T.getObjectFormat()", "llvm::Triple::");
+
+ // If custom code is specified, emit it.
+ StringRef Code = R->getValueAsString("CustomCode");
+ if (!Code.empty()) {
+ AnyTargetChecks = true;
+ Test += " && (";
+ Test += Code;
+ Test += ")";
+ }
+
+ return AnyTargetChecks;
}
static void GenerateHasAttrSpellingStringSwitch(
std::string FnName = "isTarget";
std::string Test;
- GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName);
+ bool UsesT = GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName);
// If this code has already been generated, simply return the previous
// instance of it.
return *I;
OS << "static bool " << FnName << "(const TargetInfo &Target) {\n";
- OS << " const llvm::Triple &T = Target.getTriple();\n";
+ if (UsesT)
+ OS << " const llvm::Triple &T = Target.getTriple(); (void)T;\n";
OS << " return " << Test << ";\n";
OS << "}\n\n";
<tr>
<td><tt>[[no_unique_address]]</tt> attribute</td>
<td><a href="http://wg21.link/p0840r2">P0840R2</a></td>
- <td class="none" align="center">No</td>
+ <td class="svn" align="center">SVN</td>
</tr>
<tr>
<td><tt>[[likely]]</tt> and <tt>[[unlikely]]</tt> attributes</td>