Emit partial destruction of structs with initializer lists.
author     John McCall <rjmccall@apple.com>
           Mon, 11 Jul 2011 19:35:02 +0000 (19:35 +0000)
committer  John McCall <rjmccall@apple.com>
           Mon, 11 Jul 2011 19:35:02 +0000 (19:35 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@134913 91177308-0d34-0410-b5e6-96231b3b80d8
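
In source terms: when an element of a braced initializer throws after earlier
members have already been constructed, those members must be destroyed before
the exception propagates. A minimal illustration of the semantics this commit
implements (hypothetical names, mirroring the new test below):

    struct A { A(int); ~A(); };
    struct B { A x, y; };

    void f() {
      // If A(6) throws, the already-constructed b.x must be destroyed
      // as the exception unwinds out of the initialization.
      B b = { 5, 6 };
    }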

lib/CodeGen/CGExprAgg.cpp
test/CodeGenCXX/partial-destruction.cpp

diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 062e13d2b60a0fb6964799212ffe6ed2b3daf975..f4387cd0def062719decc5680d14c0a37fe66070 100644
@@ -800,9 +800,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
   // the disadvantage is that the generated code is more difficult for
   // the optimizer, especially with bitfields.
   unsigned NumInitElements = E->getNumInits();
-  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
   
-  if (E->getType()->isUnionType()) {
+  if (record->isUnion()) {
     // Only initialize one field of a union. The field itself is
     // specified by the initializer list.
     if (!E->getInitializedFieldInUnion()) {
@@ -811,8 +811,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
 #ifndef NDEBUG
       // Make sure that it's really an empty union and not a failure
       // of semantic analysis.
-      for (RecordDecl::field_iterator Field = SD->field_begin(),
-                                   FieldEnd = SD->field_end();
+      for (RecordDecl::field_iterator Field = record->field_begin(),
+                                   FieldEnd = record->field_end();
            Field != FieldEnd; ++Field)
         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
 #endif
@@ -834,45 +834,72 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
     return;
   }
 
+  // We'll need to enter cleanup scopes in case any of the member
+  // initializers throw an exception.
+  llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+
   // Here we iterate over the fields; this makes it simpler to both
   // default-initialize fields and skip over unnamed fields.
-  unsigned CurInitVal = 0;
-  for (RecordDecl::field_iterator Field = SD->field_begin(),
-                               FieldEnd = SD->field_end();
-       Field != FieldEnd; ++Field) {
-    // We're done once we hit the flexible array member
-    if (Field->getType()->isIncompleteArrayType())
+  unsigned curInitIndex = 0;
+  for (RecordDecl::field_iterator field = record->field_begin(),
+                               fieldEnd = record->field_end();
+       field != fieldEnd; ++field) {
+    // We're done once we hit the flexible array member.
+    if (field->getType()->isIncompleteArrayType())
       break;
 
-    if (Field->isUnnamedBitfield())
+    // Always skip anonymous bitfields.
+    if (field->isUnnamedBitfield())
       continue;
 
-    // Don't emit GEP before a noop store of zero.
-    if (CurInitVal == NumInitElements && Dest.isZeroed() &&
+    // We're done if we reach the end of the explicit initializers, we
+    // have a zeroed object, and the rest of the fields are
+    // zero-initializable.
+    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
         CGF.getTypes().isZeroInitializable(E->getType()))
       break;
     
     // FIXME: volatility
-    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
+    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
     // We never generate write-barriers for initialized fields.
-    FieldLoc.setNonGC(true);
+    LV.setNonGC(true);
     
-    if (CurInitVal < NumInitElements) {
+    if (curInitIndex < NumInitElements) {
       // Store the initializer into the field.
-      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc);
+      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
     } else {
       // We're out of initializers; default-initialize to null.
-      EmitNullInitializationToLValue(FieldLoc);
+      EmitNullInitializationToLValue(LV);
+    }
+
+    // Push a destructor if necessary.
+    // FIXME: if we have an array of structures, all explicitly
+    // initialized, we can end up pushing a linear number of cleanups.
+    bool pushedCleanup = false;
+    if (QualType::DestructionKind dtorKind
+          = field->getType().isDestructedType()) {
+      assert(LV.isSimple());
+      if (CGF.needsEHCleanup(dtorKind)) {
+        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+                        CGF.getDestroyer(dtorKind), false);
+        cleanups.push_back(CGF.EHStack.stable_begin());
+        pushedCleanup = true;
+      }
     }
     
     // If the GEP didn't get used because of a dead zero init or something
     // else, clean it up for -O0 builds and general tidiness.
-    if (FieldLoc.isSimple())
+    if (!pushedCleanup && LV.isSimple())
       if (llvm::GetElementPtrInst *GEP =
-            dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
+            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
         if (GEP->use_empty())
           GEP->eraseFromParent();
   }
+
+  // Deactivate all the partial cleanups in reverse order, which
+  // generally means popping them.
+  for (unsigned i = cleanups.size(); i != 0; --i)
+    CGF.DeactivateCleanupBlock(cleanups[i-1]);
 }
 
 //===----------------------------------------------------------------------===//
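
For intuition, here is a hand-written approximation of the control flow the new
cleanups produce for the test case below. Illustrative only: the function name
is made up, the placement-new/reinterpret_cast dance plays loose with
object-lifetime rules, and the real codegen uses EH cleanup scopes that are
deactivated on success rather than literal try/catch:

    #include <new>

    struct A { A(int); ~A(); };
    struct B { A x, y, z; int w; };

    void test_equivalent() {
      alignas(B) unsigned char buf[sizeof(B)];
      B &v = *reinterpret_cast<B *>(buf);
      new (&v.x) A(5);
      try {
        new (&v.y) A(6);
        try {
          new (&v.z) A(7);
          v.w = 8;
        } catch (...) {
          v.y.~A();  // z's constructor threw: destroy y ...
          throw;
        }
      } catch (...) {
        v.x.~A();    // ... and then x, in reverse construction order
        throw;
      }
      v.~B();        // success path: whole-object destructor at end of scope
    }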
diff --git a/test/CodeGenCXX/partial-destruction.cpp b/test/CodeGenCXX/partial-destruction.cpp
index 53b34d06bf6471dc4d78117ea787ee07ff383425..52995523bfbe5a99a56944c5b16e2c290322de66 100644
@@ -88,3 +88,33 @@ namespace test0 {
   // CHECK-NEXT: br i1 [[T0]],
 
 }
+
+namespace test1 {
+  struct A { A(); A(int); ~A(); };
+  struct B { A x, y, z; int w; };
+
+  void test() {
+    B v = { 5, 6, 7, 8 };
+  }
+  // CHECK:    define void @_ZN5test14testEv()
+  // CHECK:      [[V:%.*]] = alloca [[B:%.*]], align 4
+  // CHECK-NEXT: alloca i8*
+  // CHECK-NEXT: alloca i32
+  // CHECK-NEXT: alloca i32
+  // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[B]]* [[V]], i32 0, i32 0
+  // CHECK-NEXT: call void @_ZN5test11AC1Ei([[A:%.*]]* [[X]], i32 5)
+  // CHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[B]]* [[V]], i32 0, i32 1
+  // CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[Y]], i32 6)
+  // CHECK:      [[Z:%.*]] = getelementptr inbounds [[B]]* [[V]], i32 0, i32 2
+  // CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[Z]], i32 7)
+  // CHECK:      [[W:%.*]] = getelementptr inbounds [[B]]* [[V]], i32 0, i32 3
+  // CHECK-NEXT: store i32 8, i32* [[W]], align 4
+  // CHECK-NEXT: call void @_ZN5test11BD1Ev([[B]]* [[V]])
+  // CHECK-NEXT: ret void
+
+  // FIXME: again, the block ordering is pretty bad here
+  // CHECK:      eh.selector({{.*}}, i32 0)
+  // CHECK:      eh.selector({{.*}}, i32 0)
+  // CHECK:      invoke void @_ZN5test11AD1Ev([[A]]* [[Y]])
+  // CHECK:      invoke void @_ZN5test11AD1Ev([[A]]* [[X]])
+}
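
Not shown in this hunk: like other CodeGenCXX tests, the file is driven by a
RUN line at the top. The exact flags used here are an assumption, but it would
read roughly as follows, with C++ exceptions enabled so the invoke/landingpad
checks can match:

    // RUN: %clang_cc1 %s -emit-llvm -o - -fexceptions -fcxx-exceptions | FileCheck %s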