return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
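+ // If the argument is an aggregate read out of an lvalue (an implicit
+ // lvalue-to-rvalue cast), pass the lvalue's address directly instead of
+ // copying it into a temporary here; any copy the ABI still requires is
+ // deferred to call emission via NeedsCopy.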
+ if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+ assert(L.isSimple());
+ args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
+ type, /*NeedsCopy*/true);
+ return;
+ }
+
args.add(EmitAnyExprToTemp(E), type);
}
Alignment, I->Ty);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
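+ // The argument is passed indirectly but not byval, so the deferred copy
+ // must be made explicitly: build a temporary and copy the aggregate into
+ // it. When byval is used instead, the call itself performs the copy.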
+ Args.push_back(CreateMemTemp(I->Ty));
+ EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
+ RV.isVolatileQualified());
} else {
Args.push_back(RV.getAggregateAddr());
}
struct CallArg {
RValue RV;
QualType Ty;
- CallArg(RValue rv, QualType ty)
- : RV(rv), Ty(ty)
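+ // True when RV still refers to caller-owned memory (an lvalue), so call
+ // emission may need to copy it into a temporary if the ABI passes the
+ // argument indirectly without byval.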
+ bool NeedsCopy;
+ CallArg(RValue rv, QualType ty, bool needscopy)
+ : RV(rv), Ty(ty), NeedsCopy(needscopy)
{ }
};
class CallArgList :
public llvm::SmallVector<CallArg, 16> {
public:
- void add(RValue rvalue, QualType type) {
- push_back(CallArg(rvalue, type));
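+ // needscopy defaults to false; the aggregate-lvalue fast path in
+ // EmitCallArg is the caller that passes true.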
+ void add(RValue rvalue, QualType type, bool needscopy = false) {
+ push_back(CallArg(rvalue, type, needscopy));
}
};
--- /dev/null
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-apple-darwin10 < %s | FileCheck %s
+
+struct Test1S {
+ long NumDecls;
+ long X;
+ long Y;
+};
+struct Test2S {
+ long NumDecls;
+ long X;
+};
+
+// Make sure we don't generate an extra memcpy for aggregate lvalue arguments
+void test1a(struct Test1S, struct Test2S);
+// CHECK: define void @test1(
+// CHECK-NOT: memcpy
+// CHECK: call void @test1a
+void test1(struct Test1S *A, struct Test2S *B) {
+ test1a(*A, *B);
+}