From: John McCall Date: Tue, 16 Nov 2010 23:07:28 +0000 (+0000) Subject: Support compound complex operations as l-values in C++. Add a test X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=83ce9d4a552987d34cbd500e983db8d770232379;p=clang Support compound complex operations as l-values in C++. Add a test case based on CodeGen/volatile-1.c which tests the current C++ semantics, and note the many, many places we fall short of them. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@119402 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index 0c24bf5d1a..6bfafca857 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -1981,19 +1981,27 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) return EmitPointerToDataMemberBinaryExpr(E); - - // Can only get l-value for binary operator expressions which are a - // simple assignment of aggregate type. - if (E->getOpcode() != BO_Assign) - return EmitUnsupportedLValue(E, "binary l-value expression"); + assert(E->isAssignmentOp() && "unexpected binary l-value"); + if (!hasAggregateLLVMType(E->getType())) { + if (E->isCompoundAssignmentOp()) + return EmitCompoundAssignOperatorLValue(cast(E)); + + assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); + // Emit the LHS as an l-value. LValue LV = EmitLValue(E->getLHS()); // Store the value through the l-value. EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType()); return LV; } + + if (E->getType()->isAnyComplexType()) + return EmitComplexAssignmentLValue(E); + + // The compound assignment operators are not used for aggregates. 
+ assert(E->getOpcode() == BO_Assign && "aggregate compound assignment?"); return EmitAggExprToLValue(E); } diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp index df65d5a3ac..26bda79898 100644 --- a/lib/CodeGen/CGExprComplex.cpp +++ b/lib/CodeGen/CGExprComplex.cpp @@ -760,3 +760,26 @@ ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile) { return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile); } + +LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) { + ComplexPairTy Val; // ignored + + ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &); + + switch (E->getOpcode()) { + case BO_Assign: + return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val); + + case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break; + case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break; + case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break; + case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break; + + default: + llvm_unreachable("unexpected complex compound assignment"); + Op = 0; + } + + return ComplexExprEmitter(*this).EmitCompoundAssignLValue( + cast(E), Op, Val); +} diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index a3e1eac550..560675e73f 100644 --- a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -1404,6 +1404,9 @@ public: void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty, llvm::Value **Result=0); + /// Emit an l-value for an assignment (simple or compound) of complex type. 
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E); + // Note: only availabe for agg return types LValue EmitBinaryOperatorLValue(const BinaryOperator *E); LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E); diff --git a/test/CodeGenCXX/volatile-1.cpp b/test/CodeGenCXX/volatile-1.cpp new file mode 100644 index 0000000000..4ff7e9e492 --- /dev/null +++ b/test/CodeGenCXX/volatile-1.cpp @@ -0,0 +1,383 @@ +// RUN: %clang_cc1 -Wno-unused-value -emit-llvm %s -o - | FileCheck %s + +// CHECK: @i = global [[INT:i[0-9]+]] 0 +volatile int i, j, k; +volatile int ar[5]; +volatile char c; +// CHECK: @ci = global [[CINT:%.*]] zeroinitializer +volatile _Complex int ci; +volatile struct S { +#ifdef __cplusplus + void operator =(volatile struct S&o) volatile; +#endif + int i; +} a, b; + +//void operator =(volatile struct S&o1, volatile struct S&o2) volatile; +int printf(const char *, ...); + + +// CHECK: define void @{{.*}}test +void test() { + + asm("nop"); // CHECK: call void asm + + // FIXME: should not load + i; + // CHECK-NEXT: volatile load [[INT]]* @i + + (float)(ci); + // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: sitofp [[INT]] + + // These are not uses in C++: + // [expr.static.cast]p6: + // The lvalue-to-rvalue . . . conversions are not applied to the expression. 
+ (void)ci; + (void)a; + + (void)(ci=ci); + // CHECK-NEXT: [[R:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + + (void)(i=j); + // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* @j + // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* @i + + // FIXME: extra load at end! + ci+=ci; + // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // Not sure why they're ordered this way. + // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] + // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] + // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + + // Note that C++ requires an extra volatile load over C from the LHS of the '+'. 
+ (ci += ci) + ci; + // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R:%.*]] = add [[INT]] [[R2]], [[R1]] + // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]] + // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[R2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0) + // CHECK-NEXT: [[I2:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // These additions can be elided. + // CHECK-NEXT: add [[INT]] [[R1]], [[R2]] + // CHECK-NEXT: add [[INT]] [[I1]], [[I2]] + + asm("nop"); // CHECK-NEXT: call void asm + + // Extra volatile load in C++. + (i += j) + k; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add nsw [[INT]] + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add nsw [[INT]] + + asm("nop"); // CHECK-NEXT: call void asm + + // Extra volatile load in C++. 
+ (i += j) + 1; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add nsw [[INT]] + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: add nsw [[INT]] + + asm("nop"); // CHECK-NEXT: call void asm + + ci+ci; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add [[INT]] + // CHECK-NEXT: add [[INT]] + + // FIXME: should not load + __real i; + // CHECK-NEXT: volatile load + + +ci; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + + asm("nop"); // CHECK-NEXT: call void asm + + (void)(i=i); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + (float)(i=i); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: sitofp + + (void)i; + + i=i; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + // Extra volatile load in C++. + i=i=i; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + (void)__builtin_choose_expr(0, i=i, j=j); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + // FIXME: shouldn't get these extra loads here, or the phi + k ? 
(i=i) : (j=j); + // CHECK-NEXT: volatile load + // CHECK-NEXT: icmp + // CHECK-NEXT: br i1 + // CHECK: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: br label + // CHECK: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: br label + // CHECK: phi + + (void)(i,(i=i)); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + // FIXME: should not load k + i=i,k; + // CHECK-NEXT: volatile load [[INT]]* @i + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i + // CHECK-NEXT: volatile load [[INT]]* @k + + (i=j,k=j); + // CHECK-NEXT: volatile load [[INT]]* @j + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i + // CHECK-NEXT: volatile load [[INT]]* @j + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @k + + // FIXME: should not load 'k' + (i=j,k); + // CHECK-NEXT: volatile load [[INT]]* @j + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i + // CHECK-NEXT: volatile load [[INT]]* @k + + // FIXME: should not load either + (i,j); + // CHECK-NEXT: volatile load [[INT]]* @i + // CHECK-NEXT: volatile load [[INT]]* @j + + // Extra load in C++. + i=c=k; + // CHECK-NEXT: volatile load + // CHECK-NEXT: trunc + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: sext + // CHECK-NEXT: volatile store + + i+=k; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add nsw [[INT]] + // CHECK-NEXT: volatile store + + // FIXME: should not load! 
+ ci; + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 0 + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 1 + + asm("nop"); // CHECK-NEXT: call void asm + + (int)ci; + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 0 + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 1 + + (bool)ci; + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 0 + // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 1 + // CHECK-NEXT: icmp ne + // CHECK-NEXT: icmp ne + // CHECK-NEXT: or i1 + + // FIXME: should not load! + ci=ci; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + + asm("nop"); // CHECK-NEXT: call void asm + + // FIXME: should not load at end + // Extra load in C++. + ci=ci=ci; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + + __imag ci = __imag ci = __imag ci; + // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1) + + __real (i = j); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + __imag i; + // CHECK-NEXT: volatile load + + // ============================================================ + // FIXME: Test cases we get wrong. + + // A use. We load all of a into a copy of a, then load i. gcc forgets to do + // the assignment. 
+ // (a = a).i; + + // ============================================================ + // Test cases where we intentionally differ from gcc, due to suspected bugs in + // gcc. + + // Not a use. gcc forgets to do the assignment. + // CHECK-NEXT: call + ((a=a),a); + + // Not a use. gcc gets this wrong, it doesn't emit the copy! + // CHECK-NEXT: call + (void)(a=a); + + // Not a use. gcc got this wrong in 4.2 and omitted the side effects + // entirely, but it is fixed in 4.4.0. + __imag (i = j); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + + // C++ does an extra load here. Note that we have to do full loads. + (float)(ci=ci); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: sitofp + + // Not a use, bug? gcc treats this as not a use, that's probably a + // bug due to tree folding ignoring volatile. + (int)(ci=ci); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + + // A use. + (float)(i=i); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: sitofp + + // A use. gcc treats this as not a use, that's probably a bug due to tree + // folding ignoring volatile. + (int)(i=i); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + + // A use. + -(i=j); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: sub + + // A use. gcc treats this a not a use, that's probably a bug due to tree + // folding ignoring volatile. + +(i=k); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + + // A use. 
gcc treats this as not a use, that's probably a bug due to tree + // folding ignoring volatile. + // FIXME: extra load at end + __real (ci=ci); + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + + // A use. + i + 0; + // CHECK-NEXT: volatile load + // CHECK-NEXT: add + + // A use. + (i=j) + i; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile load + // CHECK-NEXT: add + + // A use. gcc treats this as not a use, that's probably a bug due to tree + // folding ignoring volatile. + (i=j) + 0; + // CHECK-NEXT: volatile load + // CHECK-NEXT: volatile store + // CHECK-NEXT: volatile load + // CHECK-NEXT: add + + // FIXME: extra load of 'i' + (i,j)=k; + // CHECK-NEXT: volatile load [[INT]]* @k + // CHECK-NEXT: volatile load [[INT]]* @i + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j + + // FIXME: extra load of 'j' + (j=k,i)=i; + // CHECK-NEXT: volatile load [[INT]]* @i + // CHECK-NEXT: volatile load [[INT]]* @k + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j + // CHECK-NEXT: volatile load [[INT]]* @j + // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i +}