return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
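+/// This is used, for example, for expression statements and for casts
+/// to void, where only the expression's side effects matter.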
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
+
/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
case Expr::CompoundAssignOperatorClass:
- return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+ if (!E->getType()->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
case UO_Real:
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+ llvm::Value *Addr = LV.getAddress();
+
+ // __real and __imag are valid on scalars. Checking whether the LLVM
+ // pointee type is a struct is a faster way of testing that than
+ // inspecting the AST type.
+ if (!cast<llvm::PointerType>(Addr->getType())
+ ->getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
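+ // Select the struct field of the complex pair: __real is field 0,
+ // __imag is field 1.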
unsigned Idx = E->getOpcode() == UO_Imag;
return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                              Idx, "idx"),
ExprTy);
}
case UO_PreInc:
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
// Comma expressions just emit their LHS then their RHS as an l-value.
if (E->getOpcode() == BO_Comma) {
- EmitAnyExpr(E->getLHS());
+ EmitIgnoredExpr(E->getLHS());
EnsureInsertPoint();
return EmitLValue(E->getRHS());
}
E->getOpcode() == BO_PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
- assert(E->isAssignmentOp() && "unexpected binary l-value");
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
if (!hasAggregateLLVMType(E->getType())) {
- if (E->isCompoundAssignmentOp())
- return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
-
- assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
-
// Emit the LHS as an l-value.
LValue LV = EmitLValue(E->getLHS());
// Store the value through the l-value.
if (E->getType()->isAnyComplexType())
return EmitComplexAssignmentLValue(E);
- // The compound assignment operators are not used for aggregates.
- assert(E->getOpcode() == BO_Assign && "aggregate compound assignment?");
-
return EmitAggExprToLValue(E);
}
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitAnyExpr(E->getLHS(), AggValueSlot::ignored(), true);
+ CGF.EmitIgnoredExpr(E->getLHS());
Visit(E->getRHS());
}
CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
false, Dest.isZeroed()));
} else {
- CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
}
}
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
+ CGF.EmitIgnoredExpr(E->getLHS());
CGF.EnsureInsertPoint();
return Visit(E->getRHS());
}
}
LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
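+ // Pick the emitter member function for the arithmetic part of the
+ // compound assignment, then defer to the shared emission path.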
ComplexPairTy (ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
-
switch (E->getOpcode()) {
- case BO_Assign:
- return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
-
case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
Op = 0;
}
- return ComplexExprEmitter(*this).EmitCompoundAssignLValue(
- cast<CompoundAssignOperator>(E), Op, Val);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
}
return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
}
case CK_ToVoid: {
- if (!E->isRValue())
- CGF.EmitLValue(E);
- else
- CGF.EmitAnyExpr(E, AggValueSlot::ignored(), true);
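+ // The operand of a cast to void may be an l-value (e.g. a volatile
+ // access we must not perform); EmitIgnoredExpr folds the old
+ // r-value/l-value branch into one call.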
+ CGF.EmitIgnoredExpr(E);
return 0;
}
case CK_VectorSplat: {
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
- CGF.EmitAnyExpr(E->getArgumentExpr());
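+ // The size expression is evaluated only for its side effects; the
+ // VLA's actual size is retrieved from GetVLASize below.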
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
return CGF.GetVLASize(VAT);
}
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
+ CGF.EmitIgnoredExpr(E->getLHS());
CGF.EnsureInsertPoint();
return Visit(E->getRHS());
}
}
-LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
const CompoundAssignOperator *E) {
ScalarExprEmitter Scalar(*this);
Value *Result = 0;
FunctionType::ExtInfo()),
GetCopyStructFn, ReturnValueSlot(), Args);
} else if (PID->getSetterCXXAssignment()) {
- EmitAnyExpr(PID->getSetterCXXAssignment(), AggValueSlot::ignored(), true);
-
+ EmitIgnoredExpr(PID->getSetterCXXAssignment());
} else {
// FIXME: Find a clean way to avoid AST node creation.
SourceLocation Loc = PD->getLocation();
EmitStopPoint(S);
switch (S->getStmtClass()) {
- default:
- // Must be an expression in a stmt context. Emit the value (to get
- // side-effects) and ignore the result.
- if (!isa<Expr>(S))
- ErrorUnsupported(S, "statement");
-
- EmitAnyExpr(cast<Expr>(S), AggValueSlot::ignored(), true);
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
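+ // Everything else must be an expression in a statement context;
+ // enumerate every concrete expression class via the StmtNodes.inc
+ // X-macros and emit it with its result ignored.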
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ EmitIgnoredExpr(cast<Expr>(S));
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
}
}
break;
+
case Stmt::IndirectGotoStmtClass:
EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
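+ /// Used for expression statements, casts to void, and the left-hand
+ /// side of comma operators.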
+ void EmitIgnoredExpr(const Expr *E);
+
/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
- LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitCallExprLValue(const CallExpr *E);
// Note: only available for agg return types
asm("nop"); // CHECK: call void asm
- // FIXME: should not load
+ // should not load
i;
- // CHECK-NEXT: volatile load [[INT]]* @i
(float)(ci);
// CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
// CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* @j
// CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* @i
- // FIXME: extra load at end!
ci+=ci;
// CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
// CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
// CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
// CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
// CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
- // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
- // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
// Note that C++ requires an extra volatile load over C from the LHS of the '+'.
(ci += ci) + ci;
// CHECK-NEXT: add [[INT]]
// CHECK-NEXT: add [[INT]]
- // FIXME: should not load
__real i;
- // CHECK-NEXT: volatile load
+ci;
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile store
- // FIXME: shouldn't get these extra loads here, or the phi
+ // FIXME: the phi-equivalent is unnecessary
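+ // Each branch now stores the selected l-value's address into a
+ // temporary, which is reloaded after the branch, instead of forming
+ // a phi of loaded values.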
k ? (i=i) : (j=j);
// CHECK-NEXT: volatile load
// CHECK-NEXT: icmp
// CHECK-NEXT: br i1
// CHECK: volatile load
// CHECK-NEXT: volatile store
- // CHECK-NEXT: volatile load
+ // CHECK-NEXT: store [[INT]]* @i
// CHECK-NEXT: br label
// CHECK: volatile load
// CHECK-NEXT: volatile store
- // CHECK-NEXT: volatile load
+ // CHECK-NEXT: store [[INT]]* @j
// CHECK-NEXT: br label
- // CHECK: phi
+ // CHECK: load [[INT]]**
(void)(i,(i=i));
// CHECK-NEXT: volatile load
- // CHECK-NEXT: volatile load
// CHECK-NEXT: volatile store
- // FIXME: should not load k
i=i,k;
// CHECK-NEXT: volatile load [[INT]]* @i
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
- // CHECK-NEXT: volatile load [[INT]]* @k
(i=j,k=j);
// CHECK-NEXT: volatile load [[INT]]* @j
// CHECK-NEXT: volatile load [[INT]]* @j
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @k
- // FIXME: should not load 'k'
(i=j,k);
// CHECK-NEXT: volatile load [[INT]]* @j
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
- // CHECK-NEXT: volatile load [[INT]]* @k
- // FIXME: should not load either
(i,j);
- // CHECK-NEXT: volatile load [[INT]]* @i
- // CHECK-NEXT: volatile load [[INT]]* @j
// Extra load in C++.
i=c=k;
// CHECK-NEXT: add nsw [[INT]]
// CHECK-NEXT: volatile store
- // FIXME: should not load!
ci;
- // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 0
- // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 1
asm("nop"); // CHECK-NEXT: call void asm
// CHECK-NEXT: icmp ne
// CHECK-NEXT: or i1
- // FIXME: should not load!
ci=ci;
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile store
// CHECK-NEXT: volatile store
- // CHECK-NEXT: volatile load
- // CHECK-NEXT: volatile load
asm("nop"); // CHECK-NEXT: call void asm
- // FIXME: should not load at end
// Extra load in C++.
ci=ci=ci;
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile store
// CHECK-NEXT: volatile store
- // CHECK-NEXT: volatile load
- // CHECK-NEXT: volatile load
__imag ci = __imag ci = __imag ci;
// CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
// CHECK-NEXT: volatile store
__imag i;
- // CHECK-NEXT: volatile load
// ============================================================
// FIXME: Test cases we get wrong.
// A use. gcc treats this as not a use; that's probably a bug due to tree
// folding ignoring volatile.
- // FIXME: extra load at end
__real (ci=ci);
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile load
// CHECK-NEXT: volatile store
// CHECK-NEXT: volatile store
- // CHECK-NEXT: volatile load
// A use.
i + 0;
// CHECK-NEXT: volatile load
// CHECK-NEXT: add
- // FIXME: extra load of 'i'
(i,j)=k;
// CHECK-NEXT: volatile load [[INT]]* @k
- // CHECK-NEXT: volatile load [[INT]]* @i
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j
- // FIXME: extra load of 'j'
(j=k,i)=i;
- // CHECK-NEXT: volatile load [[INT]]* @i
// CHECK-NEXT: volatile load [[INT]]* @k
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j
- // CHECK-NEXT: volatile load [[INT]]* @j
+ // CHECK-NEXT: volatile load [[INT]]* @i
// CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
+
+ // CHECK-NEXT: ret void
}
// CHECK: define void @_ZN5test14testEv()
void test() {
// CHECK: [[TMP:%.*]] = load i32** @_ZN5test11xE, align 8
- // *** FIXME: no! bad! should not be loaded! ***
- // CHECK-NEXT: [[TMP1:%.*]] = volatile load i32* [[TMP]]
// CHECK-NEXT: ret void
*x;
}