int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
+ /// \brief Estimate the cost of an EXT operation when lowered.
+ ///
+ /// The contract for this function is the same as \c getOperationCost except
+ /// that it supports an interface that provides extra information specific to
+ /// the EXT operation.
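+ /// For example, a zext or sext fed by a load can often be folded into an
+ /// extending load on the target and is then modeled as free.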
+ int getExtCost(const Instruction *I, const Value *Src) const;
+
/// \brief Estimate the cost of a function call when lowered.
///
/// The contract for this is the same as \c getOperationCost except that it
virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) = 0;
+ virtual int getExtCost(const Instruction *I, const Value *Src) = 0;
virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
virtual int getCallCost(const Function *F, int NumArgs) = 0;
virtual int getCallCost(const Function *F,
ArrayRef<const Value *> Operands) override {
return Impl.getGEPCost(PointeeType, Ptr, Operands);
}
+ int getExtCost(const Instruction *I, const Value *Src) override {
+ return Impl.getExtCost(I, Src);
+ }
int getCallCost(FunctionType *FTy, int NumArgs) override {
return Impl.getCallCost(FTy, NumArgs);
}
return SI.getNumCases();
}
+ int getExtCost(const Instruction *I, const Value *Src) {
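+ // Conservatively model the ext as one basic instruction; targets that can
+ // fold or eliminate exts override this hook.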
+ return TTI::TCC_Basic;
+ }
+
unsigned getCallCost(FunctionType *FTy, int NumArgs) {
assert(FTy && "FunctionType must be provided to this routine.");
// nop on most sane targets.
if (isa<CmpInst>(CI->getOperand(0)))
return TTI::TCC_Free;
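+ // sext/zext/fpext may be free or foldable, so route them through the
+ // ext-specific hook rather than the generic operation cost.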
+ if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
+ return static_cast<T *>(this)->getExtCost(CI, Operands.back());
}
return static_cast<T *>(this)->getOperationCost(
return BaseT::getGEPCost(PointeeType, Ptr, Operands);
}
+ int getExtCost(const Instruction *I, const Value *Src) {
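+ // Free if the target can lower the ext to no code at all.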
+ if (getTLI()->isExtFree(I))
+ return TargetTransformInfo::TCC_Free;
+
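+ // A zext/sext fed directly by a load may fold into a single extending load.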
+ if (isa<ZExtInst>(I) || isa<SExtInst>(I))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
+ if (getTLI()->isExtLoad(LI, I, DL))
+ return TargetTransformInfo::TCC_Free;
+
+ return TargetTransformInfo::TCC_Basic;
+ }
+
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) {
return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
return isExtFreeImpl(I);
}
+ /// Return true if \p Load and \p Ext can form an ExtLoad.
+ /// For example, in AArch64
+ /// %L = load i8, i8* %ptr
+ /// %E = zext i8 %L to i32
+ /// can be lowered into one load instruction
+ /// ldrb w0, [x0]
+ bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
+ const DataLayout &DL) const {
+ EVT VT = getValueType(DL, Ext->getType());
+ EVT LoadVT = getValueType(DL, Load->getType());
+
+ // If the load has other users and the truncate is not free, the ext
+ // probably isn't free.
+ if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
+ !isTruncateFree(Ext->getType(), Load->getType()))
+ return false;
+
+ // Check whether the target supports casts folded into loads.
+ unsigned LType;
+ if (isa<ZExtInst>(Ext))
+ LType = ISD::ZEXTLOAD;
+ else {
+ assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
+ LType = ISD::SEXTLOAD;
+ }
+
+ return isLoadExtLegal(LType, VT, LoadVT);
+ }
+
/// Return true if any actual instruction that defines a value of type FromTy
/// implicitly zero-extends the value to ToTy in the result register.
///
return TTIImpl->getGEPCost(PointeeType, Ptr, Operands);
}
+int TargetTransformInfo::getExtCost(const Instruction *I,
+ const Value *Src) const {
+ return TTIImpl->getExtCost(I, Src);
+}
+
int TargetTransformInfo::getIntrinsicCost(
Intrinsic::ID IID, Type *RetTy, ArrayRef<const Value *> Arguments) const {
int Cost = TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
if (!HasPromoted && LI->getParent() == Inst->getParent())
return false;
- EVT VT = TLI->getValueType(*DL, Inst->getType());
- EVT LoadVT = TLI->getValueType(*DL, LI->getType());
-
- // If the load has other users and the truncate is not free, this probably
- // isn't worthwhile.
- if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
- !TLI->isTruncateFree(Inst->getType(), LI->getType()))
- return false;
-
- // Check whether the target supports casts folded into loads.
- unsigned LType;
- if (isa<ZExtInst>(Inst))
- LType = ISD::ZEXTLOAD;
- else {
- assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
- LType = ISD::SEXTLOAD;
- }
-
- return TLI->isLoadExtLegal(LType, VT, LoadVT);
+ return TLI->isExtLoad(LI, Inst, *DL);
}
/// Move a zext or sext fed by a load into the same basic block as the load,
--- /dev/null
+; REQUIRES: asserts
+; RUN: opt -inline -mtriple=aarch64--linux-gnu -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
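+; Check that exts AArch64 can fold or lower for free are not counted against
+; the inline cost.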
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define i32 @outer1(i32* %ptr, i32 %i) {
+ %C = call i32 @inner1(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; sext can be folded into gep.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner1(i32* %ptr, i32 %i) {
+ %E = sext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i32 @outer2(i32* %ptr, i32 %i) {
+ %C = call i32 @inner2(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; zext from i32 to i64 is free.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner2(i32* %ptr, i32 %i) {
+ %E = zext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i32 @outer3(i32* %ptr, i16 %i) {
+ %C = call i32 @inner3(i32* %ptr, i16 %i)
+ ret i32 %C
+}
+
+; zext can be folded into gep.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner3(i32* %ptr, i16 %i) {
+ %E = zext i16 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i16 @outer4(i8* %ptr) {
+ %C = call i16 @inner4(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner4(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i16 @outer5(i8* %ptr) {
+ %C = call i16 @inner5(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer6(i8* %ptr) {
+ %C = call i32 @inner6(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner6(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer7(i8* %ptr) {
+ %C = call i32 @inner7(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner7(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer8(i16* %ptr) {
+ %C = call i32 @inner8(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner8(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer9(i16* %ptr) {
+ %C = call i32 @inner9(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner9(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer10(i8* %ptr) {
+ %C = call i64 @inner10(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner10
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner10(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer11(i8* %ptr) {
+ %C = call i64 @inner11(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner11
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner11(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer12(i16* %ptr) {
+ %C = call i64 @inner12(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner12
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner12(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer13(i16* %ptr) {
+ %C = call i64 @inner13(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner13
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner13(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer14(i32* %ptr) {
+ %C = call i64 @inner14(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner14
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner14(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer15(i32* %ptr) {
+ %C = call i64 @inner15(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner15
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner15(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer16(i32 %V1, i64 %V2) {
+ %C = call i64 @inner16(i32 %V1, i64 %V2)
+ ret i64 %C
+}
+
+; sext can be folded into shl.
+; CHECK: Analyzing call of inner16
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 4
+define i64 @inner16(i32 %V1, i64 %V2) {
+ %E = sext i32 %V1 to i64
+ %S = shl i64 %E, 3
+ %A = add i64 %V2, %S
+ ret i64 %A
+}
--- /dev/null
+; REQUIRES: asserts
+; RUN: opt -inline -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
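+; Check that extending loads PowerPC supports natively are treated as free by
+; the inline cost analysis.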
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64le-ibm-linux-gnu"
+
+define i16 @outer1(i8* %ptr) {
+ %C = call i16 @inner1(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner1(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer2(i8* %ptr) {
+ %C = call i32 @inner2(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner2(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer3(i16* %ptr) {
+ %C = call i32 @inner3(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner3(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer4(i16* %ptr) {
+ %C = call i32 @inner4(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner4(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer5(i8* %ptr) {
+ %C = call i64 @inner5(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer6(i16* %ptr) {
+ %C = call i64 @inner6(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner6(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer7(i16* %ptr) {
+ %C = call i64 @inner7(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner7(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer8(i32* %ptr) {
+ %C = call i64 @inner8(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner8(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer9(i32* %ptr) {
+ %C = call i64 @inner9(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner9(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}
--- /dev/null
+if 'PowerPC' not in config.root.targets:
+ config.unsupported = True
+
--- /dev/null
+; REQUIRES: asserts
+; RUN: opt -inline -mtriple=x86_64-unknown-unknown -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
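+; Check that free exts and extending loads on x86-64 are not counted against
+; the inline cost.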
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define i32 @outer1(i32* %ptr, i32 %i) {
+ %C = call i32 @inner1(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; zext from i32 to i64 is free.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner1(i32* %ptr, i32 %i) {
+ %E = zext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i16 @outer2(i8* %ptr) {
+ %C = call i16 @inner2(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner2(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i16 @outer3(i8* %ptr) {
+ %C = call i16 @inner3(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner3(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer4(i8* %ptr) {
+ %C = call i32 @inner4(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner4(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer5(i8* %ptr) {
+ %C = call i32 @inner5(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer6(i16* %ptr) {
+ %C = call i32 @inner6(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner6(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer7(i16* %ptr) {
+ %C = call i32 @inner7(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner7(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer8(i8* %ptr) {
+ %C = call i64 @inner8(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner8(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer9(i8* %ptr) {
+ %C = call i64 @inner9(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner9(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer10(i16* %ptr) {
+ %C = call i64 @inner10(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner10
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner10(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer11(i16* %ptr) {
+ %C = call i64 @inner11(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner11
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner11(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer12(i32* %ptr) {
+ %C = call i64 @inner12(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner12
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner12(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer13(i32* %ptr) {
+ %C = call i64 @inner13(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner13
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner13(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}