/// swifterror attribute.
bool isSwiftError() const;
- /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
+ /// Strip off pointer casts, all-zero GEPs and address space casts.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'.
const Value *stripPointerCasts() const;
Value *stripPointerCasts() {
return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCasts());
+ static_cast<const Value *>(this)->stripPointerCasts());
}
- /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases
+ /// Strip off pointer casts, all-zero GEPs and address space casts
/// but ensure that the representation of the result stays the same.
///
/// Returns the original uncasted value with the same representation. If this
->stripPointerCastsSameRepresentation());
}
- /// Strip off pointer casts, all-zero GEPs, aliases and invariant group
- /// info.
+ /// Strip off pointer casts, all-zero GEPs and invariant group info.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'. This function should be used only in
/// Alias analysis.
const Value *stripPointerCastsAndInvariantGroups() const;
Value *stripPointerCastsAndInvariantGroups() {
- return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsAndInvariantGroups());
- }
-
- /// Strip off pointer casts and all-zero GEPs.
- ///
- /// Returns the original uncasted value. If this is called on a non-pointer
- /// value, it returns 'this'.
- const Value *stripPointerCastsNoFollowAliases() const;
- Value *stripPointerCastsNoFollowAliases() {
- return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsNoFollowAliases());
+ return const_cast<Value *>(static_cast<const Value *>(this)
+ ->stripPointerCastsAndInvariantGroups());
}
/// Strip off pointer casts and all-constant inbounds GEPs.
}
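A minimal sketch of the new behaviour (not part of the patch; the helper below and its IR string are illustrative): after this change, stripPointerCasts() stops at a GlobalAlias instead of looking through it, which is what the removed stripPointerCastsNoFollowAliases() used to guarantee.

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include <cassert>
#include <memory>

static void demoStripPointerCasts() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  // @a is a plain alias of @g.
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
      "@g = global i32 0\n"
      "@a = alias i32, i32* @g\n",
      Err, Ctx);
  llvm::GlobalAlias *A = M->getNamedAlias("a");
  // Aliases are no longer stripped; before this patch the call below
  // would have returned @g.
  assert(A->stripPointerCasts() == A && "alias must be preserved");
}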
/// Strip the pointer casts, but preserve the address space information.
-Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
+Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
auto *OldPtrTy = cast<PointerType>(Ptr->getType());
- Ptr = cast<Constant>(Ptr->stripPointerCastsNoFollowAliases());
+ Ptr = cast<Constant>(Ptr->stripPointerCasts());
auto *NewPtrTy = cast<PointerType>(Ptr->getType());
ElemTy = NewPtrTy->getPointerElementType();
auto *CalledValue = CS.getCalledValue();
auto *CalledFunction = CS.getCalledFunction();
if (CalledValue && !CalledFunction) {
- CalledValue = CalledValue->stripPointerCastsNoFollowAliases();
+ CalledValue = CalledValue->stripPointerCasts();
// Stripping pointer casts can reveal a called function.
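    // For example, a call through
    //   bitcast (i32 (i32)* @f to void (i8*)*)
    // strips down to the function @f.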
CalledFunction = dyn_cast<Function>(CalledValue);
}
// FIXME: consult devirt?
// Do not follow aliases, otherwise we could inadvertently follow
// dso_preemptable aliases or aliases with interposable linkage.
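  // For example, following "@a = weak alias void (), void ()* @f" to @f
  // would be unsound here, because a weak (interposable) alias may be bound
  // to a different definition at link time.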
- const GlobalValue *Callee = dyn_cast<GlobalValue>(
- CS.getCalledValue()->stripPointerCastsNoFollowAliases());
+ const GlobalValue *Callee =
+ dyn_cast<GlobalValue>(CS.getCalledValue()->stripPointerCasts());
if (!Callee) {
US.updateRange(UnknownRange);
return false;
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
- V = V->stripPointerCastsNoFollowAliases();
+ V = V->stripPointerCasts();
GlobalValue *GV = dyn_cast<GlobalValue>(V);
GlobalVariable *Var = dyn_cast<GlobalVariable>(V);
"expected llvm.used to be an array type");
if (const auto *A = cast<ConstantArray>(LU->getInitializer())) {
for (const Value *Op : A->operands()) {
- const auto *GV =
- cast<GlobalValue>(Op->stripPointerCastsNoFollowAliases());
+ const auto *GV = cast<GlobalValue>(Op->stripPointerCasts());
// Global symbols with internal or private linkage are not visible to
// the linker, and thus would cause an error when the linker tried to
// preserve the symbol due to the `/include:` directive.
return false;
// Relative pointers do not need to be dynamically relocated.
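      // For example, an initializer such as
      //   sub (i64 ptrtoint (i8* @g1 to i64), i64 ptrtoint (i8* @g2 to i64))
      // needs no dynamic relocation when both globals are dso_local.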
- if (auto *LHSGV = dyn_cast<GlobalValue>(
- LHSOp0->stripPointerCastsNoFollowAliases()))
- if (auto *RHSGV = dyn_cast<GlobalValue>(
- RHSOp0->stripPointerCastsNoFollowAliases()))
+ if (auto *LHSGV = dyn_cast<GlobalValue>(LHSOp0->stripPointerCasts()))
+ if (auto *RHSGV = dyn_cast<GlobalValue>(RHSOp0->stripPointerCasts()))
if (LHSGV->isDSOLocal() && RHSGV->isDSOLocal())
return false;
}
const ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
for (Value *Op : Init->operands()) {
- GlobalValue *G = cast<GlobalValue>(Op->stripPointerCastsNoFollowAliases());
+ GlobalValue *G = cast<GlobalValue>(Op->stripPointerCasts());
Set.insert(G);
}
return GV;
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
PSK_ZeroIndices,
- PSK_ZeroIndicesAndAliases,
- PSK_ZeroIndicesAndAliasesSameRepresentation,
- PSK_ZeroIndicesAndAliasesAndInvariantGroups,
+ PSK_ZeroIndicesSameRepresentation,
+ PSK_ZeroIndicesAndInvariantGroups,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
do {
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
- case PSK_ZeroIndicesAndAliases:
- case PSK_ZeroIndicesAndAliasesSameRepresentation:
- case PSK_ZeroIndicesAndAliasesAndInvariantGroups:
case PSK_ZeroIndices:
+ case PSK_ZeroIndicesSameRepresentation:
+ case PSK_ZeroIndicesAndInvariantGroups:
if (!GEP->hasAllZeroIndices())
return V;
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
- } else if (StripKind != PSK_ZeroIndicesAndAliasesSameRepresentation &&
+ } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
// TODO: If we know an address space cast will not change the
// representation we could look through it here as well.
V = cast<Operator>(V)->getOperand(0);
- } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
- if (StripKind == PSK_ZeroIndices || GA->isInterposable())
- return V;
- V = GA->getAliasee();
} else {
if (const auto *Call = dyn_cast<CallBase>(V)) {
if (const Value *RV = Call->getReturnedArgOperand()) {
        // The result of launder.invariant.group must alias its argument,
        // but it can't be marked with the returned attribute; that's why it
        // needs a special case.
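        // For example, given
        //   %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
        // %q is known to alias %p, so alias analysis may keep walking to %p.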
- if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
+ if (StripKind == PSK_ZeroIndicesAndInvariantGroups &&
(Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
V = Call->getArgOperand(0);
} // end anonymous namespace
const Value *Value::stripPointerCasts() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
+ return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}
const Value *Value::stripPointerCastsSameRepresentation() const {
- return stripPointerCastsAndOffsets<
- PSK_ZeroIndicesAndAliasesSameRepresentation>(this);
-}
-
-const Value *Value::stripPointerCastsNoFollowAliases() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}
const Value *Value::stripInBoundsConstantOffsets() const {
}
const Value *Value::stripPointerCastsAndInvariantGroups() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndInvariantGroups>(
- this);
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndInvariantGroups>(this);
}
const Value *
      Assert(InitArray, "wrong initializer for intrinsic global variable",
Init);
for (Value *Op : InitArray->operands()) {
- Value *V = Op->stripPointerCastsNoFollowAliases();
+ Value *V = Op->stripPointerCasts();
Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
isa<GlobalAlias>(V),
"invalid llvm.used member", V);
break; // Found a null terminator, skip the rest.
Constant *Associated = CS->getOperand(2);
- Associated = cast<Constant>(Associated->stripPointerCastsNoFollowAliases());
+ Associated = cast<Constant>(Associated->stripPointerCasts());
DtorFuncs[PriorityValue][Associated].push_back(DtorFunc);
}
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) {
- Value *Operand = Inits->getOperand(i)->stripPointerCastsNoFollowAliases();
+ Value *Operand = Inits->getOperand(i)->stripPointerCasts();
GlobalValue *GV = cast<GlobalValue>(Operand);
UsedValues.insert(GV);
}
}
static int compareNames(Constant *const *A, Constant *const *B) {
- Value *AStripped = (*A)->stripPointerCastsNoFollowAliases();
- Value *BStripped = (*B)->stripPointerCastsNoFollowAliases();
+ Value *AStripped = (*A)->stripPointerCasts();
+ Value *BStripped = (*B)->stripPointerCasts();
return AStripped->getName().compare(BStripped->getName());
}
+++ /dev/null
-; RUN: opt -S -instcombine -o - %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v64:64:64-v128:128:128-a0:0:64"
-
-
-
-; Cases that should be bitcast
-
-; Test cast between scalars with same bit sizes
-@alias_i32_to_f32 = alias float (float), bitcast (i32 (i32)* @func_i32 to float (float)*)
-
-; Test cast between vectors with same number of elements and bit sizes
-@alias_v2i32_to_v2f32 = alias <2 x float> (<2 x float>), bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <2 x float> (<2 x float>)*)
-
-; Test cast from vector to scalar with same number of bits
-@alias_v2f32_to_i64 = alias <2 x float> (<2 x float>), bitcast (i64 (i64)* @func_i64 to <2 x float> (<2 x float>)*)
-
-; Test cast from scalar to vector with same number of bits
-@alias_i64_to_v2f32 = alias i64 (i64), bitcast (<2 x float> (<2 x float>)* @func_v2f32 to i64 (i64)*)
-
-; Test cast between vectors of pointers
-@alias_v2i32p_to_v2i64p = alias <2 x i64*> (<2 x i64*>), bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to <2 x i64*> (<2 x i64*>)*)
-
-
-; Cases that should be invalid and unchanged
-
-; Test cast between scalars with different bit sizes
-@alias_i64_to_f32 = alias float (float), bitcast (i64 (i64)* @func_i64 to float (float)*)
-
-; Test cast between vectors with different bit sizes but the
-; same number of elements
-@alias_v2i64_to_v2f32 = alias <2 x float> (<2 x float>), bitcast (<2 x i64> (<2 x i64>)* @func_v2i64 to <2 x float> (<2 x float>)*)
-
-; Test cast between vectors with same number of bits and different
-; numbers of elements
-@alias_v2i32_to_v4f32 = alias <4 x float> (<4 x float>), bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <4 x float> (<4 x float>)*)
-
-; Test cast between scalar and vector with different number of bits
-@alias_i64_to_v4f32 = alias i64 (i64), bitcast (<4 x float> (<4 x float>)* @func_v4f32 to i64 (i64)*)
-
-; Test cast between vector and scalar with different number of bits
-@alias_v4f32_to_i64 = alias <4 x float> (<4 x float>), bitcast (i64 (i64)* @func_i64 to <4 x float> (<4 x float>)*)
-
-; Test cast from scalar to vector of pointers with same number of bits
-; We don't know the pointer size at this point, so this can't be done
-@alias_i64_to_v2i32p = alias i64 (i64), bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to i64 (i64)*)
-
-; Test cast between vector of pointers and scalar with different number of bits
-@alias_v4i32p_to_i64 = alias <4 x i32*> (<4 x i32*>), bitcast (i64 (i64)* @func_i64 to <4 x i32*> (<4 x i32*>)*)
-
-
-
-define internal <2 x i32> @func_v2i32(<2 x i32> %v) noinline nounwind {
-entry:
- ret <2 x i32> %v
-}
-
-define internal <2 x float> @func_v2f32(<2 x float> %v) noinline nounwind {
-entry:
- ret <2 x float> %v
-}
-
-define internal <4 x float> @func_v4f32(<4 x float> %v) noinline nounwind {
-entry:
- ret <4 x float> %v
-}
-
-define internal i32 @func_i32(i32 %v) noinline nounwind {
-entry:
- ret i32 %v
-}
-
-define internal i64 @func_i64(i64 %v) noinline nounwind {
-entry:
- ret i64 %v
-}
-
-define internal <2 x i64> @func_v2i64(<2 x i64> %v) noinline nounwind {
-entry:
- ret <2 x i64> %v
-}
-
-define internal <2 x i32*> @func_v2i32p(<2 x i32*> %v) noinline nounwind {
-entry:
- ret <2 x i32*> %v
-}
-
-; Valid cases, only bitcast for argument / return type and call underlying function
-
-; Sizes match, should only bitcast
-define void @bitcast_alias_scalar(float* noalias %source, float* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_scalar
-; CHECK: bitcast float* %source to i32*
-; CHECK: load i32, i32*
-; CHECK-NOT: fptoui
-; CHECK-NOT: uitofp
-; CHECK: bitcast float* %dest to i32*
-; CHECK: store i32
- %tmp = load float, float* %source, align 8
- %call = call float @alias_i32_to_f32(float %tmp) nounwind
- store float %call, float* %dest, align 8
- ret void
-}
-
-; Sizes match, should only bitcast
-define void @bitcast_alias_vector(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector
-; CHECK: bitcast <2 x float>* %source to <2 x i32>*
-; CHECK: load <2 x i32>, <2 x i32>*
-; CHECK-NOT: fptoui
-; CHECK-NOT: uitofp
-; CHECK: bitcast <2 x float>* %dest to <2 x i32>*
-; CHECK: store <2 x i32>
- %tmp = load <2 x float>, <2 x float>* %source, align 8
- %call = call <2 x float> @alias_v2i32_to_v2f32(<2 x float> %tmp) nounwind
- store <2 x float> %call, <2 x float>* %dest, align 8
- ret void
-}
-
-; Sizes match, should only bitcast
-define void @bitcast_alias_vector_scalar_same_size(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector_scalar_same_size
-; CHECK: bitcast <2 x float>* %source to i64*
-; CHECK: load i64, i64*
-; CHECK: %call = call i64 @func_i64
-; CHECK: bitcast <2 x float>* %dest to i64*
-; CHECK: store i64
- %tmp = load <2 x float>, <2 x float>* %source, align 8
- %call = call <2 x float> @alias_v2f32_to_i64(<2 x float> %tmp) nounwind
- store <2 x float> %call, <2 x float>* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_scalar_vector_same_size(i64* noalias %source, i64* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_scalar_vector_same_size
-; CHECK: bitcast i64* %source to <2 x float>*
-; CHECK: load <2 x float>, <2 x float>*
-; CHECK: call <2 x float> @func_v2f32
-; CHECK: bitcast i64* %dest to <2 x float>*
-; CHECK: store <2 x float>
- %tmp = load i64, i64* %source, align 8
- %call = call i64 @alias_i64_to_v2f32(i64 %tmp) nounwind
- store i64 %call, i64* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_vector_ptrs_same_size(<2 x i64*>* noalias %source, <2 x i64*>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector_ptrs_same_size
-; CHECK: bitcast <2 x i64*>* %source to <2 x i32*>*
-; CHECK: load <2 x i32*>, <2 x i32*>*
-; CHECK: call <2 x i32*> @func_v2i32p
-; CHECK: bitcast <2 x i64*>* %dest to <2 x i32*>*
-; CHECK: store <2 x i32*>
- %tmp = load <2 x i64*>, <2 x i64*>* %source, align 8
- %call = call <2 x i64*> @alias_v2i32p_to_v2i64p(<2 x i64*> %tmp) nounwind
- store <2 x i64*> %call, <2 x i64*>* %dest, align 8
- ret void
-}
-
-; Invalid cases:
-
-define void @bitcast_alias_mismatch_scalar_size(float* noalias %source, float* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_mismatch_scalar_size
-; CHECK-NOT: fptoui
-; CHECK: @alias_i64_to_f32
-; CHECK-NOT: uitofp
- %tmp = load float, float* %source, align 8
- %call = call float @alias_i64_to_f32(float %tmp) nounwind
- store float %call, float* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_mismatch_vector_element_and_bit_size(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_mismatch_vector_element_and_bit_size
-; CHECK-NOT: fptoui <2 x float> %tmp to <2 x i64>
-; CHECK: @alias_v2i64_to_v2f32
-; CHECK-NOT: uitofp <2 x i64> %call to <2 x float>
- %tmp = load <2 x float>, <2 x float>* %source, align 8
- %call = call <2 x float> @alias_v2i64_to_v2f32(<2 x float> %tmp) nounwind
- store <2 x float> %call, <2 x float>* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_vector_mismatched_number_elements(<4 x float>* noalias %source, <4 x float>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector_mismatched_number_elements
-; CHECK: %call = call <4 x float> @alias_v2i32_to_v4f32
- %tmp = load <4 x float>, <4 x float>* %source, align 8
- %call = call <4 x float> @alias_v2i32_to_v4f32(<4 x float> %tmp) nounwind
- store <4 x float> %call, <4 x float>* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_vector_scalar_mismatched_bit_size(<4 x float>* noalias %source, <4 x float>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector_scalar_mismatched_bit_size
-; CHECK: %call = call <4 x float> @alias_v4f32_to_i64
- %tmp = load <4 x float>, <4 x float>* %source, align 8
- %call = call <4 x float> @alias_v4f32_to_i64(<4 x float> %tmp) nounwind
- store <4 x float> %call, <4 x float>* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_vector_ptrs_scalar_mismatched_bit_size(<4 x i32*>* noalias %source, <4 x i32*>* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_vector_ptrs_scalar_mismatched_bit_size
-; CHECK: @alias_v4i32p_to_i64
- %tmp = load <4 x i32*>, <4 x i32*>* %source, align 8
- %call = call <4 x i32*> @alias_v4i32p_to_i64(<4 x i32*> %tmp) nounwind
- store <4 x i32*> %call, <4 x i32*>* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_scalar_vector_ptrs_same_size(i64* noalias %source, i64* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_scalar_vector_ptrs_same_size
-; CHECK: @alias_i64_to_v2i32p
- %tmp = load i64, i64* %source, align 8
- %call = call i64 @alias_i64_to_v2i32p(i64 %tmp) nounwind
- store i64 %call, i64* %dest, align 8
- ret void
-}
-
-define void @bitcast_alias_scalar_vector_mismatched_bit_size(i64* noalias %source, i64* noalias %dest) nounwind {
-entry:
-; CHECK-LABEL: @bitcast_alias_scalar_vector_mismatched_bit_size
-; CHECK: call i64 @alias_i64_to_v4f32
- %tmp = load i64, i64* %source, align 8
- %call = call i64 @alias_i64_to_v4f32(i64 %tmp) nounwind
- store i64 %call, i64* %dest, align 8
- ret void
-}
-
--- /dev/null
+; RUN: opt -S -instcombine -o - %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v64:64:64-v128:128:128-a0:0:64"
+
+define internal <2 x i32> @func_v2i32(<2 x i32> %v) noinline nounwind {
+entry:
+ ret <2 x i32> %v
+}
+
+define internal <2 x float> @func_v2f32(<2 x float> %v) noinline nounwind {
+entry:
+ ret <2 x float> %v
+}
+
+define internal <4 x float> @func_v4f32(<4 x float> %v) noinline nounwind {
+entry:
+ ret <4 x float> %v
+}
+
+define internal i32 @func_i32(i32 %v) noinline nounwind {
+entry:
+ ret i32 %v
+}
+
+define internal i64 @func_i64(i64 %v) noinline nounwind {
+entry:
+ ret i64 %v
+}
+
+define internal <2 x i64> @func_v2i64(<2 x i64> %v) noinline nounwind {
+entry:
+ ret <2 x i64> %v
+}
+
+define internal <2 x i32*> @func_v2i32p(<2 x i32*> %v) noinline nounwind {
+entry:
+ ret <2 x i32*> %v
+}
+
+; Valid cases: only bitcast the argument / return values and call the underlying function
+
+; Test cast between scalars with same bit sizes
+; Sizes match, should only bitcast
+define void @bitcast_scalar(float* noalias %source, float* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_scalar
+; CHECK: bitcast float* %source to i32*
+; CHECK: load i32, i32*
+; CHECK-NOT: fptoui
+; CHECK-NOT: uitofp
+; CHECK: bitcast float* %dest to i32*
+; CHECK: store i32
+ %tmp = load float, float* %source, align 8
+ %call = call float bitcast (i32 (i32)* @func_i32 to float (float)*)(float %tmp) nounwind
+ store float %call, float* %dest, align 8
+ ret void
+}
+
+; Test cast between vectors with same number of elements and bit sizes
+; Sizes match, should only bitcast
+define void @bitcast_vector(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector
+; CHECK: bitcast <2 x float>* %source to <2 x i32>*
+; CHECK: load <2 x i32>, <2 x i32>*
+; CHECK-NOT: fptoui
+; CHECK-NOT: uitofp
+; CHECK: bitcast <2 x float>* %dest to <2 x i32>*
+; CHECK: store <2 x i32>
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
+ %call = call <2 x float> bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <2 x float> (<2 x float>)*)(<2 x float> %tmp) nounwind
+ store <2 x float> %call, <2 x float>* %dest, align 8
+ ret void
+}
+
+; Test cast from vector to scalar with same number of bits
+; Sizes match, should only bitcast
+define void @bitcast_vector_scalar_same_size(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector_scalar_same_size
+; CHECK: bitcast <2 x float>* %source to i64*
+; CHECK: load i64, i64*
+; CHECK: %call = call i64 @func_i64
+; CHECK: bitcast <2 x float>* %dest to i64*
+; CHECK: store i64
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
+ %call = call <2 x float> bitcast (i64 (i64)* @func_i64 to <2 x float> (<2 x float>)*)(<2 x float> %tmp) nounwind
+ store <2 x float> %call, <2 x float>* %dest, align 8
+ ret void
+}
+
+; Test cast from scalar to vector with same number of bits
+define void @bitcast_scalar_vector_same_size(i64* noalias %source, i64* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_scalar_vector_same_size
+; CHECK: bitcast i64* %source to <2 x float>*
+; CHECK: load <2 x float>, <2 x float>*
+; CHECK: call <2 x float> @func_v2f32
+; CHECK: bitcast i64* %dest to <2 x float>*
+; CHECK: store <2 x float>
+ %tmp = load i64, i64* %source, align 8
+ %call = call i64 bitcast (<2 x float> (<2 x float>)* @func_v2f32 to i64 (i64)*)(i64 %tmp) nounwind
+ store i64 %call, i64* %dest, align 8
+ ret void
+}
+
+; Test cast between vectors of pointers
+define void @bitcast_vector_ptrs_same_size(<2 x i64*>* noalias %source, <2 x i64*>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector_ptrs_same_size
+; CHECK: bitcast <2 x i64*>* %source to <2 x i32*>*
+; CHECK: load <2 x i32*>, <2 x i32*>*
+; CHECK: call <2 x i32*> @func_v2i32p
+; CHECK: bitcast <2 x i64*>* %dest to <2 x i32*>*
+; CHECK: store <2 x i32*>
+ %tmp = load <2 x i64*>, <2 x i64*>* %source, align 8
+ %call = call <2 x i64*> bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to <2 x i64*> (<2 x i64*>)*)(<2 x i64*> %tmp) nounwind
+ store <2 x i64*> %call, <2 x i64*>* %dest, align 8
+ ret void
+}
+
+; Invalid cases:
+
+; Test cast between scalars with different bit sizes
+define void @bitcast_mismatch_scalar_size(float* noalias %source, float* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_mismatch_scalar_size
+; CHECK-NOT: fptoui
+; CHECK: call float bitcast
+; CHECK-NOT: uitofp
+ %tmp = load float, float* %source, align 8
+ %call = call float bitcast (i64 (i64)* @func_i64 to float (float)*)(float %tmp) nounwind
+ store float %call, float* %dest, align 8
+ ret void
+}
+
+; Test cast between vectors with different bit sizes but the
+; same number of elements
+define void @bitcast_mismatch_vector_element_and_bit_size(<2 x float>* noalias %source, <2 x float>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_mismatch_vector_element_and_bit_size
+; CHECK-NOT: fptoui <2 x float> %tmp to <2 x i64>
+; CHECK: call <2 x float> bitcast
+; CHECK-NOT: uitofp <2 x i64> %call to <2 x float>
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
+ %call = call <2 x float> bitcast (<2 x i64> (<2 x i64>)* @func_v2i64 to <2 x float> (<2 x float>)*)(<2 x float> %tmp) nounwind
+ store <2 x float> %call, <2 x float>* %dest, align 8
+ ret void
+}
+
+; Test cast between vectors with same number of bits and different
+; numbers of elements
+define void @bitcast_vector_mismatched_number_elements(<4 x float>* noalias %source, <4 x float>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector_mismatched_number_elements
+; CHECK: %call = call <4 x float> bitcast
+ %tmp = load <4 x float>, <4 x float>* %source, align 8
+ %call = call <4 x float> bitcast (<2 x i32> (<2 x i32>)* @func_v2i32 to <4 x float> (<4 x float>)*)(<4 x float> %tmp) nounwind
+ store <4 x float> %call, <4 x float>* %dest, align 8
+ ret void
+}
+
+; Test cast between vector and scalar with different number of bits
+define void @bitcast_vector_scalar_mismatched_bit_size(<4 x float>* noalias %source, <4 x float>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector_scalar_mismatched_bit_size
+; CHECK: %call = call <4 x float> bitcast
+ %tmp = load <4 x float>, <4 x float>* %source, align 8
+ %call = call <4 x float> bitcast (i64 (i64)* @func_i64 to <4 x float> (<4 x float>)*)(<4 x float> %tmp) nounwind
+ store <4 x float> %call, <4 x float>* %dest, align 8
+ ret void
+}
+
+; Test cast between vector of pointers and scalar with different number of bits
+define void @bitcast_vector_ptrs_scalar_mismatched_bit_size(<4 x i32*>* noalias %source, <4 x i32*>* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_vector_ptrs_scalar_mismatched_bit_size
+; CHECK: call <4 x i32*> bitcast
+ %tmp = load <4 x i32*>, <4 x i32*>* %source, align 8
+ %call = call <4 x i32*> bitcast (i64 (i64)* @func_i64 to <4 x i32*> (<4 x i32*>)*)(<4 x i32*> %tmp) nounwind
+ store <4 x i32*> %call, <4 x i32*>* %dest, align 8
+ ret void
+}
+
+; Test cast from scalar to vector of pointers with same number of bits
+; We don't know the pointer size at this point, so this can't be done
+define void @bitcast_scalar_vector_ptrs_same_size(i64* noalias %source, i64* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_scalar_vector_ptrs_same_size
+; CHECK: call i64 bitcast
+ %tmp = load i64, i64* %source, align 8
+ %call = call i64 bitcast (<2 x i32*> (<2 x i32*>)* @func_v2i32p to i64 (i64)*)(i64 %tmp) nounwind
+ store i64 %call, i64* %dest, align 8
+ ret void
+}
+
+; Test cast between scalar and vector with different number of bits
+define void @bitcast_scalar_vector_mismatched_bit_size(i64* noalias %source, i64* noalias %dest) nounwind {
+entry:
+; CHECK-LABEL: @bitcast_scalar_vector_mismatched_bit_size
+; CHECK: call i64 bitcast
+ %tmp = load i64, i64* %source, align 8
+ %call = call i64 bitcast (<4 x float> (<4 x float>)* @func_v4f32 to i64 (i64)*)(i64 %tmp) nounwind
+ store i64 %call, i64* %dest, align 8
+ ret void
+}
+
--- /dev/null
+; RUN: opt -S -instcombine -o - %s | FileCheck %s
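+; @x encodes an HWASAN pointer tag (0x87) in its high bits via
+; ptrtoint/add/inttoptr. InstCombine must keep the GEP based on @x and not
+; fold through the alias to @x.hwasan, which would drop the tag.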
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-android10000"
+
+@x.hwasan = private global { [3 x i32], [4 x i8] } { [3 x i32] [i32 42, i32 57, i32 10], [4 x i8] c"\00\00\00\87" }, align 16
+@x = alias [3 x i32], inttoptr (i64 add (i64 ptrtoint ({ [3 x i32], [4 x i8] }* @x.hwasan to i64), i64 -8718968878589280256) to [3 x i32]*)
+
+define i32 @f(i64 %i) {
+entry:
+ ; CHECK: getelementptr inbounds [3 x i32], [3 x i32]* @x
+ %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* @x, i64 0, i64 %i
+ %0 = load i32, i32* %arrayidx
+ ret i32 %0
+}
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load %struct._IO_FILE*, %struct._IO_FILE** @stderr, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @__fwrite_alias(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i64 7, i64 1, %struct._IO_FILE* [[TMP0]])
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @fwrite(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i64 7, i64 1, %struct._IO_FILE* [[TMP0]])
; CHECK-NEXT: ret void
;
entry: