return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
-int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
unsigned Alignment, unsigned AddressSpace) {
- std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
+ auto LT = TLI->getTypeLegalizationCost(DL, Ty);
if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
- Src->isVectorTy() && Alignment != 16 &&
- Src->getVectorElementType()->isIntegerTy(64)) {
- // Unaligned stores are extremely inefficient. We don't split
- // unaligned v2i64 stores because the negative impact that has shown in
- // practice on inlined memcpy code.
- // We make v2i64 stores expensive so that we will only vectorize if there
+ LT.second.is128BitVector() && Alignment < 16) {
+ // Unaligned stores are extremely inefficient. We don't split all
+ // unaligned 128-bit stores because of the negative impact that has been
+ // observed in practice on inlined block copy code.
+ // We make such stores expensive so that we will only vectorize if there
// are 6 other instructions getting vectorized.
- int AmortizationCost = 6;
+ const int AmortizationCost = 6;
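+ // For example, an unaligned <4 x i64> store is legalized into two 128-bit
+ // stores (LT.first == 2), giving a cost of 2 * 2 * 6 = 24, while a single
+ // <2 x i64> store (LT.first == 1) costs 1 * 2 * 6 = 12.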
return LT.first * 2 * AmortizationCost;
}
- if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
- Src->getVectorNumElements() < 8) {
+ if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
+ Ty->getVectorNumElements() < 8) {
// We scalarize the loads/stores because there is no v.4b register and we
// have to promote the elements to v.4h.
- unsigned NumVecElts = Src->getVectorNumElements();
+ unsigned NumVecElts = Ty->getVectorNumElements();
unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
// We generate 2 instructions per vector element.
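+ // For example, a <4 x i8> access is costed at (4 * 2) * 4 * 2 = 64.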
return NumVectorizableInstsToAmortize * NumVecElts * 2;
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
; CHECK-LABEL: getMemoryOpCost
; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
define void @getMemoryOpCost() {
- ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
- ; Cyclone, for example, such stores should be expensive because we don't
- ; split them and misaligned 16b stores have bad performance.
- ;
- ; CHECK: cost of 1 {{.*}} store
- ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
+ ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores that
+ ; are not known to be 16-byte aligned. The unlegalized 256-bit stores below
+ ; are penalized twice as much, since they are legalized into two 128-bit
+ ; stores.
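+ ; With the feature enabled, each misaligned 128-bit store costs 2 * 6 = 12,
+ ; so the 128-bit types below cost 12 and the 256-bit types cost 24.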
+
+ ; CHECK: cost of 2 for {{.*}} store <4 x i64>
+ ; SLOW_MISALIGNED_128_STORE: cost of 24 for {{.*}} store <4 x i64>
+ store <4 x i64> undef, <4 x i64> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x i32>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x i32>
+ store <8 x i32> undef, <8 x i32> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x i16>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x i16>
+ store <16 x i16> undef, <16 x i16> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <32 x i8>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <32 x i8>
+ store <32 x i8> undef, <32 x i8> * undef
+
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <4 x double>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <4 x double>
+ store <4 x double> undef, <4 x double> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x float>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x float>
+ store <8 x float> undef, <8 x float> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x half>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x half>
+ store <16 x half> undef, <16 x half> * undef
+
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x i64>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x i64>
store <2 x i64> undef, <2 x i64> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x i32>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x i32>
+ store <4 x i32> undef, <4 x i32> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x i16>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x i16>
+ store <8 x i16> undef, <8 x i16> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <16 x i8>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <16 x i8>
+ store <16 x i8> undef, <16 x i8> * undef
+
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x double>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x double>
+ store <2 x double> undef, <2 x double> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x float>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x float>
+ store <4 x float> undef, <4 x float> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x half>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x half>
+ store <8 x half> undef, <8 x half> * undef
; We scalarize the loads/stores because there is no vector register name for
; these types (they get extended to v.4h/v.2s).
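+ ; For example, a <4 x i8> access is costed at (4 * 2) * 4 * 2 = 64.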