------------------------------------------------------------------------
r293088 | tnorthover | 2017-01-25 12:58:26 -0800 (Wed, 25 Jan 2017) | 5 lines
SDag: fix how initial loads are formed when splitting vector ops.
Later code expects the vector loads produced to be directly
concatenable, which means we shouldn't pad anything except the last load
produced with UNDEF.
------------------------------------------------------------------------
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_40@293103 91177308-0d34-0410-b5e6-96231b3b80d8
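
As a rough illustration of the rule the commit message describes, the following is a minimal standalone C++ sketch, not the LLVM implementation: the width table and the splitLoad() helper are invented for this example, and only the 16 + 8 + 4 byte result for 28 bytes is taken from the test added below. A wide vector load is broken into progressively narrower loads; because the pieces are later concatenated back into the original type, every piece except the last must keep its natural width, and only the final piece may be padded (with UNDEF in the DAG).

    // Minimal standalone sketch -- not the LLVM code -- of the splitting rule
    // the fix relies on.
    #include <cstdio>
    #include <vector>

    // Illustrative load widths in bytes, widest first (e.g. a 128-bit vld1,
    // a 64-bit vldr and a 32-bit ldr on ARM).
    static const int Widths[] = {16, 8, 4};

    // Keep taking the widest load that still fits in the bytes that remain.
    // Only the final piece may be narrower than its slot and therefore
    // padded out; padding an earlier piece would change its width and break
    // the later concatenation.
    static std::vector<int> splitLoad(int TotalBytes) {
      std::vector<int> Pieces;
      int Remaining = TotalBytes;
      for (int W : Widths)
        while (W <= Remaining) {
          Pieces.push_back(W);
          Remaining -= W;
        }
      if (Remaining > 0)             // leftover smaller than the narrowest
        Pieces.push_back(Remaining); // width: handled as the last piece
      return Pieces;
    }

    int main() {
      for (int P : splitLoad(28))    // <28 x i8>: prints 16, 8, 4
        printf("load %d bytes\n", P);
      return 0;
    }
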
LD->getPointerInfo().getWithOffset(Offset),
MinAlign(Align, Increment), MMOFlags, AAInfo);
LdChain.push_back(L.getValue(1));
- if (L->getValueType(0).isVector()) {
+ if (L->getValueType(0).isVector() && NewVTWidth >= LdWidth) {
+ // Later code assumes the vector loads produced will be mergeable, so we
+ // must pad the final entry up to the previous width. Scalars are
+ // combined separately.
SmallVector<SDValue, 16> Loads;
Loads.push_back(L);
unsigned size = L->getValueSizeInBits(0);
%zlA = zext <4 x i8> %lA to <4 x i32>
ret <4 x i32> %zlA
}
+
+; CHECK-LABEL: test_silly_load:
+; CHECK: ldr {{r[0-9]+}}, [r0, #24]
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0:128]!
+; CHECK: vldr d{{[0-9]+}}, [r0]
+
+define void @test_silly_load(<28 x i8>* %addr) {
+ load volatile <28 x i8>, <28 x i8>* %addr
+ ret void
+}
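
The CHECK lines above match that 16 + 8 + 4 byte split: a 16-byte vld1.8 with post-increment for bytes 0-15, an 8-byte vldr for bytes 16-23, and a 4-byte ldr at offset #24 for the final word, so all 28 bytes are covered without padding any of the earlier pieces.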