if (!LoadValueTy.isScalar())
return false;
+ // Most architectures are going to legalize <s8 loads into at least a 1-byte
+ // load, and the MMOs can only describe memory accesses in multiples of bytes.
+ // If we try to perform extload combining on those, we can end up with
+ // %a(s8) = extload %ptr (load 1 byte from %ptr)
+ // ... which is an illegal extload instruction.
+ if (LoadValueTy.getSizeInBits() < 8)
+ return false;
+
// Find the preferred type aside from the any-extends (unless it's the only
// one) and non-extending ops. We'll emit an extending load to that type and
// emit a variant of (extend (trunc X)) for the others according to the
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+
+# Check that we don't try to combine a load of < s8, as that would end up creating an illegal non-extending load.
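+# Without the new size check, the combiner would fold the G_ZEXT into the
+# load, giving something like
+#   %1:_(s8) = G_ZEXTLOAD %0(p0) :: (load 1 from %ir.ptr)
+# where the result is no wider than the 1-byte memory access, so the
+# "extending" load does not actually extend and the verifier rejects it.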
+--- |
+ define i8 @test(i1* %ptr) {
+ ret i8 undef
+ }
+
+...
+---
+name: test
+alignment: 2
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x0
+
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.ptr)
+ ; CHECK: [[ZEXT:%[0-9]+]]:_(s8) = G_ZEXT [[LOAD]](s1)
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s8)
+ ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(p0) = COPY $x0
+ %1:_(s1) = G_LOAD %0(p0) :: (load 1 from %ir.ptr)
+ %2:_(s8) = G_ZEXT %1(s1)
+ %3:_(s32) = G_ANYEXT %2(s8)
+ $w0 = COPY %3(s32)
+ RET_ReallyLR implicit $w0
+
+...