From a4e52312e828520f6bc93cdce8335d8bd07b69b7 Mon Sep 17 00:00:00 2001 From: Artur Pilipenko <apilipenko@azulsystems.com> Date: Wed, 1 Mar 2017 18:12:29 +0000 Subject: [PATCH] [DAGCombiner] Support {a|s}ext, {a|z|s}ext load nodes in load combine Resubmit r295336 after the bug with non-zero offset patterns on BE targets is fixed (r296336). Support {a|s}ext, {a|z|s}ext load nodes as a part of load combine patterns. Reviewed By: filcab Differential Revision: https://reviews.llvm.org/D29591 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@296651 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 27 ++-- .../AArch64/load-combine-big-endian.ll | 6 +- test/CodeGen/AArch64/load-combine.ll | 7 +- test/CodeGen/AMDGPU/insert_vector_elt.ll | 31 ++-- test/CodeGen/ARM/fp16-promote.ll | 12 +- test/CodeGen/ARM/load-combine-big-endian.ll | 11 +- test/CodeGen/ARM/load-combine.ll | 12 +- test/CodeGen/PowerPC/ppc64le-aggregates.ll | 5 +- test/CodeGen/X86/load-combine.ll | 133 ++++-------------- 9 files changed, 74 insertions(+), 170 deletions(-) diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index aa84850ab86..547655f9f7b 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -4506,6 +4506,8 @@ const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, : calculateByteProvider(Op->getOperand(0), Index - ByteShift, Depth + 1); } + case ISD::ANY_EXTEND: + case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: { SDValue NarrowOp = Op->getOperand(0); unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits(); @@ -4513,22 +4515,32 @@ const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, return None; uint64_t NarrowByteWidth = NarrowBitWidth / 8; - return Index >= NarrowByteWidth - ? 
ByteProvider::getConstantZero() - : calculateByteProvider(NarrowOp, Index, Depth + 1); + if (Index >= NarrowByteWidth) + return Op.getOpcode() == ISD::ZERO_EXTEND + ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + : None; + else + return calculateByteProvider(NarrowOp, Index, Depth + 1); } case ISD::BSWAP: return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1, Depth + 1); case ISD::LOAD: { auto L = cast<LoadSDNode>(Op.getNode()); + if (L->isVolatile() || L->isIndexed()) + return None; - // TODO: support ext loads - if (L->isVolatile() || L->isIndexed() || - L->getExtensionType() != ISD::NON_EXTLOAD) + unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits(); + if (NarrowBitWidth % 8 != 0) return None; + uint64_t NarrowByteWidth = NarrowBitWidth / 8; - return ByteProvider::getMemory(L, Index); + if (Index >= NarrowByteWidth) + return L->getExtensionType() == ISD::ZEXTLOAD + ? Optional<ByteProvider>(ByteProvider::getConstantZero()) + : None; + else + return ByteProvider::getMemory(L, Index); } } @@ -4617,7 +4629,6 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { LoadSDNode *L = P->Load; assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() && - (L->getExtensionType() == ISD::NON_EXTLOAD) && "Must be enforced by calculateByteProvider"); assert(L->getOffset().isUndef() && "Unindexed load must have undef offset"); diff --git a/test/CodeGen/AArch64/load-combine-big-endian.ll b/test/CodeGen/AArch64/load-combine-big-endian.ll index e60e86a4052..918ceaeb1b4 100644 --- a/test/CodeGen/AArch64/load-combine-big-endian.ll +++ b/test/CodeGen/AArch64/load-combine-big-endian.ll @@ -336,11 +336,8 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) { ; (i32) p[1] | (sext(p[0] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh w8, [x0] -; CHECK-NEXT: ldrh w0, [x0, #2] -; CHECK-NEXT: bfi w0, w8, #16, #16 +; CHECK: ldr w0, [x0] ; CHECK-NEXT: ret - %tmp = bitcast i32* %arg to i16* 
%tmp1 = load i16, i16* %tmp, align 4 %tmp2 = sext i16 %tmp1 to i32 @@ -399,7 +396,6 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-NEXT: ldur w8, [x8, #13] ; CHECK-NEXT: rev w0, w8 ; CHECK-NEXT: ret - %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 %tmp3 = add nuw nsw i32 %i, 2 diff --git a/test/CodeGen/AArch64/load-combine.ll b/test/CodeGen/AArch64/load-combine.ll index 59622fc3e0a..f0ed40357f1 100644 --- a/test/CodeGen/AArch64/load-combine.ll +++ b/test/CodeGen/AArch64/load-combine.ll @@ -324,12 +324,8 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) { ; (i32) p[0] | (sext(p[1] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh w8, [x0] -; CHECK-NEXT: ldrh w9, [x0, #2] -; CHECK-NEXT: bfi w8, w9, #16, #16 -; CHECK-NEXT: mov w0, w8 +; CHECK: ldr w0, [x0] ; CHECK-NEXT: ret - %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = zext i16 %tmp1 to i32 @@ -386,7 +382,6 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK: add x8, x0, w1, uxtw ; CHECK-NEXT: ldur w0, [x8, #13] ; CHECK-NEXT: ret - %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 %tmp3 = add nuw nsw i32 %i, 2 diff --git a/test/CodeGen/AMDGPU/insert_vector_elt.ll b/test/CodeGen/AMDGPU/insert_vector_elt.ll index f35bf1d546f..97ef5ce9039 100644 --- a/test/CodeGen/AMDGPU/insert_vector_elt.ll +++ b/test/CodeGen/AMDGPU/insert_vector_elt.ll @@ -1,5 +1,5 @@ -; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s -; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s +; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-NO-TONGA %s +; RUN: llc 
-verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-TONGA %s ; FIXME: Broken on evergreen ; FIXME: For some reason the 8 and 16 vectors are being stored as @@ -219,10 +219,7 @@ define void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> ; GCN: s_waitcnt -; GCN: buffer_load_ushort -; GCN: buffer_load_ushort -; GCN: buffer_load_ushort -; GCN: buffer_load_ushort +; GCN: buffer_load_dwordx2 ; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind { @@ -240,8 +237,9 @@ define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> ; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-TONGA: buffer_load_ushort ; GCN: buffer_store_short v{{[0-9]+}}, off define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind { @@ -259,9 +257,11 @@ define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:5 ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:6 -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-TONGA: buffer_load_ushort +; GCN-TONGA: buffer_load_ubyte ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off ; GCN-DAG: buffer_store_short v{{[0-9]+}}, off @@ -284,10 +284,11 @@ define void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a ; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 
s{{[0-9]+}} offen{{$}} -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte -; GCN: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-NO-TONGA: buffer_load_ubyte +; GCN-TONGA: buffer_load_dword ; GCN: buffer_store_dword v{{[0-9]+}}, off define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind { diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll index 2f7dff70b9b..34b5f29b789 100644 --- a/test/CodeGen/ARM/fp16-promote.ll +++ b/test/CodeGen/ARM/fp16-promote.ll @@ -847,21 +847,15 @@ define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 { } ; CHECK-ALL-LABEL: test_extractelement: +; CHECK-VFP: push {{{.*}}, lr} ; CHECK-VFP: sub sp, sp, #8 -; CHECK-VFP: ldrh -; CHECK-VFP: ldrh -; CHECK-VFP: orr -; CHECK-VFP: str -; CHECK-VFP: ldrh -; CHECK-VFP: ldrh -; CHECK-VFP: orr -; CHECK-VFP: str +; CHECK-VFP: ldrd ; CHECK-VFP: mov ; CHECK-VFP: orr ; CHECK-VFP: ldrh ; CHECK-VFP: strh ; CHECK-VFP: add sp, sp, #8 -; CHECK-VFP: bx lr +; CHECK-VFP: pop {{{.*}}, pc} ; CHECK-NOVFP: ldrh ; CHECK-NOVFP: strh ; CHECK-NOVFP: ldrh diff --git a/test/CodeGen/ARM/load-combine-big-endian.ll b/test/CodeGen/ARM/load-combine-big-endian.ll index 4068be9527b..8d8a0136cf9 100644 --- a/test/CodeGen/ARM/load-combine-big-endian.ll +++ b/test/CodeGen/ARM/load-combine-big-endian.ll @@ -456,17 +456,12 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) { ; (i32) p[1] | (sext(p[0] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh r1, [r0] -; CHECK-NEXT: ldrh r0, [r0, #2] -; CHECK-NEXT: orr r0, r0, r1, lsl #16 +; CHECK: ldr r0, [r0] ; CHECK-NEXT: mov pc, lr - +; ; CHECK-ARMv6-LABEL: load_i32_by_sext_i16: -; CHECK-ARMv6: ldrh r1, [r0] -; CHECK-ARMv6-NEXT: ldrh r0, [r0, #2] -; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16 +; CHECK-ARMv6: ldr r0, [r0] ; CHECK-ARMv6-NEXT: 
bx lr - %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = sext i16 %tmp1 to i32 diff --git a/test/CodeGen/ARM/load-combine.ll b/test/CodeGen/ARM/load-combine.ll index f19911a8e66..720bc7b88b3 100644 --- a/test/CodeGen/ARM/load-combine.ll +++ b/test/CodeGen/ARM/load-combine.ll @@ -414,17 +414,12 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) { ; (i32) p[0] | (sext(p[1] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh r1, [r0, #2] -; CHECK-NEXT: ldrh r0, [r0] -; CHECK-NEXT: orr r0, r0, r1, lsl #16 +; CHECK: ldr r0, [r0] ; CHECK-NEXT: mov pc, lr ; ; CHECK-ARMv6-LABEL: load_i32_by_sext_i16: -; CHECK-ARMv6: ldrh r1, [r0, #2] -; CHECK-ARMv6-NEXT: ldrh r0, [r0] -; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16 -; CHECK-ARMv6-NEXT: bx lr - +; CHECK-ARMv6: ldr r0, [r0] +; CHECK-ARMv6-NEXT: bx lr %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = zext i16 %tmp1 to i32 @@ -492,7 +487,6 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-ARMv6: add r0, r0, r1 ; CHECK-ARMv6-NEXT: ldr r0, [r0, #13] ; CHECK-ARMv6-NEXT: bx lr - %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 %tmp3 = add nuw nsw i32 %i, 2 diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll index 25b3e5d8933..6fcbdda4e34 100644 --- a/test/CodeGen/PowerPC/ppc64le-aggregates.ll +++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll @@ -284,10 +284,7 @@ entry: ; CHECK-DAG: lfs 12, 12({{[0-9]+}}) ; CHECK-DAG: lfs 13, 16({{[0-9]+}}) -; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}}) -; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}}) -; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32 -; CHECK-DAG: or 10, [[REG0]], [[REG2]] +; CHECK-DAG: ld 10, 0({{[0-9]+}}) ; CHECK: bl test2 declare void @test2([8 x float], [5 x float], [2 x float]) diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll index 
77fea70b0bb..e737a51cf40 100644 --- a/test/CodeGen/X86/load-combine.ll +++ b/test/CodeGen/X86/load-combine.ll @@ -772,20 +772,25 @@ define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) { ; BSWAP-NEXT: bswapl %eax ; BSWAP-NEXT: retl ; -; CHECK64-LABEL: load_i32_by_i8_bswap_base_index_offset: -; CHECK64: # BB#0: -; CHECK64-NEXT: movslq %esi, %rax -; CHECK64-NEXT: movzbl (%rdi,%rax), %ecx -; CHECK64-NEXT: shll $24, %ecx -; CHECK64-NEXT: movzbl 1(%rdi,%rax), %edx -; CHECK64-NEXT: shll $16, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 2(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $8, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movzbl 3(%rdi,%rax), %eax -; CHECK64-NEXT: orl %ecx, %eax -; CHECK64-NEXT: retq +; MOVBE-LABEL: load_i32_by_i8_bswap_base_index_offset: +; MOVBE: # BB#0: +; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax +; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx +; MOVBE-NEXT: movbel (%ecx,%eax), %eax +; MOVBE-NEXT: retl +; +; BSWAP64-LABEL: load_i32_by_i8_bswap_base_index_offset: +; BSWAP64: # BB#0: +; BSWAP64-NEXT: movslq %esi, %rax +; BSWAP64-NEXT: movl (%rdi,%rax), %eax +; BSWAP64-NEXT: bswapl %eax +; BSWAP64-NEXT: retq +; +; MOVBE64-LABEL: load_i32_by_i8_bswap_base_index_offset: +; MOVBE64: # BB#0: +; MOVBE64-NEXT: movslq %esi, %rax +; MOVBE64-NEXT: movbel (%rdi,%rax), %eax +; MOVBE64-NEXT: retq %tmp = bitcast i32* %arg to i8* %tmp2 = getelementptr inbounds i8, i8* %tmp, i32 %arg1 %tmp3 = load i8, i8* %tmp2, align 1 @@ -886,18 +891,12 @@ define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: ; CHECK: # BB#0: ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: movzwl (%eax), %ecx -; CHECK-NEXT: movzwl 2(%eax), %eax -; CHECK-NEXT: shll $16, %eax -; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: movl (%eax), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_sext_i16: ; CHECK64: # BB#0: -; CHECK64-NEXT: movzwl (%rdi), %ecx -; CHECK64-NEXT: movzwl 2(%rdi), %eax -; CHECK64-NEXT: shll $16, %eax -; 
CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl (%rdi), %eax ; CHECK64-NEXT: retq %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 1 @@ -916,24 +915,9 @@ define i32 @load_i32_by_sext_i16(i32* %arg) { define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi4: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi5: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_base_offset_index: @@ -976,24 +960,9 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) { define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi6: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi7: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 14(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 15(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 16(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 13(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: 
load_i32_by_i8_base_offset_index_2: @@ -1047,39 +1016,15 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) { ; CHECK-LABEL: load_i32_by_i8_zaext_loads: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi8: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi9: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_zaext_loads: ; CHECK64: # BB#0: ; CHECK64-NEXT: movl %esi, %eax -; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx -; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx -; CHECK64-NEXT: shll $8, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $16, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movzbl 15(%rdi,%rax), %eax -; CHECK64-NEXT: shll $24, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl 12(%rdi,%rax), %eax ; CHECK64-NEXT: retq %tmp = add nuw nsw i32 %arg1, 3 %tmp2 = add nuw nsw i32 %arg1, 2 @@ -1127,39 +1072,15 @@ define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) { define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) { ; CHECK-LABEL: load_i32_by_i8_zsext_loads: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi10: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi11: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: 
movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movsbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_zsext_loads: ; CHECK64: # BB#0: ; CHECK64-NEXT: movl %esi, %eax -; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx -; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx -; CHECK64-NEXT: shll $8, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $16, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movsbl 15(%rdi,%rax), %eax -; CHECK64-NEXT: shll $24, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl 12(%rdi,%rax), %eax ; CHECK64-NEXT: retq %tmp = add nuw nsw i32 %arg1, 3 %tmp2 = add nuw nsw i32 %arg1, 2 -- 2.40.0