From: Roman Lebedev
Date: Thu, 30 May 2019 13:02:11 +0000 (+0000)
Subject: [NFC][Codegen] Add add+sub/sub+add constant-fold tests for D62257
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c06bb2cf410820af353eca0a754a112efe90f599;p=llvm

add+sub and sub+add, when the second operands are constants, should be
folded into a single add, just like add+add already is.
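For example, with the splat constants used in the tests below, the add+sub
sequence

  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
  %t1 = sub <4 x i32> %t0, <i32 -2, i32 -2, i32 -2, i32 -2>

would be expected to fold to the single operation

  %t1 = add <4 x i32> %arg, <i32 10, i32 10, i32 10, i32 10>

since 8 - (-2) is 10 in every lane.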
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@362093 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/AArch64/vec_add.ll b/test/CodeGen/AArch64/vec_add.ll
new file mode 100644
index 00000000000..9609822b54f
--- /dev/null
+++ b/test/CodeGen/AArch64/vec_add.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s
+
+declare void @use(<4 x i32> %arg)
+
+define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp9 = add <2 x i64> %b, %a
+  ret <2 x i64> %tmp9
+}
+
+define <4 x i32> @add_const_add_const(<4 x i32> %arg) {
+; CHECK-LABEL: add_const_add_const:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v1.4s, #10
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const(<4 x i32> %arg) {
+; CHECK-LABEL: add_const_sub_const:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mvni v2.4s, #1
+; CHECK-NEXT:    movi v1.4s, #8
+; CHECK-NEXT:    sub v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  %t1 = sub <4 x i32> %t0, <i32 -2, i32 -2, i32 -2, i32 -2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) {
+; CHECK-LABEL: add_const_sub_const_extrause:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    movi v1.4s, #8
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    bl use
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    mvni v0.4s, #1
+; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  call void @use(<4 x i32> %t0)
+  %t1 = sub <4 x i32> %t0, <i32 -2, i32 -2, i32 -2, i32 -2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) {
+; CHECK-LABEL: add_const_sub_const_nonsplat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x9, .LCPI4_1
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI4_1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %t0 = add <4 x i32> %arg, 
+  %t1 = sub <4 x i32> %t0, 
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const(<4 x i32> %arg) {
+; CHECK-LABEL: sub_const_add_const:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mvni v1.4s, #1
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    movi v1.4s, #8
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+  %t0 = sub <4 x i32> %arg, <i32 -2, i32 -2, i32 -2, i32 -2>
+  %t1 = add <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
+; CHECK-LABEL: sub_const_add_const_extrause:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mvni v1.4s, #1
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    bl use
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    movi v0.4s, #8
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+  %t0 = sub <4 x i32> %arg, <i32 -2, i32 -2, i32 -2, i32 -2>
+  call void @use(<4 x i32> %t0)
+  %t1 = add <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) {
+; CHECK-LABEL: sub_const_add_const_nonsplat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    adrp x8, .LCPI7_1
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI7_1]
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %t0 = sub <4 x i32> %arg, 
+  %t1 = add <4 x i32> %t0, 
+  ret <4 x i32> %t1
+}
diff --git a/test/CodeGen/X86/vec_add.ll b/test/CodeGen/X86/vec_add.ll
index 3d144e8ea3e..48ccf34dd0b 100644
--- a/test/CodeGen/X86/vec_add.ll
+++ b/test/CodeGen/X86/vec_add.ll
@@ -1,7 +1,166 @@
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,SSE,X86-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,SSE,X64-SSE
+
+declare void @use(<4 x i32> %arg)
 
 define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) {
-entry:
-  %tmp9 = add <2 x i64> %b, %a ; <<2 x i64>> [#uses=1]
+; X86-LABEL: test:
+; X86:       # %bb.0:
+; X86-NEXT:    paddq %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test:
+; X64:       # %bb.0:
+; X64-NEXT:    paddq %xmm1, %xmm0
+; X64-NEXT:    retq
+  %tmp9 = add <2 x i64> %b, %a
   ret <2 x i64> %tmp9
 }
+
+define <4 x i32> @add_const_add_const(<4 x i32> %arg) {
+; X86-LABEL: add_const_add_const:
+; X86:       # %bb.0:
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_const_add_const:
+; X64:       # %bb.0:
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  %t1 = add <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const(<4 x i32> %arg) {
+; X86-LABEL: add_const_sub_const:
+; X86:       # %bb.0:
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_const_sub_const:
+; X64:       # %bb.0:
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  %t1 = sub <4 x i32> %t0, <i32 -2, i32 -2, i32 -2, i32 -2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const_extrause(<4 x i32> %arg) {
+; X86-LABEL: add_const_sub_const_extrause:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $28, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 32
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X86-NEXT:    calll use
+; X86-NEXT:    movdqu (%esp), %xmm0 # 16-byte Reload
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    addl $28, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_const_sub_const_extrause:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $24, %rsp
+; X64-NEXT:    .cfi_def_cfa_offset 32
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT:    callq use
+; X64-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    addq $24, %rsp
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    retq
+  %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
+  call void @use(<4 x i32> %t0)
+  %t1 = sub <4 x i32> %t0, <i32 -2, i32 -2, i32 -2, i32 -2>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @add_const_sub_const_nonsplat(<4 x i32> %arg) {
+; X86-LABEL: add_const_sub_const_nonsplat:
+; X86:       # %bb.0:
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_const_sub_const_nonsplat:
+; X64:       # %bb.0:
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
+  %t0 = add <4 x i32> %arg, 
+  %t1 = sub <4 x i32> %t0, 
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const(<4 x i32> %arg) {
+; X86-LABEL: sub_const_add_const:
+; X86:       # %bb.0:
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub_const_add_const:
+; X64:       # %bb.0:
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
+  %t0 = sub <4 x i32> %arg, <i32 -2, i32 -2, i32 -2, i32 -2>
+  %t1 = add <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
+; X86-LABEL: sub_const_add_const_extrause:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $28, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 32
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X86-NEXT:    calll use
+; X86-NEXT:    movdqu (%esp), %xmm0 # 16-byte Reload
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    addl $28, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub_const_add_const_extrause:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $24, %rsp
+; X64-NEXT:    .cfi_def_cfa_offset 32
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT:    callq use
+; X64-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    addq $24, %rsp
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    retq
+  %t0 = sub <4 x i32> %arg, <i32 -2, i32 -2, i32 -2, i32 -2>
+  call void @use(<4 x i32> %t0)
+  %t1 = add <4 x i32> %t0, <i32 8, i32 8, i32 8, i32 8>
+  ret <4 x i32> %t1
+}
+
+define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) {
+; X86-LABEL: sub_const_add_const_nonsplat:
+; X86:       # %bb.0:
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub_const_add_const_nonsplat:
+; X64:       # %bb.0:
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
+  %t0 = sub <4 x i32> %arg, 
+  %t1 = add <4 x i32> %t0, 
+  ret <4 x i32> %t1
+}