From 36dc99ec47817e70d98ffaac9846369a39208f68 Mon Sep 17 00:00:00 2001 From: Sanjay Patel Date: Thu, 17 Aug 2017 17:03:11 +0000 Subject: [PATCH] [PowerPC] add tests for vector select-of-constants; NFC We've discussed canonicalizing to this form in IR, so the backend should be prepared to lower these in ways better than what we see here. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@311099 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/PowerPC/vselect-constants.ll | 236 ++++++++++++++++++++++ 1 file changed, 236 insertions(+) create mode 100644 test/CodeGen/PowerPC/vselect-constants.ll diff --git a/test/CodeGen/PowerPC/vselect-constants.ll b/test/CodeGen/PowerPC/vselect-constants.ll new file mode 100644 index 00000000000..2dbe12e882d --- /dev/null +++ b/test/CodeGen/PowerPC/vselect-constants.ll @@ -0,0 +1,236 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-unknown | FileCheck %s + +; First, check the generic pattern for any 2 vector constants. Then, check special cases where +; the constants are all off-by-one. Finally, check the extra special cases where the constants +; include 0 or -1. +; Each minimal select test is repeated with a more typical pattern that includes a compare to +; generate the condition value. 
+ +define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_C1_or_C2_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha +; CHECK-NEXT: addis 4, 2, .LCPI0_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI0_1@toc@l +; CHECK-NEXT: lvx 18, 0, 3 +; CHECK-NEXT: lvx 19, 0, 4 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 51, 50, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_C1_or_C2_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: addis 3, 2, .LCPI1_0@toc@ha +; CHECK-NEXT: addis 4, 2, .LCPI1_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI1_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI1_1@toc@l +; CHECK-NEXT: lvx 19, 0, 3 +; CHECK-NEXT: lvx 4, 0, 4 +; CHECK-NEXT: xxsel 34, 36, 51, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1> + ret <4 x i32> %add +} + +define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_Cplus1_or_C_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha +; CHECK-NEXT: addis 4, 2, .LCPI2_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI2_1@toc@l +; CHECK-NEXT: lvx 18, 0, 3 +; CHECK-NEXT: lvx 19, 0, 4 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 51, 50, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 3001, i32 2, i32 0, i32 1>, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_Cplus1_or_C_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha +; 
CHECK-NEXT: addis 4, 2, .LCPI3_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI3_1@toc@l +; CHECK-NEXT: lvx 19, 0, 3 +; CHECK-NEXT: lvx 4, 0, 4 +; CHECK-NEXT: xxsel 34, 36, 51, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 3001, i32 2, i32 0, i32 1>, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_Cminus1_or_C_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha +; CHECK-NEXT: addis 4, 2, .LCPI4_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI4_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI4_1@toc@l +; CHECK-NEXT: lvx 18, 0, 3 +; CHECK-NEXT: lvx 19, 0, 4 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 51, 50, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 2999, i32 0, i32 -2, i32 -1>, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_Cminus1_or_C_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha +; CHECK-NEXT: addis 4, 2, .LCPI5_1@toc@ha +; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l +; CHECK-NEXT: addi 4, 4, .LCPI5_1@toc@l +; CHECK-NEXT: lvx 19, 0, 3 +; CHECK-NEXT: lvx 4, 0, 4 +; CHECK-NEXT: xxsel 34, 36, 51, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 2999, i32 0, i32 -2, i32 -1>, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_minus1_or_0_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: vspltisb 19, -1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 0, 51, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0> + ret <4 x i32> %add +} + +define <4 x 
i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_minus1_or_0_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: vspltisb 19, -1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: xxsel 34, 0, 51, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_0_or_minus1_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: vspltisb 19, -1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 51, 0, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_0_or_minus1_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: vspltisb 19, -1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: xxsel 34, 51, 0, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> + ret <4 x i32> %add +} + +define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_1_or_0_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: vspltisw 19, 1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 0, 51, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_1_or_0_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: vspltisw 19, 1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: xxsel 34, 0, 51, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x 
i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0> + ret <4 x i32> %add +} + +define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) { +; CHECK-LABEL: sel_0_or_1_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vspltisw 3, -16 +; CHECK-NEXT: vspltisw 4, 15 +; CHECK-NEXT: vspltisw 19, 1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: vsubuwm 3, 4, 3 +; CHECK-NEXT: vslw 2, 2, 3 +; CHECK-NEXT: vsraw 2, 2, 3 +; CHECK-NEXT: xxsel 34, 51, 0, 34 +; CHECK-NEXT: blr + %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + ret <4 x i32> %add +} + +define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: cmp_sel_0_or_1_vec: +; CHECK: # BB#0: +; CHECK-NEXT: vcmpequw 2, 2, 3 +; CHECK-NEXT: vspltisw 19, 1 +; CHECK-NEXT: xxlxor 0, 0, 0 +; CHECK-NEXT: xxsel 34, 51, 0, 34 +; CHECK-NEXT: blr + %cond = icmp eq <4 x i32> %x, %y + %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1> + ret <4 x i32> %add +} + -- 2.40.0