From cb98b65ac4bdc7de6d1b9261210f39bf21c8ff60 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sun, 5 Feb 2017 23:31:48 +0000
Subject: [PATCH] [AVX-512] Add all masked and unmasked versions of VPMULDQ and VPMULUDQ to load folding tables.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294163 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp               | 16 +++++
 .../CodeGen/X86/stack-folding-int-avx512vl.ll | 59 +++++++++++++++++++
 2 files changed, 75 insertions(+)
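[Note added for this write-up, not part of the original commit message: each
folding-table entry maps a register-register opcode to its register-memory
twin, which X86InstrInfo::foldMemoryOperandImpl uses to rewrite a
reload-plus-multiply pair into a single load-execute instruction when an
operand has been spilled to the stack. The trailing 0 is the fold-flags
column; EVEX memory operands carry no alignment requirement, so no TB_ALIGN
flag is needed here. A rough sketch of the effect, with register numbers and
the stack offset invented for illustration:

    # without a table entry: reload the spilled operand, then multiply
    vmovdqa  -24(%rsp), %xmm1
    vpmuludq %xmm1, %xmm0, %xmm0

    # with VPMULUDQZ128rr -> VPMULUDQZ128rm in the table: one instruction
    vpmuludq -24(%rsp), %xmm0, %xmm0
]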
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 8885393c742..f977797b61e 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2060,6 +2060,10 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPMADDUBSWZ256rr, X86::VPMADDUBSWZ256rm, 0 },
     { X86::VPMADDWDZ128rr, X86::VPMADDWDZ128rm, 0 },
     { X86::VPMADDWDZ256rr, X86::VPMADDWDZ256rm, 0 },
+    { X86::VPMULDQZ128rr, X86::VPMULDQZ128rm, 0 },
+    { X86::VPMULDQZ256rr, X86::VPMULDQZ256rm, 0 },
+    { X86::VPMULUDQZ128rr, X86::VPMULUDQZ128rm, 0 },
+    { X86::VPMULUDQZ256rr, X86::VPMULUDQZ256rm, 0 },
     { X86::VPORDZ128rr, X86::VPORDZ128rm, 0 },
     { X86::VPORDZ256rr, X86::VPORDZ256rm, 0 },
     { X86::VPORQZ128rr, X86::VPORQZ128rm, 0 },
@@ -2376,6 +2380,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZrrkz, X86::VPERMWZrmkz, 0 },
     { X86::VPMADDUBSWZrrkz, X86::VPMADDUBSWZrmkz, 0 },
     { X86::VPMADDWDZrrkz, X86::VPMADDWDZrmkz, 0 },
+    { X86::VPMULDQZrrkz, X86::VPMULDQZrmkz, 0 },
+    { X86::VPMULUDQZrrkz, X86::VPMULUDQZrmkz, 0 },
     { X86::VPORDZrrkz, X86::VPORDZrmkz, 0 },
     { X86::VPORQZrrkz, X86::VPORQZrmkz, 0 },
     { X86::VPSHUFBZrrkz, X86::VPSHUFBZrmkz, 0 },
@@ -2458,6 +2464,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZ256rrkz, X86::VPERMWZ256rmkz, 0 },
     { X86::VPMADDUBSWZ256rrkz, X86::VPMADDUBSWZ256rmkz, 0 },
     { X86::VPMADDWDZ256rrkz, X86::VPMADDWDZ256rmkz, 0 },
+    { X86::VPMULDQZ256rrkz, X86::VPMULDQZ256rmkz, 0 },
+    { X86::VPMULUDQZ256rrkz, X86::VPMULUDQZ256rmkz, 0 },
     { X86::VPORDZ256rrkz, X86::VPORDZ256rmkz, 0 },
     { X86::VPORQZ256rrkz, X86::VPORQZ256rmkz, 0 },
     { X86::VPSHUFBZ256rrkz, X86::VPSHUFBZ256rmkz, 0 },
@@ -2530,6 +2538,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZ128rrkz, X86::VPERMWZ128rmkz, 0 },
     { X86::VPMADDUBSWZ128rrkz, X86::VPMADDUBSWZ128rmkz, 0 },
     { X86::VPMADDWDZ128rrkz, X86::VPMADDWDZ128rmkz, 0 },
+    { X86::VPMULDQZ128rrkz, X86::VPMULDQZ128rmkz, 0 },
+    { X86::VPMULUDQZ128rrkz, X86::VPMULUDQZ128rmkz, 0 },
     { X86::VPORDZ128rrkz, X86::VPORDZ128rmkz, 0 },
     { X86::VPORQZ128rrkz, X86::VPORQZ128rmkz, 0 },
     { X86::VPSHUFBZ128rrkz, X86::VPSHUFBZ128rmkz, 0 },
@@ -2727,6 +2737,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZrrk, X86::VPERMWZrmk, 0 },
     { X86::VPMADDUBSWZrrk, X86::VPMADDUBSWZrmk, 0 },
     { X86::VPMADDWDZrrk, X86::VPMADDWDZrmk, 0 },
+    { X86::VPMULDQZrrk, X86::VPMULDQZrmk, 0 },
+    { X86::VPMULUDQZrrk, X86::VPMULUDQZrmk, 0 },
     { X86::VPORDZrrk, X86::VPORDZrmk, 0 },
     { X86::VPORQZrrk, X86::VPORQZrmk, 0 },
     { X86::VPSHUFBZrrk, X86::VPSHUFBZrmk, 0 },
@@ -2822,6 +2834,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZ256rrk, X86::VPERMWZ256rmk, 0 },
     { X86::VPMADDUBSWZ256rrk, X86::VPMADDUBSWZ256rmk, 0 },
     { X86::VPMADDWDZ256rrk, X86::VPMADDWDZ256rmk, 0 },
+    { X86::VPMULDQZ256rrk, X86::VPMULDQZ256rmk, 0 },
+    { X86::VPMULUDQZ256rrk, X86::VPMULUDQZ256rmk, 0 },
     { X86::VPORDZ256rrk, X86::VPORDZ256rmk, 0 },
     { X86::VPORQZ256rrk, X86::VPORQZ256rmk, 0 },
     { X86::VPSHUFBZ256rrk, X86::VPSHUFBZ256rmk, 0 },
@@ -2908,6 +2922,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPERMWZ128rrk, X86::VPERMWZ128rmk, 0 },
     { X86::VPMADDUBSWZ128rrk, X86::VPMADDUBSWZ128rmk, 0 },
     { X86::VPMADDWDZ128rrk, X86::VPMADDWDZ128rmk, 0 },
+    { X86::VPMULDQZ128rrk, X86::VPMULDQZ128rmk, 0 },
+    { X86::VPMULUDQZ128rrk, X86::VPMULUDQZ128rmk, 0 },
     { X86::VPORDZ128rrk, X86::VPORDZ128rmk, 0 },
     { X86::VPORQZ128rrk, X86::VPORQZ128rmk, 0 },
     { X86::VPSHUFBZ128rrk, X86::VPSHUFBZ128rmk, 0 },
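[Note added for this write-up: the hunks above cover all four encodings of
each multiply. The rr/rm pair is the unmasked form, rrkz/rmkz the
zero-masking form, and rrk/rmk the merge-masking form. Illustrative folded
assembly for the 256-bit unsigned multiply (the mask register and stack
offset are invented for the example):

    # unmasked fold (VPMULUDQZ256rr -> VPMULUDQZ256rm)
    vpmuludq -40(%rsp), %ymm1, %ymm0
    # merge-masking fold (rrk -> rmk): lanes with a zero bit in k1 keep ymm0's old value
    vpmuludq -40(%rsp), %ymm1, %ymm0 {%k1}
    # zero-masking fold (rrkz -> rmkz): lanes with a zero bit in k1 are cleared
    vpmuludq -40(%rsp), %ymm1, %ymm0 {%k1} {z}
]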
diff --git a/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
index 77afc49b257..06cd19e9577 100644
--- a/test/CodeGen/X86/stack-folding-int-avx512vl.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -861,6 +861,65 @@ define <4 x i64> @stack_fold_pmovzxwq_mask_ymm(<4 x i64> %passthru, <8 x i16> %a
   ret <4 x i64> %6
 }
 
+define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pmuldq
+  ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
+  ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmuldq_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pmuldq_ymm
+  ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> %a1)
+  ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pmuludq
+  ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
+  ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmuludq_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pmuludq_ymm
+  ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+  ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmuludq_ymm_mask(<4 x i64>* %passthru, <8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
+  ;CHECK-LABEL: stack_fold_pmuludq_ymm_mask
+  ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+  %3 = bitcast i8 %mask to <8 x i1>
+  %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = load <4 x i64>, <4 x i64>* %passthru
+  %6 = select <4 x i1> %4, <4 x i64> %2, <4 x i64> %5
+  ret <4 x i64> %6
+}
+
+define <4 x i64> @stack_fold_pmuludq_ymm_maskz(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
+  ;CHECK-LABEL: stack_fold_pmuludq_ymm_maskz
+  ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+  %3 = bitcast i8 %mask to <8 x i1>
+  %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = select <4 x i1> %4, <4 x i64> %2, <4 x i64> zeroinitializer
+  ret <4 x i64> %5
+}
+
 define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
   ;CHECK-LABEL: stack_fold_punpckhbw
   ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
-- 
2.50.1
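[Note added for this write-up: the masked tests force a spill with an asm
block that clobbers every vector register the operands could otherwise live
in, then build the predicate by bitcasting the i8 mask to <8 x i1> and
extracting the low lanes with a shufflevector. The patch only adds masked
tests for the ymm VPMULUDQ forms; a minimal sketch of an analogous
merge-masked 128-bit VPMULDQ test in the same style (this function is
hypothetical, not part of the patch, and assumes the same clobber-list and
masking idiom) would be:

    define <2 x i64> @stack_fold_pmuldq_mask(<2 x i64>* %passthru, <4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
      ;CHECK-LABEL: stack_fold_pmuldq_mask
      ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
      ; clobber every register the inputs could stay in so they are spilled and reloaded
      %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
      %2 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
      ; keep the low two mask bits and merge the result with the passthru value
      %3 = bitcast i8 %mask to <8 x i1>
      %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
      %5 = load <2 x i64>, <2 x i64>* %passthru
      %6 = select <2 x i1> %4, <2 x i64> %2, <2 x i64> %5
      ret <2 x i64> %6
    }
]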