From 0f3d4650f71e36c36c06d55198217036e729e511 Mon Sep 17 00:00:00 2001
From: Bruno Cardoso Lopes
Date: Mon, 26 Jan 2015 20:06:51 +0000
Subject: [PATCH] [x86][MMX] Rename and clean up tests: arith, intrinsics and shuffle

- Rename mmx-builtins to mmx-intrinsics to match other intrinsic test naming.
- Remove tests that duplicate functionality already covered by mmx-intrinsics.ll.
- Move arith-related tests to mmx-arith.ll.
- Move MMX-related shuffle tests to vector-shuffle-mmx.ll.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227130 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll | 64 ---
 test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll | 100 ----
 test/CodeGen/X86/mmx-arith.ll | 543 +++++++++---------
 .../{mmx-builtins.ll => mmx-intrinsics.ll} | 0
 test/CodeGen/X86/mmx-punpckhdq.ll | 31 -
 test/CodeGen/X86/mmx-shift.ll | 39 --
 test/CodeGen/X86/mmx-shuffle.ll | 31 -
 test/CodeGen/X86/vector-shuffle-mmx.ll | 40 ++
 8 files changed, 311 insertions(+), 537 deletions(-)
 delete mode 100644 test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
 delete mode 100644 test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
 rename test/CodeGen/X86/{mmx-builtins.ll => mmx-intrinsics.ll} (100%)
 delete mode 100644 test/CodeGen/X86/mmx-punpckhdq.ll
 delete mode 100644 test/CodeGen/X86/mmx-shift.ll
 delete mode 100644 test/CodeGen/X86/mmx-shuffle.ll
 create mode 100644 test/CodeGen/X86/vector-shuffle-mmx.ll

diff --git a/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll b/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
deleted file mode 100644
index 11c0bf95798..00000000000
--- a/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s
-; There are no MMX instructions here. We use add+adcl for the adds.
-
-define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
-entry:
- %tmp2942 = icmp eq i32 %count, 0 ; [#uses=1]
- br i1 %tmp2942, label %bb31, label %bb26
-
-bb26: ; preds = %bb26, %entry
-
-; CHECK: addl
-; CHECK: adcl
-
- %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; [#uses=3]
- %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp14 = load <1 x i64>* %tmp13 ; <<1 x i64>> [#uses=1]
- %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp19 = load <1 x i64>* %tmp18 ; <<1 x i64>> [#uses=1]
- %tmp21 = add <1 x i64> %tmp19, %tmp14 ; <<1 x i64>> [#uses=1]
- %tmp22 = add <1 x i64> %tmp21, %sum.035.0 ; <<1 x i64>> [#uses=2]
- %tmp25 = add i32 %i.037.0, 1 ; [#uses=2]
- %tmp29 = icmp ult i32 %tmp25, %count ; [#uses=1]
- br i1 %tmp29, label %bb26, label %bb31
-
-bb31: ; preds = %bb26, %entry
- %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- ret <1 x i64> %sum.035.1
-}
-
-
-; This is the original test converted to use MMX intrinsics.
-
-define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
-entry:
- %tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
- %tmp2942 = icmp eq i32 %count, 0 ; [#uses=1]
- br i1 %tmp2942, label %bb31, label %bb26
-
-bb26: ; preds = %bb26, %entry
-
-; CHECK: movq ({{.*}},8), %mm
-; CHECK: paddq ({{.*}},8), %mm
-; CHECK: paddq %mm{{[0-7]}}, %mm
-
- %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; [#uses=3]
- %sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; [#uses=1]
- %tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0 ; [#uses=1]
- %tmp14 = load x86_mmx* %tmp13 ; [#uses=1]
- %tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0 ; [#uses=1]
- %tmp19 = load x86_mmx* %tmp18 ; [#uses=1]
- %tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14) ; [#uses=1]
- %tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0) ; [#uses=2]
- %tmp25 = add i32 %i.037.0, 1 ; [#uses=2]
- %tmp29 = icmp ult i32 %tmp25, %count ; [#uses=1]
- br i1 %tmp29, label %bb26, label %bb31
-
-bb31: ; preds = %bb26, %entry
- %sum.035.1 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; [#uses=1]
- %t = bitcast x86_mmx %sum.035.1 to <1 x i64>
- ret <1 x i64> %t
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll b/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
deleted file mode 100644
index 60025bfcdc8..00000000000
--- a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
+++ /dev/null
@@ -1,100 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
-; There are no MMX operations here, so we use XMM or i64.
-
-; CHECK: ti8
-define void @ti8(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <8 x i8>
- %tmp2 = bitcast double %b to <8 x i8>
- %tmp3 = add <8 x i8> %tmp1, %tmp2
-; CHECK: paddb
- store <8 x i8> %tmp3, <8 x i8>* null
- ret void
-}
-
-; CHECK: ti16
-define void @ti16(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <4 x i16>
- %tmp2 = bitcast double %b to <4 x i16>
- %tmp3 = add <4 x i16> %tmp1, %tmp2
-; CHECK: paddw
- store <4 x i16> %tmp3, <4 x i16>* null
- ret void
-}
-
-; CHECK: ti32
-define void @ti32(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <2 x i32>
- %tmp2 = bitcast double %b to <2 x i32>
- %tmp3 = add <2 x i32> %tmp1, %tmp2
-; CHECK: paddd
- store <2 x i32> %tmp3, <2 x i32>* null
- ret void
-}
-
-; CHECK: ti64
-define void @ti64(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <1 x i64>
- %tmp2 = bitcast double %b to <1 x i64>
- %tmp3 = add <1 x i64> %tmp1, %tmp2
-; CHECK: addq
- store <1 x i64> %tmp3, <1 x i64>* null
- ret void
-}
-
-; MMX intrinsics calls get us MMX instructions.
-; CHECK: ti8a -define void @ti8a(double %a, double %b) nounwind { -entry: - %tmp1 = bitcast double %a to x86_mmx -; CHECK: movdq2q - %tmp2 = bitcast double %b to x86_mmx -; CHECK: movdq2q - %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2) - store x86_mmx %tmp3, x86_mmx* null - ret void -} - -; CHECK: ti16a -define void @ti16a(double %a, double %b) nounwind { -entry: - %tmp1 = bitcast double %a to x86_mmx -; CHECK: movdq2q - %tmp2 = bitcast double %b to x86_mmx -; CHECK: movdq2q - %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2) - store x86_mmx %tmp3, x86_mmx* null - ret void -} - -; CHECK: ti32a -define void @ti32a(double %a, double %b) nounwind { -entry: - %tmp1 = bitcast double %a to x86_mmx -; CHECK: movdq2q - %tmp2 = bitcast double %b to x86_mmx -; CHECK: movdq2q - %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2) - store x86_mmx %tmp3, x86_mmx* null - ret void -} - -; CHECK: ti64a -define void @ti64a(double %a, double %b) nounwind { -entry: - %tmp1 = bitcast double %a to x86_mmx -; CHECK: movdq2q - %tmp2 = bitcast double %b to x86_mmx -; CHECK: movdq2q - %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2) - store x86_mmx %tmp3, x86_mmx* null - ret void -} - -declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) diff --git a/test/CodeGen/X86/mmx-arith.ll b/test/CodeGen/X86/mmx-arith.ll index 68174873240..d9d1fbfaa65 100644 --- a/test/CodeGen/X86/mmx-arith.ll +++ b/test/CodeGen/X86/mmx-arith.ll @@ -1,309 +1,308 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx +; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s +; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s ;; A basic sanity check to make sure that MMX arithmetic actually compiles. ;; First is a straight translation of the original with bitcasts as needed. 
-define void @foo(x86_mmx* %A, x86_mmx* %B) { +; X32-LABEL: test0 +; X64-LABEL: test0 +define void @test0(x86_mmx* %A, x86_mmx* %B) { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8> - %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8> - %tmp4 = add <8 x i8> %tmp1a, %tmp3a ; <<8 x i8>> [#uses=2] - %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx - store x86_mmx %tmp4a, x86_mmx* %A - %tmp7 = load x86_mmx* %B ; [#uses=1] - %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; [#uses=2] - store x86_mmx %tmp12, x86_mmx* %A - %tmp16 = load x86_mmx* %B ; [#uses=1] - %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; [#uses=2] - store x86_mmx %tmp21, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8> - %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8> - %tmp28 = sub <8 x i8> %tmp21a, %tmp27a ; <<8 x i8>> [#uses=2] - %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx - store x86_mmx %tmp28a, x86_mmx* %A - %tmp31 = load x86_mmx* %B ; [#uses=1] - %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; [#uses=2] - store x86_mmx %tmp36, x86_mmx* %A - %tmp40 = load x86_mmx* %B ; [#uses=1] - %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; [#uses=2] - store x86_mmx %tmp45, x86_mmx* %A - %tmp51 = load x86_mmx* %B ; [#uses=1] - %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8> - %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8> - %tmp52 = mul <8 x i8> %tmp45a, %tmp51a ; <<8 x i8>> [#uses=2] - %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx - store x86_mmx %tmp52a, x86_mmx* %A - %tmp57 = load x86_mmx* %B ; [#uses=1] - %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8> - %tmp58 = and <8 x i8> %tmp52, %tmp57a ; <<8 x i8>> [#uses=2] - %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx - store x86_mmx %tmp58a, x86_mmx* %A - %tmp63 = load x86_mmx* %B ; [#uses=1] - %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8> - %tmp64 = or <8 x i8> %tmp58, %tmp63a ; <<8 x i8>> [#uses=2] - %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx - store x86_mmx %tmp64a, x86_mmx* %A - %tmp69 = load x86_mmx* %B ; [#uses=1] - %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8> - %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8> - %tmp70 = xor <8 x i8> %tmp64b, %tmp69a ; <<8 x i8>> [#uses=1] - %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx - store x86_mmx %tmp70a, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = load x86_mmx* %A + %tmp3 = load x86_mmx* %B + %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8> + %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8> + %tmp4 = add <8 x i8> %tmp1a, %tmp3a + %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx + store x86_mmx %tmp4a, x86_mmx* %A + %tmp7 = load x86_mmx* %B + %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7) + store x86_mmx %tmp12, x86_mmx* %A + %tmp16 = load x86_mmx* %B + %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16) + store x86_mmx %tmp21, x86_mmx* %A + %tmp27 = load x86_mmx* %B + %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8> + %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8> + %tmp28 = sub <8 x i8> %tmp21a, %tmp27a + %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx + store x86_mmx %tmp28a, x86_mmx* %A + %tmp31 = load x86_mmx* %B + %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31) + store x86_mmx %tmp36, x86_mmx* %A + %tmp40 = load x86_mmx* %B + %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, 
x86_mmx %tmp40) + store x86_mmx %tmp45, x86_mmx* %A + %tmp51 = load x86_mmx* %B + %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8> + %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8> + %tmp52 = mul <8 x i8> %tmp45a, %tmp51a + %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx + store x86_mmx %tmp52a, x86_mmx* %A + %tmp57 = load x86_mmx* %B + %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8> + %tmp58 = and <8 x i8> %tmp52, %tmp57a + %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx + store x86_mmx %tmp58a, x86_mmx* %A + %tmp63 = load x86_mmx* %B + %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8> + %tmp64 = or <8 x i8> %tmp58, %tmp63a + %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx + store x86_mmx %tmp64a, x86_mmx* %A + %tmp69 = load x86_mmx* %B + %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8> + %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8> + %tmp70 = xor <8 x i8> %tmp64b, %tmp69a + %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx + store x86_mmx %tmp70a, x86_mmx* %A + tail call void @llvm.x86.mmx.emms() + ret void } -define void @baz(x86_mmx* %A, x86_mmx* %B) { +; X32-LABEL: test1 +; X64-LABEL: test1 +define void @test1(x86_mmx* %A, x86_mmx* %B) { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32> - %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32> - %tmp4 = add <2 x i32> %tmp1a, %tmp3a ; <<2 x i32>> [#uses=2] - %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx - store x86_mmx %tmp4a, x86_mmx* %A - %tmp9 = load x86_mmx* %B ; [#uses=1] - %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32> - %tmp10 = sub <2 x i32> %tmp4, %tmp9a ; <<2 x i32>> [#uses=2] - %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx - store x86_mmx %tmp10a, x86_mmx* %A - %tmp15 = load x86_mmx* %B ; [#uses=1] - %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32> - %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32> - %tmp16 = mul <2 x i32> %tmp10b, %tmp15a ; <<2 x i32>> [#uses=2] - %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx - store x86_mmx %tmp16a, x86_mmx* %A - %tmp21 = load x86_mmx* %B ; [#uses=1] - %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32> - %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32> - %tmp22 = and <2 x i32> %tmp16b, %tmp21a ; <<2 x i32>> [#uses=2] - %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx - store x86_mmx %tmp22a, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32> - %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32> - %tmp28 = or <2 x i32> %tmp22b, %tmp27a ; <<2 x i32>> [#uses=2] - %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx - store x86_mmx %tmp28a, x86_mmx* %A - %tmp33 = load x86_mmx* %B ; [#uses=1] - %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32> - %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32> - %tmp34 = xor <2 x i32> %tmp28b, %tmp33a ; <<2 x i32>> [#uses=1] - %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx - store x86_mmx %tmp34a, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = load x86_mmx* %A + %tmp3 = load x86_mmx* %B + %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32> + %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32> + %tmp4 = add <2 x i32> %tmp1a, %tmp3a + %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx + store x86_mmx %tmp4a, x86_mmx* %A + %tmp9 = load x86_mmx* %B + %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32> + %tmp10 = sub <2 x i32> %tmp4, %tmp9a + %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx + store x86_mmx %tmp10a, x86_mmx* %A + %tmp15 = load x86_mmx* %B + %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32> + %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32> + %tmp16 = mul <2 x i32> %tmp10b, %tmp15a + %tmp16a = bitcast <2 x i32> %tmp16 to 
x86_mmx + store x86_mmx %tmp16a, x86_mmx* %A + %tmp21 = load x86_mmx* %B + %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32> + %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32> + %tmp22 = and <2 x i32> %tmp16b, %tmp21a + %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx + store x86_mmx %tmp22a, x86_mmx* %A + %tmp27 = load x86_mmx* %B + %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32> + %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32> + %tmp28 = or <2 x i32> %tmp22b, %tmp27a + %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx + store x86_mmx %tmp28a, x86_mmx* %A + %tmp33 = load x86_mmx* %B + %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32> + %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32> + %tmp34 = xor <2 x i32> %tmp28b, %tmp33a + %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx + store x86_mmx %tmp34a, x86_mmx* %A + tail call void @llvm.x86.mmx.emms( ) + ret void } -define void @bar(x86_mmx* %A, x86_mmx* %B) { +; X32-LABEL: test2 +; X64-LABEL: test2 +define void @test2(x86_mmx* %A, x86_mmx* %B) { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16> - %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16> - %tmp4 = add <4 x i16> %tmp1a, %tmp3a ; <<4 x i16>> [#uses=2] - %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx - store x86_mmx %tmp4a, x86_mmx* %A - %tmp7 = load x86_mmx* %B ; [#uses=1] - %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; [#uses=2] - store x86_mmx %tmp12, x86_mmx* %A - %tmp16 = load x86_mmx* %B ; [#uses=1] - %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; [#uses=2] - store x86_mmx %tmp21, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16> - %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16> - %tmp28 = sub <4 x i16> %tmp21a, %tmp27a ; <<4 x i16>> [#uses=2] - %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx - store x86_mmx %tmp28a, x86_mmx* %A - %tmp31 = load x86_mmx* %B ; [#uses=1] - %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; [#uses=2] - store x86_mmx %tmp36, x86_mmx* %A - %tmp40 = load x86_mmx* %B ; [#uses=1] - %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; [#uses=2] - store x86_mmx %tmp45, x86_mmx* %A - %tmp51 = load x86_mmx* %B ; [#uses=1] - %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16> - %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16> - %tmp52 = mul <4 x i16> %tmp45a, %tmp51a ; <<4 x i16>> [#uses=2] - %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx - store x86_mmx %tmp52a, x86_mmx* %A - %tmp55 = load x86_mmx* %B ; [#uses=1] - %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 ) ; [#uses=2] - store x86_mmx %tmp60, x86_mmx* %A - %tmp64 = load x86_mmx* %B ; [#uses=1] - %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; [#uses=1] - %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; [#uses=2] - store x86_mmx %tmp70, x86_mmx* %A - %tmp75 = load x86_mmx* %B ; [#uses=1] - %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16> - %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16> - %tmp76 = and <4 x i16> %tmp70a, %tmp75a ; <<4 x i16>> [#uses=2] - %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx - store x86_mmx %tmp76a, x86_mmx* %A - %tmp81 = load x86_mmx* %B ; [#uses=1] - %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16> - %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16> - %tmp82 = or <4 x i16> %tmp76b, %tmp81a ; <<4 x i16>> [#uses=2] - %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx - store x86_mmx %tmp82a, x86_mmx* %A 
- %tmp87 = load x86_mmx* %B ; [#uses=1] - %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16> - %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16> - %tmp88 = xor <4 x i16> %tmp82b, %tmp87a ; <<4 x i16>> [#uses=1] - %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx - store x86_mmx %tmp88a, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = load x86_mmx* %A + %tmp3 = load x86_mmx* %B + %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16> + %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16> + %tmp4 = add <4 x i16> %tmp1a, %tmp3a + %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx + store x86_mmx %tmp4a, x86_mmx* %A + %tmp7 = load x86_mmx* %B + %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7) + store x86_mmx %tmp12, x86_mmx* %A + %tmp16 = load x86_mmx* %B + %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16) + store x86_mmx %tmp21, x86_mmx* %A + %tmp27 = load x86_mmx* %B + %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16> + %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16> + %tmp28 = sub <4 x i16> %tmp21a, %tmp27a + %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx + store x86_mmx %tmp28a, x86_mmx* %A + %tmp31 = load x86_mmx* %B + %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31) + store x86_mmx %tmp36, x86_mmx* %A + %tmp40 = load x86_mmx* %B + %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40) + store x86_mmx %tmp45, x86_mmx* %A + %tmp51 = load x86_mmx* %B + %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16> + %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16> + %tmp52 = mul <4 x i16> %tmp45a, %tmp51a + %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx + store x86_mmx %tmp52a, x86_mmx* %A + %tmp55 = load x86_mmx* %B + %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55) + store x86_mmx %tmp60, x86_mmx* %A + %tmp64 = load x86_mmx* %B + %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64) + %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx + store x86_mmx %tmp70, x86_mmx* %A + %tmp75 = load x86_mmx* %B + %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16> + %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16> + %tmp76 = and <4 x i16> %tmp70a, %tmp75a + %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx + store x86_mmx %tmp76a, x86_mmx* %A + %tmp81 = load x86_mmx* %B + %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16> + %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16> + %tmp82 = or <4 x i16> %tmp76b, %tmp81a + %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx + store x86_mmx %tmp82a, x86_mmx* %A + %tmp87 = load x86_mmx* %B + %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16> + %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16> + %tmp88 = xor <4 x i16> %tmp82b, %tmp87a + %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx + store x86_mmx %tmp88a, x86_mmx* %A + tail call void @llvm.x86.mmx.emms( ) + ret void } -;; The following is modified to use MMX intrinsics everywhere they work. 
+; X32-LABEL: test3 +define <1 x i64> @test3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind { +entry: + %tmp2942 = icmp eq i32 %count, 0 + br i1 %tmp2942, label %bb31, label %bb26 + +bb26: +; X32: addl +; X32: adcl + %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] + %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] + %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0 + %tmp14 = load <1 x i64>* %tmp13 + %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0 + %tmp19 = load <1 x i64>* %tmp18 + %tmp21 = add <1 x i64> %tmp19, %tmp14 + %tmp22 = add <1 x i64> %tmp21, %sum.035.0 + %tmp25 = add i32 %i.037.0, 1 + %tmp29 = icmp ult i32 %tmp25, %count + br i1 %tmp29, label %bb26, label %bb31 + +bb31: + %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] + ret <1 x i64> %sum.035.1 +} -define void @fooa(x86_mmx* %A, x86_mmx* %B) { +; There are no MMX operations here, so we use XMM or i64. +; X64-LABEL: ti8 +define void @ti8(double %a, double %b) nounwind { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 ) ; [#uses=2] - store x86_mmx %tmp4, x86_mmx* %A - %tmp7 = load x86_mmx* %B ; [#uses=1] - %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 ) ; [#uses=2] - store x86_mmx %tmp12, x86_mmx* %A - %tmp16 = load x86_mmx* %B ; [#uses=1] - %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; [#uses=2] - store x86_mmx %tmp21, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 ) ; [#uses=2] - store x86_mmx %tmp28, x86_mmx* %A - %tmp31 = load x86_mmx* %B ; [#uses=1] - %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 ) ; [#uses=2] - store x86_mmx %tmp36, x86_mmx* %A - %tmp40 = load x86_mmx* %B ; [#uses=1] - %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; [#uses=2] - store x86_mmx %tmp45, x86_mmx* %A - %tmp51 = load x86_mmx* %B ; [#uses=1] - %tmp51a = bitcast x86_mmx %tmp51 to i64 - %tmp51aa = bitcast i64 %tmp51a to <8 x i8> - %tmp51b = bitcast x86_mmx %tmp45 to <8 x i8> - %tmp52 = mul <8 x i8> %tmp51b, %tmp51aa ; [#uses=2] - %tmp52a = bitcast <8 x i8> %tmp52 to i64 - %tmp52aa = bitcast i64 %tmp52a to x86_mmx - store x86_mmx %tmp52aa, x86_mmx* %A - %tmp57 = load x86_mmx* %B ; [#uses=1] - %tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 ) ; [#uses=2] - store x86_mmx %tmp58, x86_mmx* %A - %tmp63 = load x86_mmx* %B ; [#uses=1] - %tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 ) ; [#uses=2] - store x86_mmx %tmp64, x86_mmx* %A - %tmp69 = load x86_mmx* %B ; [#uses=1] - %tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 ) ; [#uses=2] - store x86_mmx %tmp70, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = bitcast double %a to <8 x i8> + %tmp2 = bitcast double %b to <8 x i8> + %tmp3 = add <8 x i8> %tmp1, %tmp2 +; X64: paddb + store <8 x i8> %tmp3, <8 x i8>* null + ret void } -define void @baza(x86_mmx* %A, x86_mmx* %B) { +; X64-LABEL: ti16 +define void @ti16(double %a, double %b) nounwind { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 ) ; [#uses=2] - store x86_mmx %tmp4, x86_mmx* %A - %tmp9 = load x86_mmx* %B ; [#uses=1] - 
%tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 ) ; [#uses=2] - store x86_mmx %tmp10, x86_mmx* %A - %tmp15 = load x86_mmx* %B ; [#uses=1] - %tmp10a = bitcast x86_mmx %tmp10 to <2 x i32> - %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32> - %tmp16 = mul <2 x i32> %tmp10a, %tmp15a ; [#uses=2] - %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx - store x86_mmx %tmp16a, x86_mmx* %A - %tmp21 = load x86_mmx* %B ; [#uses=1] - %tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 ) ; [#uses=2] - store x86_mmx %tmp22, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 ) ; [#uses=2] - store x86_mmx %tmp28, x86_mmx* %A - %tmp33 = load x86_mmx* %B ; [#uses=1] - %tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 ) ; [#uses=2] - store x86_mmx %tmp34, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = bitcast double %a to <4 x i16> + %tmp2 = bitcast double %b to <4 x i16> + %tmp3 = add <4 x i16> %tmp1, %tmp2 +; X64: paddw + store <4 x i16> %tmp3, <4 x i16>* null + ret void } -define void @bara(x86_mmx* %A, x86_mmx* %B) { +; X64-LABEL: ti32 +define void @ti32(double %a, double %b) nounwind { entry: - %tmp1 = load x86_mmx* %A ; [#uses=1] - %tmp3 = load x86_mmx* %B ; [#uses=1] - %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 ) ; [#uses=2] - store x86_mmx %tmp4, x86_mmx* %A - %tmp7 = load x86_mmx* %B ; [#uses=1] - %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 ) ; [#uses=2] - store x86_mmx %tmp12, x86_mmx* %A - %tmp16 = load x86_mmx* %B ; [#uses=1] - %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; [#uses=2] - store x86_mmx %tmp21, x86_mmx* %A - %tmp27 = load x86_mmx* %B ; [#uses=1] - %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 ) ; [#uses=2] - store x86_mmx %tmp28, x86_mmx* %A - %tmp31 = load x86_mmx* %B ; [#uses=1] - %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 ) ; [#uses=2] - store x86_mmx %tmp36, x86_mmx* %A - %tmp40 = load x86_mmx* %B ; [#uses=1] - %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; [#uses=2] - store x86_mmx %tmp45, x86_mmx* %A - %tmp51 = load x86_mmx* %B ; [#uses=1] - %tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 ) ; [#uses=2] - store x86_mmx %tmp52, x86_mmx* %A - %tmp55 = load x86_mmx* %B ; [#uses=1] - %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 ) ; [#uses=2] - store x86_mmx %tmp60, x86_mmx* %A - %tmp64 = load x86_mmx* %B ; [#uses=1] - %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; [#uses=1] - %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; [#uses=2] - store x86_mmx %tmp70, x86_mmx* %A - %tmp75 = load x86_mmx* %B ; [#uses=1] - %tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 ) ; [#uses=2] - store x86_mmx %tmp76, x86_mmx* %A - %tmp81 = load x86_mmx* %B ; [#uses=1] - %tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 ) ; [#uses=2] - store x86_mmx %tmp82, x86_mmx* %A - %tmp87 = load x86_mmx* %B ; [#uses=1] - %tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 ) ; [#uses=2] - store x86_mmx %tmp88, x86_mmx* %A - tail call void @llvm.x86.mmx.emms( ) - ret void + %tmp1 = bitcast double %a to <2 x i32> + %tmp2 = bitcast double %b to <2 
x i32> + %tmp3 = add <2 x i32> %tmp1, %tmp2 +; X64: paddd + store <2 x i32> %tmp3, <2 x i32>* null + ret void } -declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx) +; X64-LABEL: ti64 +define void @ti64(double %a, double %b) nounwind { +entry: + %tmp1 = bitcast double %a to <1 x i64> + %tmp2 = bitcast double %b to <1 x i64> + %tmp3 = add <1 x i64> %tmp1, %tmp2 +; X64: addq + store <1 x i64> %tmp3, <1 x i64>* null + ret void +} -declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx) +; MMX intrinsics calls get us MMX instructions. +; X64-LABEL: ti8a +define void @ti8a(double %a, double %b) nounwind { +entry: + %tmp1 = bitcast double %a to x86_mmx +; X64: movdq2q + %tmp2 = bitcast double %b to x86_mmx +; X64: movdq2q + %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2) + store x86_mmx %tmp3, x86_mmx* null + ret void +} -declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx) +; X64-LABEL: ti16a +define void @ti16a(double %a, double %b) nounwind { +entry: + %tmp1 = bitcast double %a to x86_mmx +; X64: movdq2q + %tmp2 = bitcast double %b to x86_mmx +; X64: movdq2q + %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2) + store x86_mmx %tmp3, x86_mmx* null + ret void +} -declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx) +; X64-LABEL: ti32a +define void @ti32a(double %a, double %b) nounwind { +entry: + %tmp1 = bitcast double %a to x86_mmx +; X64: movdq2q + %tmp2 = bitcast double %b to x86_mmx +; X64: movdq2q + %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2) + store x86_mmx %tmp3, x86_mmx* null + ret void +} -declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx) +; X64-LABEL: ti64a +define void @ti64a(double %a, double %b) nounwind { +entry: + %tmp1 = bitcast double %a to x86_mmx +; X64: movdq2q + %tmp2 = bitcast double %b to x86_mmx +; X64: movdq2q + %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2) + store x86_mmx %tmp3, x86_mmx* null + ret void +} + +declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx) +declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx) declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx) declare void @llvm.x86.mmx.emms() -declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx) declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx) declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx) declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx) declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx) -declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx) diff --git a/test/CodeGen/X86/mmx-builtins.ll 
b/test/CodeGen/X86/mmx-intrinsics.ll similarity index 100% rename from test/CodeGen/X86/mmx-builtins.ll rename to test/CodeGen/X86/mmx-intrinsics.ll diff --git a/test/CodeGen/X86/mmx-punpckhdq.ll b/test/CodeGen/X86/mmx-punpckhdq.ll deleted file mode 100644 index 9e8f5bf5336..00000000000 --- a/test/CodeGen/X86/mmx-punpckhdq.ll +++ /dev/null @@ -1,31 +0,0 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx,+sse4.2 -mtriple=x86_64-apple-darwin10 | FileCheck %s -; There are no MMX operations in bork; promoted to XMM. - -define void @bork(<1 x i64>* %x) { -; CHECK: bork -; CHECK: movlpd -entry: - %tmp2 = load <1 x i64>* %x ; <<1 x i64>> [#uses=1] - %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32> ; <<2 x i32>> [#uses=1] - %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ; <<2 x i32>> [#uses=1] - %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> ; <<1 x i64>> [#uses=1] - store <1 x i64> %tmp10, <1 x i64>* %x - tail call void @llvm.x86.mmx.emms( ) - ret void -} - -; pork uses MMX. - -define void @pork(x86_mmx* %x) { -; CHECK: pork -; CHECK: punpckhdq -entry: - %tmp2 = load x86_mmx* %x ; [#uses=1] - %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2) - store x86_mmx %tmp9, x86_mmx* %x - tail call void @llvm.x86.mmx.emms( ) - ret void -} - -declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx) -declare void @llvm.x86.mmx.emms() diff --git a/test/CodeGen/X86/mmx-shift.ll b/test/CodeGen/X86/mmx-shift.ll deleted file mode 100644 index c7c6e75a507..00000000000 --- a/test/CodeGen/X86/mmx-shift.ll +++ /dev/null @@ -1,39 +0,0 @@ -; RUN: llc < %s -march=x86 -mattr=+mmx | FileCheck %s -; RUN: llc < %s -march=x86-64 -mattr=+mmx | FileCheck %s - -define i64 @t1(<1 x i64> %mm1) nounwind { -entry: - %tmp = bitcast <1 x i64> %mm1 to x86_mmx - %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; [#uses=1] - %retval1112 = bitcast x86_mmx %tmp6 to i64 - ret i64 %retval1112 - -; CHECK-LABEL: t1: -; CHECK: psllq $32 -} - -declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone - -define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind { -entry: - %tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; [#uses=1] - %retval1112 = bitcast x86_mmx %tmp7 to i64 - ret i64 %retval1112 - -; CHECK-LABEL: t2: -; CHECK: psrad -} - -declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone - -define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind { -entry: - %tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone ; [#uses=1] - %retval1314 = bitcast x86_mmx %tmp8 to i64 - ret i64 %retval1314 - -; CHECK-LABEL: t3: -; CHECK: psrlw -} - -declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone diff --git a/test/CodeGen/X86/mmx-shuffle.ll b/test/CodeGen/X86/mmx-shuffle.ll deleted file mode 100644 index 869f32b89fb..00000000000 --- a/test/CodeGen/X86/mmx-shuffle.ll +++ /dev/null @@ -1,31 +0,0 @@ -; RUN: llc < %s -mcpu=yonah -; PR1427 - -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64" -target triple = "i686-pc-linux-gnu" - %struct.DrawHelper = type { void (i32, %struct.QT_FT_Span*, i8*)*, void (i32, %struct.QT_FT_Span*, i8*)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i32, i32)* } - %struct.QBasicAtomic = type { i32 } - %struct.QClipData = type 
{ i32, %"struct.QClipData::ClipLine"*, i32, i32, %struct.QT_FT_Span*, i32, i32, i32, i32 } - %"struct.QClipData::ClipLine" = type { i32, %struct.QT_FT_Span* } - %struct.QRasterBuffer = type { %struct.QRect, %struct.QRegion, %struct.QClipData*, %struct.QClipData*, i8, i32, i32, %struct.DrawHelper*, i32, i32, i32, i8* } - %struct.QRect = type { i32, i32, i32, i32 } - %struct.QRegion = type { %"struct.QRegion::QRegionData"* } - %"struct.QRegion::QRegionData" = type { %struct.QBasicAtomic, %struct._XRegion*, i8*, %struct.QRegionPrivate* } - %struct.QRegionPrivate = type opaque - %struct.QT_FT_Span = type { i16, i16, i16, i8 } - %struct._XRegion = type opaque - -define void @_Z19qt_bitmapblit16_sseP13QRasterBufferiijPKhiii(%struct.QRasterBuffer* %rasterBuffer, i32 %x, i32 %y, i32 %color, i8* %src, i32 %width, i32 %height, i32 %stride) { -entry: - %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32> ; <<2 x i32>> [#uses=1] - %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>) ; <<2 x i32>> [#uses=1] - %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16> ; <<4 x i16>> [#uses=1] - %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 > ; <<4 x i16>> [#uses=1] - %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8> ; <<8 x i8>> [#uses=1] - %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx - %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx - tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null ) - ret void -} - -declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) diff --git a/test/CodeGen/X86/vector-shuffle-mmx.ll b/test/CodeGen/X86/vector-shuffle-mmx.ll new file mode 100644 index 00000000000..516a4c23eb5 --- /dev/null +++ b/test/CodeGen/X86/vector-shuffle-mmx.ll @@ -0,0 +1,40 @@ +; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s +; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s + +; If there is no explicit MMX type usage, always promote to XMM. + +define void @test0(<1 x i64>* %x) { +; X32-LABEL: test0 +; X64-LABEL: test0 +; X32: pshufd $213 +; X64: pshufd $213 +; X32-NEXT: movlpd %xmm +; X64-NEXT: movq %xmm +entry: + %tmp2 = load <1 x i64>* %x + %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32> + %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > + %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> + store <1 x i64> %tmp10, <1 x i64>* %x + ret void +} + +define void @test1() { +; X32-LABEL: test1: +; X32: pshuflw +; X32-NEXT: pshufhw +; X32-NEXT: pshufd +; X32: maskmovq +entry: + %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32> + %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>) + %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16> + %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 > + %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8> + %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx + %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx + tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null) + ret void +} + +declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) -- 2.40.0