From: Daniel Sanders
Date: Wed, 30 Oct 2013 15:20:38 +0000 (+0000)
Subject: [mips][msa] Added support for matching bmnz, bmnzi, bmz, and bmzi from normal IR...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=33d7fbc1008ada44845fc49b1bd704778c5fe1e4;p=clang

[mips][msa] Added support for matching bmnz, bmnzi, bmz, and bmzi from normal IR
(i.e. not intrinsics)

Also corrected the definition of the intrinsics for these instructions (the
result register is also the first operand), and added intrinsics for bsel and
bseli to clang (they already existed in the backend).

These four operations are mostly equivalent to bsel and bseli (the difference
is which operand is tied to the result). As a result, some of the tests changed
as described below.

bitwise.ll:
- bsel.v test adapted so that the mask is unknown at compile-time. This stops
  it from emitting bmnzi.b instead of the intended bsel.v.
- The bseli.b test now tests the right thing, namely the case when one of the
  values is a uimm8, rather than when the condition is a uimm8 (which is
  covered by bmnzi.b).

compare.ll:
- bsel.v tests now (correctly) emit bmnz.v instead of bsel.v because this is
  the same operation (see MSA.txt).

i8.ll:
- CHECK-DAG-ized test.
- bmzi.b test now (correctly) emits the equivalent bmnzi.b with swapped
  operands because this is the same operation (see MSA.txt).
- bseli.b still emits bseli.b though, because the immediate makes it
  distinguishable from bmnzi.b.

vec.ll:
- CHECK-DAG-ized test.
- bmz.v tests now (correctly) emit bmnz.v with swapped operands (see MSA.txt).
- bsel.v tests now (correctly) emit bmnz.v with swapped operands (see MSA.txt).

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@193693 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/clang/Basic/BuiltinsMips.def b/include/clang/Basic/BuiltinsMips.def
index 091f77f476..c03ed3a990 100644
--- a/include/clang/Basic/BuiltinsMips.def
+++ b/include/clang/Basic/BuiltinsMips.def
@@ -281,13 +281,13 @@ BUILTIN(__builtin_msa_binsri_h, "V8UsV8UsV8UsIUi", "nc")
 BUILTIN(__builtin_msa_binsri_w, "V4UiV4UiV4UiIUi", "nc")
 BUILTIN(__builtin_msa_binsri_d, "V2ULLiV2ULLiV2ULLiIUi", "nc")
 
-BUILTIN(__builtin_msa_bmnz_v, "V16UcV16UcV16Uc", "nc")
+BUILTIN(__builtin_msa_bmnz_v, "V16UcV16UcV16UcV16Uc", "nc")
 
-BUILTIN(__builtin_msa_bmnzi_b, "V16UcV16UcIUi", "nc")
+BUILTIN(__builtin_msa_bmnzi_b, "V16UcV16UcV16UcIUi", "nc")
 
-BUILTIN(__builtin_msa_bmz_v, "V16UcV16UcV16Uc", "nc")
+BUILTIN(__builtin_msa_bmz_v, "V16UcV16UcV16UcV16Uc", "nc")
 
-BUILTIN(__builtin_msa_bmzi_b, "V16UcV16UcIUi", "nc")
+BUILTIN(__builtin_msa_bmzi_b, "V16UcV16UcV16UcIUi", "nc")
 
 BUILTIN(__builtin_msa_bneg_b, "V16UcV16UcV16Uc", "nc")
 BUILTIN(__builtin_msa_bneg_h, "V8UsV8UsV8Us", "nc")
@@ -306,6 +306,10 @@ BUILTIN(__builtin_msa_bnz_d, "iV2ULLi", "nc")
 
 BUILTIN(__builtin_msa_bnz_v, "iV16Uc", "nc")
 
+BUILTIN(__builtin_msa_bsel_v, "V16UcV16UcV16UcV16Uc", "nc")
+
+BUILTIN(__builtin_msa_bseli_b, "V16UcV16UcV16UcIUi", "nc")
+
 BUILTIN(__builtin_msa_bset_b, "V16UcV16UcV16Uc", "nc")
 BUILTIN(__builtin_msa_bset_h, "V8UsV8UsV8Us", "nc")
 BUILTIN(__builtin_msa_bset_w, "V4UiV4UiV4Ui", "nc")
diff --git a/test/CodeGen/builtins-mips-msa.c b/test/CodeGen/builtins-mips-msa.c
index 47e16b790a..da78a6df25 100644
--- a/test/CodeGen/builtins-mips-msa.c
+++ b/test/CodeGen/builtins-mips-msa.c
@@ -170,19 +170,19 @@ void test(void) {
   v4i32_r = __builtin_msa_binsri_w(v4i32_r, v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.binsri.w(
   v2i64_r = __builtin_msa_binsri_d(v2i64_r, v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.binsri.d(
 
-  v16i8_r = __builtin_msa_bmnz_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
-  v8i16_r = __builtin_msa_bmnz_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
-  v4i32_r = __builtin_msa_bmnz_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
-  v2i64_r = __builtin_msa_bmnz_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+  v16i8_r = __builtin_msa_bmnz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+  v8i16_r = __builtin_msa_bmnz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+  v4i32_r = __builtin_msa_bmnz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+  v2i64_r = __builtin_msa_bmnz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
 
-  v16i8_r = __builtin_msa_bmnzi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmnzi.b(
+  v16i8_r = __builtin_msa_bmnzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmnzi.b(
 
-  v16i8_r = __builtin_msa_bmz_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
-  v8i16_r = __builtin_msa_bmz_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
-  v4i32_r = __builtin_msa_bmz_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
-  v2i64_r = __builtin_msa_bmz_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+  v16i8_r = __builtin_msa_bmz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+  v8i16_r = __builtin_msa_bmz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+  v4i32_r = __builtin_msa_bmz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+  v2i64_r = __builtin_msa_bmz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
 
-  v16i8_r = __builtin_msa_bmzi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmzi.b(
+  v16i8_r = __builtin_msa_bmzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmzi.b(
 
   v16i8_r = __builtin_msa_bneg_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bneg.b(
   v8i16_r = __builtin_msa_bneg_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bneg.h(
@@ -201,6 +201,13 @@ void test(void) {
 
   int_r = __builtin_msa_bnz_v(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.v(
 
+  v16i8_r = __builtin_msa_bsel_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+  v8i16_r = __builtin_msa_bsel_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+  v4i32_r = __builtin_msa_bsel_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+  v2i64_r = __builtin_msa_bsel_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+
+  v16i8_r = __builtin_msa_bseli_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bseli.b(
+
   v16i8_r = __builtin_msa_bset_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bset.b(
   v8i16_r = __builtin_msa_bset_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bset.h(
   v4i32_r = __builtin_msa_bset_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bset.w(
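
Editor's note (not part of the patch): a minimal scalar sketch of the bit-move
semantics the commit message relies on, assuming the operand roles described in
MSA.txt / the MSA documentation. The helper names below are illustrative only.
All three operations compute (x & ~m) | (y & m); they differ only in which
operand supplies the mask m and is therefore tied to the destination register,
which is why the builtins above now take the result register as their first
argument.

#include <stdint.h>

/* bmnz wd, ws, wt: where wt has a 1 bit, take the bit from ws;
   where wt has a 0 bit, keep the bit already in wd (wd tied to result). */
static uint8_t bmnz(uint8_t wd, uint8_t ws, uint8_t wt) {
  return (uint8_t)((ws & wt) | (wd & ~wt));
}

/* bmz wd, ws, wt: where wt has a 0 bit, take the bit from ws;
   where wt has a 1 bit, keep the bit already in wd (wd tied to result). */
static uint8_t bmz(uint8_t wd, uint8_t ws, uint8_t wt) {
  return (uint8_t)((ws & ~wt) | (wd & wt));
}

/* bsel wd, ws, wt: wd itself is the mask; a 0 bit selects ws, a 1 bit
   selects wt (so here the mask operand is the one tied to the result). */
static uint8_t bsel(uint8_t wd, uint8_t ws, uint8_t wt) {
  return (uint8_t)((ws & ~wd) | (wt & wd));
}

Under this model, bmz(wd, ws, wt) equals bmnz(ws, wd, wt), which is why the
bmz.v and bmzi.b tests above now match the equivalent bmnz form with swapped
operands.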