From 232aa4a3a2f8976966ba6dfce434b8bfdcd8b72f Mon Sep 17 00:00:00 2001
From: Bill Schmidt
Date: Wed, 12 Nov 2014 04:19:56 +0000
Subject: [PATCH] [PowerPC] Add vec_vsx_ld and vec_vsx_st intrinsics

This patch enables the vec_vsx_ld and vec_vsx_st intrinsics for
PowerPC, which provide programmer access to the lxvd2x, lxvw4x,
stxvd2x, and stxvw4x instructions.

New code in altivec.h defines these in terms of new builtins, which
are themselves defined in BuiltinsPPC.def.  The builtins are converted
to LLVM intrinsics in CGBuiltin.cpp.  Additional code is added to
builtins-ppc-vsx.c to verify the correct generation of the intrinsics.

Note that I moved the other VSX builtins so all VSX builtins will be
alphabetical in their own section in BuiltinsPPC.def.

There is a companion patch for LLVM.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@221768 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/clang/Basic/BuiltinsPPC.def | 22 ++++--
 lib/CodeGen/CGBuiltin.cpp           | 16 ++++++
 lib/Headers/altivec.h               | 85 +++++++++++++++++++++++++++++
 test/CodeGen/builtins-ppc-vsx.c     | 48 ++++++++++++++++
 4 files changed, 165 insertions(+), 6 deletions(-)

diff --git a/include/clang/Basic/BuiltinsPPC.def b/include/clang/Basic/BuiltinsPPC.def
index 2d84276349..12bf58f74e 100644
--- a/include/clang/Basic/BuiltinsPPC.def
+++ b/include/clang/Basic/BuiltinsPPC.def
@@ -132,9 +132,6 @@ BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "")
 BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "")
 BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "")
 BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "")
 
 BUILTIN(__builtin_altivec_mfvscr, "V8Us", "")
 
@@ -145,9 +142,6 @@ BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "")
 BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "")
 BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "")
 BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmindp, "ddd", "")
 
 BUILTIN(__builtin_altivec_mtvscr, "vV4i", "")
 
@@ -210,6 +204,22 @@ BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "")
 BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "")
 BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "")
 
+// VSX built-ins.
+
+BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")
+BUILTIN(__builtin_vsx_lxvw4x, "V4iivC*", "")
+
+BUILTIN(__builtin_vsx_stxvd2x, "vV2div*", "")
+BUILTIN(__builtin_vsx_stxvw4x, "vV4iiv*", "")
+
+BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "")
+BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "")
+BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "")
+
+BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "")
+BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "")
+BUILTIN(__builtin_vsx_xsmindp, "ddd", "")
+
 // FIXME: Obviously incomplete.
 
 #undef BUILTIN
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 9f6f9f3106..494e1efc5b 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -6035,6 +6035,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
   case PPC::BI__builtin_altivec_lvewx:
   case PPC::BI__builtin_altivec_lvsl:
   case PPC::BI__builtin_altivec_lvsr:
+  case PPC::BI__builtin_vsx_lxvd2x:
+  case PPC::BI__builtin_vsx_lxvw4x:
   {
     Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
 
@@ -6064,6 +6066,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
     case PPC::BI__builtin_altivec_lvsr:
       ID = Intrinsic::ppc_altivec_lvsr;
       break;
+    case PPC::BI__builtin_vsx_lxvd2x:
+      ID = Intrinsic::ppc_vsx_lxvd2x;
+      break;
+    case PPC::BI__builtin_vsx_lxvw4x:
+      ID = Intrinsic::ppc_vsx_lxvw4x;
+      break;
     }
     llvm::Function *F = CGM.getIntrinsic(ID);
     return Builder.CreateCall(F, Ops, "");
@@ -6075,6 +6083,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
   case PPC::BI__builtin_altivec_stvebx:
   case PPC::BI__builtin_altivec_stvehx:
   case PPC::BI__builtin_altivec_stvewx:
+  case PPC::BI__builtin_vsx_stxvd2x:
+  case PPC::BI__builtin_vsx_stxvw4x:
   {
     Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
     Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
@@ -6097,6 +6107,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
     case PPC::BI__builtin_altivec_stvewx:
       ID = Intrinsic::ppc_altivec_stvewx;
       break;
+    case PPC::BI__builtin_vsx_stxvd2x:
+      ID = Intrinsic::ppc_vsx_stxvd2x;
+      break;
+    case PPC::BI__builtin_vsx_stxvw4x:
+      ID = Intrinsic::ppc_vsx_stxvw4x;
+      break;
     }
     llvm::Function *F = CGM.getIntrinsic(ID);
     return Builder.CreateCall(F, Ops, "");
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 1ffefa8585..6e33091db2 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -8875,6 +8875,91 @@ vec_vupklsh(vector pixel __a)
 #endif
 }
 
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed int *__b)
+{
+  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b)
+{
+  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector float *__b)
+{
+  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b)
+{
+  return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b)
+{
+  return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector double *__b)
+{
+  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+#endif
+
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed int __a, int __b, vector signed int *__c)
+{
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned int __a, int __b, vector unsigned int *__c)
+{
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector float __a, int __b, vector float *__c)
+{
+  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed long long __a, int __b, vector signed long long *__c)
+{
+  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned long long __a, int __b,
+           vector unsigned long long *__c)
+{
+  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector double __a, int __b, vector double *__c)
+{
+  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+#endif
+
 /* vec_xor */
 
 #define __builtin_altivec_vxor vec_xor
diff --git a/test/CodeGen/builtins-ppc-vsx.c b/test/CodeGen/builtins-ppc-vsx.c
index bae961456e..9bd0a5df6f 100644
--- a/test/CodeGen/builtins-ppc-vsx.c
+++ b/test/CodeGen/builtins-ppc-vsx.c
@@ -3,10 +3,18 @@
 
 vector float vf = { -1.5, 2.5, -3.5, 4.5 };
 vector double vd = { 3.5, -7.5 };
+vector signed int vsi = { -1, 2, -3, 4 };
+vector unsigned int vui = { 0, 1, 2, 3 };
+vector signed long long vsll = { 255LL, -937LL };
+vector unsigned long long vull = { 1447LL, 2894LL };
 double d = 23.4;
 
 vector float res_vf;
 vector double res_vd;
+vector signed int res_vsi;
+vector unsigned int res_vui;
+vector signed long long res_vsll;
+vector unsigned long long res_vull;
 double res_d;
 
 void test1() {
@@ -37,4 +45,44 @@
 
   res_d = __builtin_vsx_xsmindp(d, d);
 // CHECK: @llvm.ppc.vsx.xsmindp
+
+  /* vec_vsx_ld */
+
+  res_vsi = vec_vsx_ld(0, &vsi);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+
+  res_vui = vec_vsx_ld(0, &vui);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+
+  res_vf = vec_vsx_ld (0, &vf);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+
+  res_vsll = vec_vsx_ld(0, &vsll);
+// CHECK: @llvm.ppc.vsx.lxvd2x
+
+  res_vull = vec_vsx_ld(0, &vull);
+// CHECK: @llvm.ppc.vsx.lxvd2x
+
+  res_vd = vec_vsx_ld(0, &vd);
+// CHECK: @llvm.ppc.vsx.lxvd2x
+
+  /* vec_vsx_st */
+
+  vec_vsx_st(vsi, 0, &res_vsi);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+
+  vec_vsx_st(vui, 0, &res_vui);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+
+  vec_vsx_st(vf, 0, &res_vf);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+
+  vec_vsx_st(vsll, 0, &res_vsll);
+// CHECK: @llvm.ppc.vsx.stxvd2x
+
+  vec_vsx_st(vull, 0, &res_vull);
+// CHECK: @llvm.ppc.vsx.stxvd2x
+
+  vec_vsx_st(vd, 0, &res_vd);
+// CHECK: @llvm.ppc.vsx.stxvd2x
 }
-- 
2.40.0
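
Usage note (editor's addition, not part of the patch): vec_vsx_ld takes a byte
offset and a base address and returns the 16 bytes at that effective address;
vec_vsx_st is the matching store.  Because they lower to lxvw4x/lxvd2x and
stxvw4x/stxvd2x, they are usable on addresses that are not 16-byte aligned,
unlike vec_ld/vec_st.  Below is a minimal sketch of how user code might call
them, assuming a VSX-enabled compile (e.g. clang -O2 -maltivec -mvsx for a
powerpc64 target); the function names are illustrative only.

#include <altivec.h>

/* 4 x int path: expected to map to lxvw4x / stxvw4x. */
void double_each_i32(const vector signed int *in, vector signed int *out)
{
  vector signed int v = vec_vsx_ld(0, in);   /* load 16 bytes at in + 0   */
  v = vec_add(v, v);                         /* ordinary AltiVec arithmetic */
  vec_vsx_st(v, 0, out);                     /* store 16 bytes at out + 0 */
}

/* 2 x double path: expected to map to lxvd2x / stxvd2x. */
void double_each_f64(const vector double *in, vector double *out)
{
  vector double v = vec_vsx_ld(0, in);
  v = v + v;                                 /* Clang vector-extension arithmetic */
  vec_vsx_st(v, 0, out);
}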