From: Anton Yartsev
Date: Thu, 14 Oct 2010 14:37:46 +0000 (+0000)
Subject: support for AltiVec extensions from the Cell architecture
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=44270d6abff30415cdd873164823f48a45be7f8c;p=clang

support for AltiVec extensions from the Cell architecture

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@116478 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 59d9d2d3fd..49e468ca65 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -8517,6 +8517,1408 @@ vec_vxor(vector float a, vector bool int b)
 }
 /* ------------------------ extensions for CBEA ----------------------------- */
+
+/* vec_extract */
+
+static signed char __ATTRS_o_ai
+vec_extract(vector signed char a, int b)
+{
+  return a[b];
+}
+
+static unsigned char __ATTRS_o_ai
+vec_extract(vector unsigned char a, int b)
+{
+  return a[b];
+}
+
+static short __ATTRS_o_ai
+vec_extract(vector short a, int b)
+{
+  return a[b];
+}
+
+static unsigned short __ATTRS_o_ai
+vec_extract(vector unsigned short a, int b)
+{
+  return a[b];
+}
+
+static int __ATTRS_o_ai
+vec_extract(vector int a, int b)
+{
+  return a[b];
+}
+
+static unsigned int __ATTRS_o_ai
+vec_extract(vector unsigned int a, int b)
+{
+  return a[b];
+}
+
+static float __ATTRS_o_ai
+vec_extract(vector float a, int b)
+{
+  return a[b];
+}
+
+/* vec_insert */
+
+static vector signed char __ATTRS_o_ai
+vec_insert(signed char a, vector signed char b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_insert(unsigned char a, vector unsigned char b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector short __ATTRS_o_ai
+vec_insert(short a, vector short b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_insert(unsigned short a, vector unsigned short b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector int __ATTRS_o_ai
+vec_insert(int a, vector int b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_insert(unsigned int a, vector unsigned int b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+static vector float __ATTRS_o_ai
+vec_insert(float a, vector float b, int c)
+{
+  b[c] = a;
+  return b;
+}
+
+/* vec_lvlx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, signed char *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector signed char)(0),
+                  vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, vector signed char *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector signed char)(0),
+                  vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, unsigned char *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector unsigned char)(0),
+                  vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, vector unsigned char *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector unsigned char)(0),
+                  vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvlx(int a, vector bool char *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector bool char)(0),
+                  vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, short *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector short)(0),
+                  vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, vector short *b)
+{
+  return vec_perm(vec_ld(a, b),
+                  (vector short)(0),
+                  vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int a, unsigned short *b)
+{
+ 
return vec_perm(vec_ld(a, b), + (vector unsigned short)(0), + vec_lvsl(a, b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvlx(int a, vector unsigned short *b) +{ + return vec_perm(vec_ld(a, b), + (vector unsigned short)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool short __ATTRS_o_ai +vec_lvlx(int a, vector bool short *b) +{ + return vec_perm(vec_ld(a, b), + (vector bool short)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector pixel __ATTRS_o_ai +vec_lvlx(int a, vector pixel *b) +{ + return vec_perm(vec_ld(a, b), + (vector pixel)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector int __ATTRS_o_ai +vec_lvlx(int a, int *b) +{ + return vec_perm(vec_ld(a, b), + (vector int)(0), + vec_lvsl(a, b)); +} + +static vector int __ATTRS_o_ai +vec_lvlx(int a, vector int *b) +{ + return vec_perm(vec_ld(a, b), + (vector int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvlx(int a, unsigned int *b) +{ + return vec_perm(vec_ld(a, b), + (vector unsigned int)(0), + vec_lvsl(a, b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvlx(int a, vector unsigned int *b) +{ + return vec_perm(vec_ld(a, b), + (vector unsigned int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool int __ATTRS_o_ai +vec_lvlx(int a, vector bool int *b) +{ + return vec_perm(vec_ld(a, b), + (vector bool int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector float __ATTRS_o_ai +vec_lvlx(int a, float *b) +{ + return vec_perm(vec_ld(a, b), + (vector float)(0), + vec_lvsl(a, b)); +} + +static vector float __ATTRS_o_ai +vec_lvlx(int a, vector float *b) +{ + return vec_perm(vec_ld(a, b), + (vector float)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +/* vec_lvlxl */ + +static vector signed char __ATTRS_o_ai +vec_lvlxl(int a, signed char *b) +{ + return vec_perm(vec_ldl(a, b), + (vector signed char)(0), + vec_lvsl(a, b)); +} + +static vector signed char __ATTRS_o_ai +vec_lvlxl(int a, vector signed char *b) +{ + return vec_perm(vec_ldl(a, b), + (vector signed char)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvlxl(int a, unsigned char *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned char)(0), + vec_lvsl(a, b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvlxl(int a, vector unsigned char *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned char)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool char __ATTRS_o_ai +vec_lvlxl(int a, vector bool char *b) +{ + return vec_perm(vec_ldl(a, b), + (vector bool char)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector short __ATTRS_o_ai +vec_lvlxl(int a, short *b) +{ + return vec_perm(vec_ldl(a, b), + (vector short)(0), + vec_lvsl(a, b)); +} + +static vector short __ATTRS_o_ai +vec_lvlxl(int a, vector short *b) +{ + return vec_perm(vec_ldl(a, b), + (vector short)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvlxl(int a, unsigned short *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned short)(0), + vec_lvsl(a, b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvlxl(int a, vector unsigned short *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned short)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool short __ATTRS_o_ai +vec_lvlxl(int a, vector bool short *b) +{ + return vec_perm(vec_ldl(a, b), + (vector bool short)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector pixel __ATTRS_o_ai +vec_lvlxl(int a, 
vector pixel *b) +{ + return vec_perm(vec_ldl(a, b), + (vector pixel)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector int __ATTRS_o_ai +vec_lvlxl(int a, int *b) +{ + return vec_perm(vec_ldl(a, b), + (vector int)(0), + vec_lvsl(a, b)); +} + +static vector int __ATTRS_o_ai +vec_lvlxl(int a, vector int *b) +{ + return vec_perm(vec_ldl(a, b), + (vector int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvlxl(int a, unsigned int *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned int)(0), + vec_lvsl(a, b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvlxl(int a, vector unsigned int *b) +{ + return vec_perm(vec_ldl(a, b), + (vector unsigned int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool int __ATTRS_o_ai +vec_lvlxl(int a, vector bool int *b) +{ + return vec_perm(vec_ldl(a, b), + (vector bool int)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector float __ATTRS_o_ai +vec_lvlxl(int a, float *b) +{ + return vec_perm(vec_ldl(a, b), + (vector float)(0), + vec_lvsl(a, b)); +} + +static vector float __ATTRS_o_ai +vec_lvlxl(int a, vector float *b) +{ + return vec_perm(vec_ldl(a, b), + (vector float)(0), + vec_lvsl(a, (unsigned char *)b)); +} + +/* vec_lvrx */ + +static vector signed char __ATTRS_o_ai +vec_lvrx(int a, signed char *b) +{ + return vec_perm((vector signed char)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector signed char __ATTRS_o_ai +vec_lvrx(int a, vector signed char *b) +{ + return vec_perm((vector signed char)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvrx(int a, unsigned char *b) +{ + return vec_perm((vector unsigned char)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvrx(int a, vector unsigned char *b) +{ + return vec_perm((vector unsigned char)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool char __ATTRS_o_ai +vec_lvrx(int a, vector bool char *b) +{ + return vec_perm((vector bool char)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector short __ATTRS_o_ai +vec_lvrx(int a, short *b) +{ + return vec_perm((vector short)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector short __ATTRS_o_ai +vec_lvrx(int a, vector short *b) +{ + return vec_perm((vector short)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvrx(int a, unsigned short *b) +{ + return vec_perm((vector unsigned short)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvrx(int a, vector unsigned short *b) +{ + return vec_perm((vector unsigned short)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool short __ATTRS_o_ai +vec_lvrx(int a, vector bool short *b) +{ + return vec_perm((vector bool short)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector pixel __ATTRS_o_ai +vec_lvrx(int a, vector pixel *b) +{ + return vec_perm((vector pixel)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector int __ATTRS_o_ai +vec_lvrx(int a, int *b) +{ + return vec_perm((vector int)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector int __ATTRS_o_ai +vec_lvrx(int a, vector int *b) +{ + return vec_perm((vector int)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvrx(int a, unsigned int *b) +{ + return vec_perm((vector 
unsigned int)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvrx(int a, vector unsigned int *b) +{ + return vec_perm((vector unsigned int)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool int __ATTRS_o_ai +vec_lvrx(int a, vector bool int *b) +{ + return vec_perm((vector bool int)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector float __ATTRS_o_ai +vec_lvrx(int a, float *b) +{ + return vec_perm((vector float)(0), + vec_ld(a, b), + vec_lvsl(a, b)); +} + +static vector float __ATTRS_o_ai +vec_lvrx(int a, vector float *b) +{ + return vec_perm((vector float)(0), + vec_ld(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +/* vec_lvrxl */ + +static vector signed char __ATTRS_o_ai +vec_lvrxl(int a, signed char *b) +{ + return vec_perm((vector signed char)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector signed char __ATTRS_o_ai +vec_lvrxl(int a, vector signed char *b) +{ + return vec_perm((vector signed char)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvrxl(int a, unsigned char *b) +{ + return vec_perm((vector unsigned char)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvrxl(int a, vector unsigned char *b) +{ + return vec_perm((vector unsigned char)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool char __ATTRS_o_ai +vec_lvrxl(int a, vector bool char *b) +{ + return vec_perm((vector bool char)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector short __ATTRS_o_ai +vec_lvrxl(int a, short *b) +{ + return vec_perm((vector short)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector short __ATTRS_o_ai +vec_lvrxl(int a, vector short *b) +{ + return vec_perm((vector short)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvrxl(int a, unsigned short *b) +{ + return vec_perm((vector unsigned short)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvrxl(int a, vector unsigned short *b) +{ + return vec_perm((vector unsigned short)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool short __ATTRS_o_ai +vec_lvrxl(int a, vector bool short *b) +{ + return vec_perm((vector bool short)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector pixel __ATTRS_o_ai +vec_lvrxl(int a, vector pixel *b) +{ + return vec_perm((vector pixel)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector int __ATTRS_o_ai +vec_lvrxl(int a, int *b) +{ + return vec_perm((vector int)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector int __ATTRS_o_ai +vec_lvrxl(int a, vector int *b) +{ + return vec_perm((vector int)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvrxl(int a, unsigned int *b) +{ + return vec_perm((vector unsigned int)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvrxl(int a, vector unsigned int *b) +{ + return vec_perm((vector unsigned int)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector bool int __ATTRS_o_ai +vec_lvrxl(int a, vector bool int *b) +{ + return vec_perm((vector bool int)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +static vector float __ATTRS_o_ai +vec_lvrxl(int a, float *b) +{ + return 
vec_perm((vector float)(0), + vec_ldl(a, b), + vec_lvsl(a, b)); +} + +static vector float __ATTRS_o_ai +vec_lvrxl(int a, vector float *b) +{ + return vec_perm((vector float)(0), + vec_ldl(a, b), + vec_lvsl(a, (unsigned char *)b)); +} + +/* vec_stvlx */ + +static void __ATTRS_o_ai +vec_stvlx(vector signed char a, int b, signed char *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector signed char a, int b, vector signed char *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned char a, int b, unsigned char *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned char a, int b, vector unsigned char *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector bool char a, int b, vector bool char *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector short a, int b, short *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector short a, int b, vector short *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned short a, int b, unsigned short *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned short a, int b, vector unsigned short *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector bool short a, int b, vector bool short *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector pixel a, int b, vector pixel *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector int a, int b, int *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector int a, int b, vector int *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned int a, int b, unsigned int *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector unsigned int a, int b, vector unsigned int *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector bool int a, int b, vector bool int *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlx(vector float a, int b, vector float *c) +{ + return vec_st(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +/* vec_stvlxl */ + +static void __ATTRS_o_ai +vec_stvlxl(vector signed char a, int b, signed char *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector signed char a, int b, vector signed char *c) +{ + return 
vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned char a, int b, unsigned char *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned char a, int b, vector unsigned char *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector bool char a, int b, vector bool char *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector short a, int b, short *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector short a, int b, vector short *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned short a, int b, unsigned short *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned short a, int b, vector unsigned short *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector bool short a, int b, vector bool short *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector pixel a, int b, vector pixel *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector int a, int b, int *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector int a, int b, vector int *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned int a, int b, unsigned int *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector unsigned int a, int b, vector unsigned int *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector bool int a, int b, vector bool int *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvlxl(vector float a, int b, vector float *c) +{ + return vec_stl(vec_perm(vec_lvrx(b, c), + a, + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +/* vec_stvrx */ + +static void __ATTRS_o_ai +vec_stvrx(vector signed char a, int b, signed char *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector signed char a, int b, vector signed char *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned char a, int b, unsigned char *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned char a, int b, vector unsigned char *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector bool char a, int b, vector 
bool char *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector short a, int b, short *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector short a, int b, vector short *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned short a, int b, unsigned short *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned short a, int b, vector unsigned short *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector bool short a, int b, vector bool short *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector pixel a, int b, vector pixel *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector int a, int b, int *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector int a, int b, vector int *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned int a, int b, unsigned int *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector unsigned int a, int b, vector unsigned int *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector bool int a, int b, vector bool int *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrx(vector float a, int b, vector float *c) +{ + return vec_st(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +/* vec_stvrxl */ + +static void __ATTRS_o_ai +vec_stvrxl(vector signed char a, int b, signed char *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector signed char a, int b, vector signed char *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned char a, int b, unsigned char *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned char a, int b, vector unsigned char *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector bool char a, int b, vector bool char *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector short a, int b, short *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector short a, int b, vector short *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned short a, int b, unsigned short *c) +{ + 
return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned short a, int b, vector unsigned short *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector bool short a, int b, vector bool short *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector pixel a, int b, vector pixel *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector int a, int b, int *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector int a, int b, vector int *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned int a, int b, unsigned int *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector unsigned int a, int b, vector unsigned int *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector bool int a, int b, vector bool int *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +static void __ATTRS_o_ai +vec_stvrxl(vector float a, int b, vector float *c) +{ + return vec_stl(vec_perm(a, + vec_lvlx(b, c), + vec_lvsr(b, (unsigned char *)c)), + b, c); +} + +/* vec_promote */ + +static vector signed char __ATTRS_o_ai +vec_promote(signed char a, int b) +{ + vector signed char res = (vector signed char)(0); + res[b] = a; + return res; +} + +static vector unsigned char __ATTRS_o_ai +vec_promote(unsigned char a, int b) +{ + vector unsigned char res = (vector unsigned char)(0); + res[b] = a; + return res; +} + +static vector short __ATTRS_o_ai +vec_promote(short a, int b) +{ + vector short res = (vector short)(0); + res[b] = a; + return res; +} + +static vector unsigned short __ATTRS_o_ai +vec_promote(unsigned short a, int b) +{ + vector unsigned short res = (vector unsigned short)(0); + res[b] = a; + return res; +} + +static vector int __ATTRS_o_ai +vec_promote(int a, int b) +{ + vector int res = (vector int)(0); + res[b] = a; + return res; +} + +static vector unsigned int __ATTRS_o_ai +vec_promote(unsigned int a, int b) +{ + vector unsigned int res = (vector unsigned int)(0); + res[b] = a; + return res; +} + +static vector float __ATTRS_o_ai +vec_promote(float a, int b) +{ + vector float res = (vector float)(0); + res[b] = a; + return res; +} + +/* vec_splats */ + +static vector signed char __ATTRS_o_ai +vec_splats(signed char a) +{ + return (vector signed char)(a); +} + +static vector unsigned char __ATTRS_o_ai +vec_splats(unsigned char a) +{ + return (vector unsigned char)(a); +} + +static vector short __ATTRS_o_ai +vec_splats(short a) +{ + return (vector short)(a); +} + +static vector unsigned short __ATTRS_o_ai +vec_splats(unsigned short a) +{ + return (vector unsigned short)(a); +} + +static vector int __ATTRS_o_ai +vec_splats(int a) +{ + return (vector int)(a); +} + +static vector unsigned int __ATTRS_o_ai +vec_splats(unsigned int a) +{ + return (vector unsigned int)(a); +} + +static vector float __ATTRS_o_ai +vec_splats(float a) +{ + return (vector float)(a); +} + /* 
----------------------------- predicates --------------------------------- */ /* vec_all_eq */ diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c index 8627499cdc..e4717d9da6 100644 --- a/test/CodeGen/builtins-ppc-altivec.c +++ b/test/CodeGen/builtins-ppc-altivec.c @@ -32,7 +32,13 @@ int param_i; unsigned int param_ui; float param_f; +int res_sc; +int res_uc; +int res_s; +int res_us; int res_i; +int res_ui; +int res_f; // CHECK: define void @test1 void test1() { @@ -1761,9 +1767,958 @@ void test6() { res_vf = vec_vxor(vbi, vf); // CHECK: xor <4 x i32> res_vf = vec_vxor(vf, vbi); // CHECK: xor <4 x i32> + /* ------------------------------ extensions -------------------------------------- */ + + /* vec_extract */ + res_sc = vec_extract(vsc, param_i); // CHECK: extractelement <16 x i8> + res_uc = vec_extract(vuc, param_i); // CHECK: extractelement <16 x i8> + res_s = vec_extract(vs, param_i); // CHECK: extractelement <8 x i16> + res_us = vec_extract(vus, param_i); // CHECK: extractelement <8 x i16> + res_i = vec_extract(vi, param_i); // CHECK: extractelement <4 x i32> + res_ui = vec_extract(vui, param_i); // CHECK: extractelement <4 x i32> + res_f = vec_extract(vf, param_i); // CHECK: extractelement <4 x float> + + /* vec_insert */ + res_vsc = vec_insert(param_sc, vsc, param_i); // CHECK: insertelement <16 x i8> + res_vuc = vec_insert(param_uc, vuc, param_i); // CHECK: insertelement <16 x i8> + res_vs = vec_insert(param_s, vs, param_i); // CHECK: insertelement <8 x i16> + res_vus = vec_insert(param_us, vus, param_i); // CHECK: insertelement <8 x i16> + res_vi = vec_insert(param_i, vi, param_i); // CHECK: insertelement <4 x i32> + res_vui = vec_insert(param_ui, vui, param_i); // CHECK: insertelement <4 x i32> + res_vf = vec_insert(param_f, vf, param_i); // CHECK: insertelement <4 x float> + + /* vec_lvlx */ + res_vsc = vec_lvlx(0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vsc = vec_lvlx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvlx(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvlx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbc = vec_lvlx(0, &vbc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvlx(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvlx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvlx(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvlx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbs = vec_lvlx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx 
+ // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vp = vec_lvlx(0, &vp); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvlx(0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvlx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvlx(0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvlx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbi = vec_lvlx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vf = vec_lvlx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + /* vec_lvlxl */ + res_vsc = vec_lvlxl(0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vsc = vec_lvlxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvlxl(0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvlxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbc = vec_lvlxl(0, &vbc); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvlxl(0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvlxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvlxl(0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvlxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbs = vec_lvlxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vp = vec_lvlxl(0, &vp); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvlxl(0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: 
@llvm.ppc.altivec.vperm + + res_vi = vec_lvlxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvlxl(0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvlxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbi = vec_lvlxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vf = vec_lvlxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + /* vec_lvrx */ + res_vsc = vec_lvrx(0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vsc = vec_lvrx(0, &vsc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvrx(0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvrx(0, &vuc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbc = vec_lvrx(0, &vbc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvrx(0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvrx(0, &vs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvrx(0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvrx(0, &vus); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbs = vec_lvrx(0, &vbs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vp = vec_lvrx(0, &vp); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvrx(0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvrx(0, &vi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvrx(0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvrx(0, &vui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: 
@llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbi = vec_lvrx(0, &vbi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vf = vec_lvrx(0, &vf); // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + /* vec_lvrxl */ + res_vsc = vec_lvrxl(0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vsc = vec_lvrxl(0, &vsc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvrxl(0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vuc = vec_lvrxl(0, &vuc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbc = vec_lvrxl(0, &vbc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvrxl(0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vs = vec_lvrxl(0, &vs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvrxl(0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vus = vec_lvrxl(0, &vus); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbs = vec_lvrxl(0, &vbs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vp = vec_lvrxl(0, &vp); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvrxl(0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vi = vec_lvrxl(0, &vi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvrxl(0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vui = vec_lvrxl(0, &vui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vbi = vec_lvrxl(0, &vbi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + + res_vf = vec_lvrxl(0, &vf); // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvxl + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: 
@llvm.ppc.altivec.vperm + + /* vec_stvlx */ + vec_stvlx(vsc, 0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vsc, 0, &vsc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vuc, 0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vuc, 0, &vuc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vbc, 0, &vbc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vs, 0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vs, 0, &vs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vus, 0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vus, 0, &vus); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vp, 0, &vp); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vi, 0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vi, 0, &vi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: 
@llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vui, 0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vui, 0, &vui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvlx(vf, 0, &vf); // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + /* vec_stvlxl */ + vec_stvlxl(vsc, 0, ¶m_sc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vsc, 0, &vsc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vuc, 0, ¶m_uc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vuc, 0, &vuc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vbc, 0, &vbc); // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vs, 0, ¶m_s); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vs, 0, &vs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vus, 0, ¶m_us); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // 
CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vus, 0, &vus); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vp, 0, &vp); // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vi, 0, ¶m_i); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vi, 0, &vi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vui, 0, ¶m_ui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vui, 0, &vui); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvlxl(vf, 0, &vf); // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvx + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + /* vec_stvrx */ + vec_stvrx(vsc, 0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vuc, 0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer 
+ // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vs, 0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vus, 0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vi, 0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vui, 0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + vec_stvrx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx 
+ + vec_stvrx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvx + + /* vec_stvrxl */ + vec_stvrxl(vsc, 0, ¶m_sc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vuc, 0, ¶m_uc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vbc, 0, &vbc); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vs, 0, ¶m_s); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vus, 0, ¶m_us); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vp, 0, &vp); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vi, 0, ¶m_i); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: 
@llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vui, 0, ¶m_ui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + vec_stvrxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx + // CHECK: store <4 x float> zeroinitializer + // CHECK: @llvm.ppc.altivec.lvsl + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.lvsr + // CHECK: @llvm.ppc.altivec.vperm + // CHECK: @llvm.ppc.altivec.stvxl + + /* vec_promote */ + res_vsc = vec_promote(param_sc, 0); // CHECK: store <16 x i8> zeroinitializer + // CHECK: insertelement <16 x i8> + + res_vuc = vec_promote(param_uc, 0); // CHECK: store <16 x i8> zeroinitializer + // CHECK: insertelement <16 x i8> + + res_vs = vec_promote(param_s, 0); // CHECK: store <8 x i16> zeroinitializer + // CHECK: insertelement <8 x i16> + + res_vus = vec_promote(param_us, 0); // CHECK: store <8 x i16> zeroinitializer + // CHECK: insertelement <8 x i16> + + res_vi = vec_promote(param_i, 0); // CHECK: store <4 x i32> zeroinitializer + // CHECK: insertelement <4 x i32> + + res_vui = vec_promote(param_ui, 0); // CHECK: store <4 x i32> zeroinitializer + // CHECK: insertelement <4 x i32> + + res_vf = vec_promote(param_f, 0); // CHECK: store <4 x float> zeroinitializer + // CHECK: insertelement <4 x float> + + /* vec_splats */ + res_vsc = vec_splats(param_sc); // CHECK: insertelement <16 x i8> + + res_vuc = vec_splats(param_uc); // CHECK: insertelement <16 x i8> + + res_vs = vec_splats(param_s); // CHECK: insertelement <8 x i16> + + res_vus = vec_splats(param_us); // CHECK: insertelement <8 x i16> + + res_vi = vec_splats(param_i); // CHECK: insertelement <4 x i32> + + res_vui = vec_splats(param_ui); // CHECK: insertelement <4 x i32> + + res_vf = vec_splats(param_f); // CHECK: insertelement <4 x float> + /* ------------------------------ predicates -------------------------------------- */ - /* vec_all_eq */ + /* vec_all_eq */ res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p res_i = vec_all_eq(vsc, vbc); // CHECK: @llvm.ppc.altivec.vcmpequb.p res_i = vec_all_eq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb.p
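
The interfaces added in this commit mirror the CBEA (Cell SDK) extensions to altivec.h. As a rough illustration of how they combine (this sketch is not part of the patch or of the test file; the function and variable names are invented, it assumes a PowerPC target compiled with AltiVec enabled, e.g. clang -maltivec, and vec_or/vec_add/vec_perm are pre-existing AltiVec operations rather than part of this change):

#include <altivec.h>

/* Add a broadcast bias to the 4 floats starting at src (which need not be
   16-byte aligned) and return the sum of the first and last lane. */
static float sum_ends_with_bias(float *src, float bias)
{
  /* vec_splats: broadcast a scalar into every lane of a vector. */
  vector float vbias = vec_splats(bias);

  /* Unaligned-load idiom: vec_lvlx fetches the left-justified head of the
     quadword at src and vec_lvrx(16, ...) the right-justified tail from the
     following 16-byte block; OR-ing them yields the 16 bytes at src. */
  vector float v = vec_or(vec_lvlx(0, src), vec_lvrx(16, src));

  v = vec_add(v, vbias);

  /* vec_extract reads one lane back out as a scalar; vec_insert and
     vec_promote perform the inverse scalar-to-lane operations. */
  return vec_extract(v, 0) + vec_extract(v, 3);
}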