vec_st( tmp0v, 0x00, level );
vec_st( tmp1v, 0x10, level );
}
+
+/* Zigzag-scan an 8x8 block of int16 DCT coefficients (frame/progressive
+ * scan order) using AltiVec merge/permute operations.
+ *
+ * dct:   input, 64 coefficients read as eight 16-byte vector loads
+ *        (one 8-coefficient row per load)
+ * level: output, 64 coefficients written as eight 16-byte vector stores,
+ *        so each store at offset n*16 covers scan positions 8n..8n+7
+ *
+ * NOTE(review): vec_ld/vec_st ignore the low 4 address bits, so both
+ * arrays are assumed 16-byte aligned — confirm at the call sites. */
+void x264_zigzag_scan_8x8_frame_altivec( int16_t level[64], int16_t dct[64] )
+{
+ vec_s16_t tmpv[6];
+ /* Load the eight 8-coefficient rows of the source block. */
+ vec_s16_t dct0v = vec_ld( 0*16, dct );
+ vec_s16_t dct1v = vec_ld( 1*16, dct );
+ vec_s16_t dct2v = vec_ld( 2*16, dct );
+ vec_s16_t dct3v = vec_ld( 3*16, dct );
+ vec_s16_t dct4v = vec_ld( 4*16, dct );
+ vec_s16_t dct5v = vec_ld( 5*16, dct );
+ vec_s16_t dct6v = vec_ld( 6*16, dct );
+ vec_s16_t dct7v = vec_ld( 7*16, dct );
+
+ /* vec_perm control vectors: each byte selects one source byte —
+  * 0x00..0x0F from the first operand, 0x10..0x1F from the second.
+  * Coefficients are int16, so indices always come in even/odd byte
+  * pairs (e.g. 0x12,0x13 = element 1 of the second operand). Combined
+  * with the mergeh/mergel interleaves below, these tables realize the
+  * frame zigzag order; they were presumably derived offline from the
+  * scalar scan table — do not edit by hand without re-deriving. */
+ const vec_u8_t mask1[14] = {
+ { 0x00, 0x01, 0x02, 0x03, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B, 0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D },
+ { 0x0A, 0x0B, 0x0C, 0x0D, 0x00, 0x00, 0x0E, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13 },
+ { 0x00, 0x01, 0x02, 0x03, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
+ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
+ { 0x00, 0x00, 0x14, 0x15, 0x18, 0x19, 0x02, 0x03, 0x04, 0x05, 0x08, 0x09, 0x06, 0x07, 0x12, 0x13 },
+ { 0x12, 0x13, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
+ { 0x1A, 0x1B, 0x10, 0x11, 0x08, 0x09, 0x04, 0x05, 0x02, 0x03, 0x0C, 0x0D, 0x14, 0x15, 0x18, 0x19 },
+ { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B },
+ { 0x00, 0x01, 0x02, 0x03, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x06, 0x07, 0x04, 0x05, 0x08, 0x09 },
+ { 0x00, 0x11, 0x16, 0x17, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x1A, 0x1B },
+ { 0x02, 0x03, 0x18, 0x19, 0x16, 0x17, 0x1A, 0x1B, 0x1C, 0x1D, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 },
+ { 0x08, 0x09, 0x0A, 0x0B, 0x06, 0x07, 0x0E, 0x0F, 0x10, 0x11, 0x00, 0x00, 0x12, 0x13, 0x14, 0x15 },
+ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
+ { 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x08, 0x09, 0x06, 0x07, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
+ };
+
+ /* Assemble and store scan positions 0-7.
+  * tmpv[] slots are aggressively reused below; statement order matters. */
+ tmpv[0] = vec_mergeh( dct0v, dct1v );
+ tmpv[1] = vec_mergeh( dct2v, dct3v );
+ /* Reinterpret as 32-bit lanes to interleave int16 pairs at once. */
+ tmpv[2] = (vec_s16_t)vec_mergeh( (vec_s32_t)tmpv[0], (vec_s32_t)tmpv[1] );
+ tmpv[3] = vec_perm( tmpv[2], dct0v, mask1[0] );
+ vec_st( tmpv[3], 0*16, level );
+
+ /* Scan positions 8-15. */
+ tmpv[4] = vec_mergeh( dct4v, dct5v );
+ tmpv[3] = vec_perm( tmpv[0], tmpv[4], mask1[1] );
+ tmpv[3] = vec_perm( tmpv[3], dct0v, mask1[2] );
+ tmpv[3] = vec_perm( tmpv[3], tmpv[1], mask1[3] );
+ vec_st( tmpv[3], 1*16, level );
+
+ /* Scan positions 16-23. */
+ tmpv[3] = vec_mergel( dct0v, dct1v );
+ tmpv[1] = vec_mergel( tmpv[1], dct2v );
+ tmpv[5] = vec_perm( tmpv[3], tmpv[1], mask1[4] );
+ tmpv[5] = vec_perm( tmpv[5], dct4v, mask1[5] );
+ vec_st( tmpv[5], 2*16, level );
+
+ /* Scan positions 24-31. */
+ tmpv[2] = vec_mergeh( dct5v, dct6v );
+ tmpv[5] = vec_mergeh( tmpv[2], dct7v );
+ tmpv[4] = vec_mergel( tmpv[4], tmpv[1] );
+ tmpv[0] = vec_perm( tmpv[5], tmpv[4], mask1[6] );
+ vec_st( tmpv[0], 3*16, level );
+
+ /* Scan positions 32-39. */
+ tmpv[1] = vec_mergel( dct2v, dct3v );
+ tmpv[0] = vec_mergel( dct4v, dct5v );
+ tmpv[4] = vec_perm( tmpv[1], tmpv[0], mask1[7] );
+ tmpv[3] = vec_perm( tmpv[4], tmpv[3], mask1[8] );
+ vec_st( tmpv[3], 4*16, level );
+
+ /* Scan positions 40-47. */
+ tmpv[3] = vec_mergeh( dct6v, dct7v );
+ tmpv[2] = vec_mergel( dct3v, dct4v );
+ tmpv[2] = vec_perm( tmpv[2], dct5v, mask1[9] );
+ tmpv[3] = vec_perm( tmpv[2], tmpv[3], mask1[10] );
+ vec_st( tmpv[3], 5*16, level );
+
+ /* Scan positions 48-55. */
+ tmpv[1] = vec_mergel( tmpv[1], tmpv[2] );
+ tmpv[2] = vec_mergel( dct6v, dct7v );
+ tmpv[1] = vec_perm( tmpv[1], tmpv[2], mask1[11] );
+ tmpv[1] = vec_perm( tmpv[1], dct7v, mask1[12] );
+ vec_st( tmpv[1], 6*16, level );
+
+ /* Scan positions 56-63. */
+ tmpv[2] = vec_perm( tmpv[2], tmpv[0], mask1[13] );
+ vec_st( tmpv[2], 7*16, level );
+
+}
#endif // !HIGH_BIT_DEPTH