&vp9_idct16x16_1_add_c,
TX_16X16, 1),
make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_c,
- TX_8X8, 10),
+ &vp9_idct8x8_12_add_c,
+ TX_8X8, 12),
make_tuple(&vp9_idct8x8_64_add_c,
&vp9_idct8x8_1_add_c,
TX_8X8, 1),
&vp9_idct16x16_1_add_neon,
TX_16X16, 1),
make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_neon,
- TX_8X8, 10),
+ &vp9_idct8x8_12_add_neon,
+ TX_8X8, 12),
make_tuple(&vp9_idct8x8_64_add_c,
&vp9_idct8x8_1_add_neon,
TX_8X8, 1),
&vp9_idct16x16_1_add_sse2,
TX_16X16, 1),
make_tuple(&vp9_idct8x8_64_add_c,
- &vp9_idct8x8_10_add_sse2,
- TX_8X8, 10),
+ &vp9_idct8x8_12_add_sse2,
+ TX_8X8, 12),
make_tuple(&vp9_idct8x8_64_add_c,
&vp9_idct8x8_1_add_sse2,
TX_8X8, 1),
;
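The tuples above pair each full inverse transform with its partial counterpart, the transform size, and the last-nonzero-coefficient count it must cover; the rename simply bumps that count from 10 to 12 for the 8x8 case. A minimal standalone sketch of the property being exercised (a hypothetical harness, not the actual vp9_partial_idct_test.cc code; prototypes assumed to match the generated vp9_rtcd.h):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Prototypes assumed to match the generated vp9_rtcd.h. */
void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *dest, int stride);
void vp9_idct8x8_12_add_c(const int16_t *input, uint8_t *dest, int stride);

int main(void) {
  int16_t coeffs[64] = { 0 };
  uint8_t full[64], partial[64];

  /* Same flat prediction for both reconstructions. */
  memset(full, 128, sizeof(full));
  memset(partial, 128, sizeof(partial));

  /* Only the first two coefficients are non-zero (eob == 2), well inside
   * the range the partial transform claims to handle, so both paths must
   * produce identical pixels. */
  coeffs[0] = 100;
  coeffs[1] = -50;

  vp9_idct8x8_64_add_c(coeffs, full, 8);
  vp9_idct8x8_12_add_c(coeffs, partial, 8);
  assert(memcmp(full, partial, sizeof(full)) == 0);
  return 0;
}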
EXPORT |vp9_idct8x8_64_add_neon|
- EXPORT |vp9_idct8x8_10_add_neon|
+ EXPORT |vp9_idct8x8_12_add_neon|
ARM
REQUIRE8
PRESERVE8
bx lr
ENDP ; |vp9_idct8x8_64_add_neon|
-;void vp9_idct8x8_10_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
+;void vp9_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride)
;
-; r0 int16_t input
+; r0 int16_t *input
; r1 uint8_t *dest
-; r2 int dest_stride)
+; r2 int dest_stride
-|vp9_idct8x8_10_add_neon| PROC
+|vp9_idct8x8_12_add_neon| PROC
push {r4-r9}
vpush {d8-d15}
vld1.s16 {q8,q9}, [r0]!
vpop {d8-d15}
pop {r4-r9}
bx lr
- ENDP ; |vp9_idct8x8_10_add_neon|
+ ENDP ; |vp9_idct8x8_12_add_neon|
END
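The NEON routine above loads only the leading coefficient rows (the vld1.s16 {q8,q9} above pulls in the first sixteen int16 values) and relies on the rest of the block being zero. A rough C sketch of that partial-transform shape, assuming only the first four coefficient rows can be non-zero (which the small-eob dispatch is meant to guarantee) and using a hypothetical idct8_1d() 1-D butterfly plus locally defined rounding/clamping helpers:

#include <stdint.h>

/* Hypothetical 1-D 8-point inverse DCT butterfly; stands in for the
 * library's internal idct8 helper. */
void idct8_1d(const int16_t *in, int16_t *out);

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

uint8_t clip_pixel(int val) { /* clamp reconstruction to [0, 255] */
  return (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
}

/* Sketch only: row pass over the first four rows (the rest are assumed
 * zero), then a full column pass with the usual >> 5 reconstruction shift. */
void idct8x8_partial_sketch(const int16_t *input, uint8_t *dest, int stride) {
  int16_t out[8 * 8] = { 0 };  /* rows 4..7 stay zero */
  int16_t temp_in[8], temp_out[8];
  int i, j;

  for (i = 0; i < 4; ++i)
    idct8_1d(input + i * 8, out + i * 8);

  for (i = 0; i < 8; ++i) {
    for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
    idct8_1d(temp_in, temp_out);
    for (j = 0; j < 8; ++j)
      dest[j * stride + i] = clip_pixel(
          dest[j * stride + i] + ROUND_POWER_OF_TWO(temp_out[j], 5));
  }
}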
}
}
-void vp9_idct8x8_10_add_dspr2(const int16_t *input, uint8_t *dest,
+void vp9_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest,
int dest_stride) {
DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
int16_t *outptr = out;
}
}
-void vp9_idct8x8_10_add_c(const int16_t *input, uint8_t *dest, int stride) {
+void vp9_idct8x8_12_add_c(const int16_t *input, uint8_t *dest, int stride) {
int16_t out[8 * 8] = { 0 };
int16_t *outptr = out;
int i, j;
if (eob == 1)
// DC only DCT coefficient
vp9_idct8x8_1_add(input, dest, stride);
- else if (eob <= 10)
- vp9_idct8x8_10_add(input, dest, stride);
+ else if (eob <= 12)
+ vp9_idct8x8_12_add(input, dest, stride);
else
vp9_idct8x8_64_add(input, dest, stride);
}
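The dispatch above picks the cheapest kernel the eob allows: DC-only, the partial transform (now covering eob values up to 12), or the full 64-coefficient transform. A hypothetical call-site sketch (reconstruct_8x8 is made up for illustration; the dispatch prototype is assumed to match vp9/common/vp9_idct.h):

#include <stdint.h>

/* Assumed prototype of the dispatch function shown above. */
void vp9_idct8x8_add(const int16_t *input, uint8_t *dest, int stride, int eob);

/* Hypothetical call site: dqcoeff holds the dequantized coefficients of one
 * 8x8 block, eob is the coefficient count returned by the entropy decoder. */
void reconstruct_8x8(const int16_t *dqcoeff, int eob, uint8_t *recon,
                     int stride) {
  if (eob > 0)  /* an all-zero block leaves the prediction untouched */
    vp9_idct8x8_add(dqcoeff, recon, stride, eob);
}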
add_proto qw/void vp9_idct8x8_64_add/, "const int16_t *input, uint8_t *dest, int dest_stride";
specialize qw/vp9_idct8x8_64_add sse2 neon dspr2/, "$ssse3_x86_64";
-add_proto qw/void vp9_idct8x8_10_add/, "const int16_t *input, uint8_t *dest, int dest_stride";
-specialize qw/vp9_idct8x8_10_add sse2 neon dspr2/, "$ssse3_x86_64";
+add_proto qw/void vp9_idct8x8_12_add/, "const int16_t *input, uint8_t *dest, int dest_stride";
+specialize qw/vp9_idct8x8_12_add sse2 neon dspr2/, "$ssse3_x86_64";
add_proto qw/void vp9_idct16x16_1_add/, "const int16_t *input, uint8_t *dest, int dest_stride";
specialize qw/vp9_idct16x16_1_add sse2 neon dspr2/;
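For context, the add_proto/specialize pair drives the RTCD code generator; roughly, it emits per-target declarations and a runtime-selected pointer along the following lines (a sketch only; the exact output of rtcd.pl depends on the configured targets):

#include <stdint.h>

/* As in the generated header: empty in the defining TU, extern elsewhere. */
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif

/* Sketch of generated vp9_rtcd.h content for the renamed function. */
void vp9_idct8x8_12_add_c(const int16_t *input, uint8_t *dest, int dest_stride);
void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vp9_idct8x8_12_add)(const int16_t *input, uint8_t *dest,
                                       int dest_stride);

/* ...and in setup_rtcd_internal(), something like:                      */
/*   vp9_idct8x8_12_add = vp9_idct8x8_12_add_c;                          */
/*   if (flags & HAS_SSE2) vp9_idct8x8_12_add = vp9_idct8x8_12_add_sse2; */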
RECON_AND_STORE(dest, in[7]);
}
-void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1<<4);
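The constants set up here mirror the scalar rounding in the C reference: DCT_CONST_BITS is 14, so intermediate butterfly results are rounded with 1 << 13 (8192, the same value the SSSE3 assembly below loads as pd_8192), and the final 8x8 reconstruction shift of 5 is rounded with 1 << 4 (pw_16 in the assembly); pw_11585x2 is 2 * cospi_16_64. In C terms (a simplified sketch of the helpers in vp9/common/vp9_idct.h):

/* Scalar counterparts of the SIMD rounding constants. */
#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1)) /* 8192 == pd_8192 */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

/* Each 1-D butterfly output is rounded back down by DCT_CONST_BITS... */
int dct_const_round_shift(int input) {
  return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
}

/* ...and the final pixel add uses a shift of 5 for 8x8 blocks, hence the
 * rounding bias of 1 << 4 (16). */
int final_round_shift_8x8(int value) {
  return ROUND_POWER_OF_TWO(value, 5);
}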
RET
-; inverse 8x8 2D-DCT transform with only first 10 coeffs non-zero
+; inverse 8x8 2D-DCT transform with only first 12 coeffs non-zero
-cglobal idct8x8_10_add, 3, 5, 13, input, output, stride
+cglobal idct8x8_12_add, 3, 5, 13, input, output, stride
mova m8, [pd_8192]
mova m11, [pw_16]
mova m12, [pw_11585x2]