break;
}
}
+#if CONFIG_VP9_HIGHBITDEPTH
+// High-bitdepth counterpart of vp9_intra_dpcm_add: reconstructs a tx-skip
+// intra block by DPCM. For each pixel the predictor is a previously
+// reconstructed neighbor (left for H_PRED, above for V_PRED,
+// left + above - above-left for TM_PRED); the dequantized residual,
+// right-shifted by 'shift', is added on top and clamped to bit depth 'bd'.
+// dqcoeff holds bs*bs residuals in raster order; dst8 is a packed pointer
+// (converted via CONVERT_TO_SHORTPTR) to the uint16_t destination with the
+// given stride. Prediction modes other than H/V/TM are intentionally no-ops.
+static void vp9_highbd_intra_dpcm_add(tran_low_t *dqcoeff, uint8_t *dst8,
+                                      int stride, PREDICTION_MODE mode,
+                                      int bs, int shift, int bd) {
+  int r, c, temp;
+  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+
+  switch (mode) {
+    case H_PRED:
+      // First column: add the residual to the pixel already in dst.
+      for (r = 0; r < bs; r++)
+        dst[r * stride] = clip_pixel_highbd(dst[r * stride] +
+                                            (dqcoeff[r * bs] >> shift), bd);
+      // Remaining columns: predict each pixel from its reconstructed
+      // left neighbor, then add the residual.
+      for (r = 0; r < bs; r++)
+        for (c = 1; c < bs; c++)
+          dst[r * stride + c] =
+              clip_pixel_highbd(dst[r * stride + c - 1] +
+                                (dqcoeff[r * bs + c] >> shift), bd);
+      break;
+    case V_PRED:
+      // First row in place; every other row predicts from the
+      // reconstructed row directly above.
+      for (c = 0; c < bs; c++)
+        dst[c] = clip_pixel_highbd(dst[c] + (dqcoeff[c] >> shift), bd);
+      for (r = 1; r < bs; r++)
+        for (c = 0; c < bs; c++)
+          dst[r * stride + c] =
+              clip_pixel_highbd(dst[(r - 1) * stride + c] +
+                                (dqcoeff[r * bs + c] >> shift), bd);
+      break;
+    case TM_PRED:
+      // First row and first column in place; interior pixels use the TM
+      // gradient predictor (left + above - above-left), which is clamped
+      // to the bit depth before the residual is added.
+      for (c = 0; c < bs; c++)
+        dst[c] = clip_pixel_highbd(dst[c] + (dqcoeff[c] >> shift), bd);
+      for (r = 1; r < bs; r++)
+        dst[r * stride] = clip_pixel_highbd(dst[r * stride] +
+                                            (dqcoeff[r * bs] >> shift), bd);
+      for (r = 1; r < bs; r++)
+        for (c = 1; c < bs; c++) {
+          temp = dst[stride * r + c - 1] + dst[stride * (r - 1) + c] -
+                 dst[stride * (r - 1) + c - 1];
+          temp = clip_pixel_highbd(temp, bd);
+          dst[stride * r + c] =
+              clip_pixel_highbd(temp + (dqcoeff[r * bs + c] >> shift), bd);
+        }
+      break;
+    default:
+      break;
+  }
+}
+
+// High-bitdepth counterpart of vp9_intra_dpcm_add_nocoeff: DPCM
+// reconstruction of a tx-skip intra block when there is no residual to
+// add. The first row/column already present in dst is propagated across
+// the block: H_PRED repeats each row's first pixel, V_PRED copies each
+// row from the one above, and TM_PRED fills the interior with
+// clip(left + above - above-left) at bit depth 'bd'. Other modes are
+// intentionally left untouched. dst8 is a packed pointer to uint16_t
+// pixels (converted via CONVERT_TO_SHORTPTR).
+static void vp9_highbd_intra_dpcm_add_nocoeff(uint8_t *dst8, int stride,
+                                              PREDICTION_MODE mode, int bs, int bd) {
+  int r, c, temp;  // temp is only used by the TM_PRED gradient predictor.
+  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+
+  switch (mode) {
+    case H_PRED:
+      // Replicate each row's first (already reconstructed) pixel.
+      for (r = 0; r < bs; r++)
+        for (c = 1; c < bs; c++)
+          dst[r * stride + c] = dst[r * stride];
+      break;
+    case V_PRED:
+      // Copy the first row into every following row.
+      for (r = 1; r < bs; r++)
+        vpx_memcpy(dst + r * stride, dst, bs * sizeof(dst[0]));
+      break;
+    case TM_PRED:
+      // Interior pixels: TM gradient predictor clamped to bit depth.
+      for (r = 1; r < bs; r++)
+        for (c = 1; c < bs; c++) {
+          temp = dst[stride * r + c - 1] + dst[stride * (r - 1) + c] -
+                 dst[stride * (r - 1) + c - 1];
+          dst[stride * r + c] = clip_pixel_highbd(temp, bd);
+        }
+      break;
+    default:
+      break;
+  }
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_TX_SKIP
static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd);
} else {
const PLANE_TYPE plane_type = pd->plane_type;
- switch (tx_size) {
- case TX_4X4:
- tx_type = get_tx_type_4x4(plane_type, xd, block);
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 4, shift);
- } else {
- vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
- }
-#else
- vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
-#endif
- break;
- case TX_8X8:
- tx_type = get_tx_type(plane_type, xd);
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 8, shift);
- } else {
- vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
- }
-#else
- vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
-#endif
- break;
- case TX_16X16:
- tx_type = get_tx_type(plane_type, xd);
#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 16, shift);
- } else {
+ if (mbmi->tx_skip[plane != 0]) {
+ int bs = 4 << tx_size;
+ if (tx_size <= TX_32X32 &&
+ (mode == V_PRED || mode == H_PRED || mode == TM_PRED))
+ vp9_highbd_intra_dpcm_add(dqcoeff, dst, stride, mode, bs, shift,
+ xd->bd);
+ else
+ vp9_highbd_tx_identity_add(dqcoeff, dst, stride, bs, shift,
+ xd->bd);
+ tx_type = DCT_DCT;
+ if (tx_size == TX_4X4)
+ tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
+ else if (tx_size <= TX_16X16)
+ tx_type = get_tx_type(pd->plane_type, xd);
+ } else {
+#endif // CONFIG_TX_SKIP
+ switch (tx_size) {
+ case TX_4X4:
+ tx_type = get_tx_type_4x4(plane_type, xd, block);
+ vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob,
+ xd->bd);
+ break;
+ case TX_8X8:
+ tx_type = get_tx_type(plane_type, xd);
+ vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob,
+ xd->bd);
+ break;
+ case TX_16X16:
+ tx_type = get_tx_type(plane_type, xd);
vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob,
xd->bd);
- }
-#else
- vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
-#endif
- break;
- case TX_32X32:
- tx_type = DCT_DCT;
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 32, shift);
- } else {
+ break;
+ case TX_32X32:
+ tx_type = DCT_DCT;
vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
- }
-#else
- vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
-#endif
- break;
+ break;
#if CONFIG_TX64X64
- case TX_64X64:
- tx_type = DCT_DCT;
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 64, shift);
- } else {
+ case TX_64X64:
+ tx_type = DCT_DCT;
vp9_highbd_idct64x64_add(dqcoeff, dst, stride, eob, xd->bd);
- }
-#else
- vp9_highbd_idct64x64_add(dqcoeff, dst, stride, eob, xd->bd);
-#endif // CONFIG_TX_SKIP
- break;
+ break;
#endif // CONFIG_TX64X64
- default:
- assert(0 && "Invalid transform size");
+ default:
+ assert(0 && "Invalid transform size");
+ }
+#if CONFIG_TX_SKIP
}
+#endif // CONFIG_TX_SKIP
}
} else {
#if CONFIG_TX_SKIP
if (xd->lossless && !mbmi->tx_skip[plane != 0]) {
#else
- if (xd->lossless) {
-#endif
- tx_type = DCT_DCT;
- vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
- } else {
- const PLANE_TYPE plane_type = pd->plane_type;
- switch (tx_size) {
- case TX_4X4:
- tx_type = get_tx_type_4x4(plane_type, xd, block);
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 4, shift);
- } else {
- vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
- }
-#else
- vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
+ if (xd->lossless) {
#endif
- break;
- case TX_8X8:
- tx_type = get_tx_type(plane_type, xd);
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 8, shift);
- } else {
- vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
- }
-#else
- vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
-#endif
- break;
- case TX_16X16:
- tx_type = get_tx_type(plane_type, xd);
+ tx_type = DCT_DCT;
+ vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
+ } else {
+ const PLANE_TYPE plane_type = pd->plane_type;
#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 16, shift);
- } else {
- vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
- }
-#else
- vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
-#endif
- break;
- case TX_32X32:
+ if (mbmi->tx_skip[plane != 0]) {
+ int bs = 4 << tx_size;
+ if (tx_size <= TX_32X32 &&
+ (mode == H_PRED || mode == V_PRED || mode == TM_PRED))
+ vp9_intra_dpcm_add(dqcoeff, dst, stride, mode, bs, shift);
+ else
+ vp9_tx_identity_add(dqcoeff, dst, stride, bs, shift);
tx_type = DCT_DCT;
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 32, shift);
- } else {
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
- }
-#else
- vp9_idct32x32_add(dqcoeff, dst, stride, eob);
-#endif
- break;
+ if (tx_size == TX_4X4)
+ tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
+ else if (tx_size <= TX_16X16)
+ tx_type = get_tx_type(pd->plane_type, xd);
+ } else {
+#endif // CONFIG_TX_SKIP
+
+ switch (tx_size) {
+ case TX_4X4:
+ tx_type = get_tx_type_4x4(plane_type, xd, block);
+ vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_8X8:
+ tx_type = get_tx_type(plane_type, xd);
+ vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_16X16:
+ tx_type = get_tx_type(plane_type, xd);
+ vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
+ break;
+ case TX_32X32:
+ tx_type = DCT_DCT;
+ vp9_idct32x32_add(dqcoeff, dst, stride, eob);
+ break;
#if CONFIG_TX64X64
- case TX_64X64:
- tx_type = DCT_DCT;
-#if CONFIG_TX_SKIP
- if (mbmi->tx_skip[plane != 0]) {
- vp9_tx_identity_add(dqcoeff, dst, stride, 64, shift);
- } else {
- vp9_idct64x64_add(dqcoeff, dst, stride, eob);
+ case TX_64X64:
+ tx_type = DCT_DCT;
+ vp9_idct64x64_add(dqcoeff, dst, stride, eob);
+ break;
+#endif // CONFIG_TX64X64
+ default:
+ assert(0 && "Invalid transform size");
+ return;
}
-#else
- vp9_idct64x64_add(dqcoeff, dst, stride, eob);
+#if CONFIG_TX_SKIP
+ }
#endif // CONFIG_TX_SKIP
- break;
-#endif // CONFIG_TX64X64
- default:
- assert(0 && "Invalid transform size");
- return;
}
}
- }
#else // CONFIG_VP9_HIGHBITDEPTH
if ((mi->mbmi.skip || no_coeff) && mi->mbmi.tx_skip[plane != 0] &&
mode == TM_PRED && tx_size <= TX_32X32) {
int bs = 4 * (1 << tx_size);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+ vp9_highbd_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs, xd->bd);
+ else
+ vp9_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs);
+#else
vp9_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs);
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
#endif
if ((mi->mbmi.skip || no_coeff) && mi->mbmi.tx_skip[plane != 0] &&
(mode == H_PRED || mode == V_PRED) && fbit && tx_size <= TX_32X32) {
int bs = 4 * (1 << tx_size);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+ vp9_highbd_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs, xd->bd);
+ else
+ vp9_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs);
+#else
vp9_intra_dpcm_add_nocoeff(dst, pd->dst.stride, mode, bs);
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
-#endif
+#endif // CONFIG_TX_SKIP && CONFIG_FILTERINTRA
}
struct inter_args {
#if CONFIG_TX_SKIP
if (mbmi->tx_skip[plane != 0]) {
int bs = 4 << tx_size;
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+#endif // CONFIG_VP9_HIGHBITDEPTH
vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
if (tx_size <= TX_16X16) {
- vp9_quantize_fp(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_fp(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_fp(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
} else if (tx_size == TX_32X32) {
- vp9_quantize_fp_32x32(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_fp_32x32(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_fp_32x32(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
#if CONFIG_TX64X64
else if (tx_size == TX_64X64) {
- vp9_quantize_fp_64x64(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_fp_64x64(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_fp_64x64(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
#endif // CONFIG_TX64X64
#if CONFIG_TX_SKIP
if (mbmi->tx_skip[plane != 0]) {
int bs = 4 << tx_size;
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+#endif // CONFIG_VP9_HIGHBITDEPTH
vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
if (tx_size <= TX_16X16) {
- vp9_quantize_dc(coeff, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_dc(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_dc(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
} else if (tx_size == TX_32X32) {
- vp9_quantize_dc_32x32(coeff, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
}
#if CONFIG_TX64X64
else if (tx_size == TX_64X64) {
- vp9_quantize_dc_64x64(coeff, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_dc_64x64(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_dc_64x64(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
}
#endif // CONFIG_TX64X64
#if CONFIG_TX_SKIP
if (mbmi->tx_skip[plane != 0]) {
int bs = 4 << tx_size;
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+#endif // CONFIG_VP9_HIGHBITDEPTH
vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
if (tx_size <= TX_16X16) {
- vp9_quantize_b(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_b(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
} else if (tx_size == TX_32X32) {
- vp9_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
#if CONFIG_TX64X64
else if (tx_size == TX_64X64) {
- vp9_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift, qcoeff,
+ dqcoeff, pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
#endif // CONFIG_TX64X64
#if CONFIG_TX_SKIP
if (mbmi->tx_skip[plane != 0]) {
- switch (tx_size) {
-#if CONFIG_TX64X64
- case TX_64X64:
- vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, 64, shift);
- break;
-#endif // CONFIG_TX64X64
- case TX_32X32:
- vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, 32, shift);
- break;
- case TX_16X16:
- vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, 16, shift);
- break;
- case TX_8X8:
- vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, 8, shift);
- break;
- case TX_4X4:
- vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, 4, shift);
- break;
- default:
- assert(0 && "Invalid transform size");
- break;
- }
+ int bs = 4 << tx_size;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+ vp9_highbd_tx_identity_add(dqcoeff, dst, pd->dst.stride, bs, shift,
+ xd->bd);
+ else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ vp9_tx_identity_add(dqcoeff, dst, pd->dst.stride, bs, shift);
+
return;
}
#endif // CONFIG_TX_SKIP
TX_SIZE tx_size, int shift, int logsizeby32) {
int i, j, eob, temp;
const int bs = 4 << tx_size;
-#if CONFIG_NEW_QUANT
- // const uint8_t* band = get_band_translate(tx_size);
-#endif
+
vpx_memset(qcoeff, 0, bs * bs * sizeof(*qcoeff));
vpx_memset(dqcoeff, 0, bs * bs * sizeof(*dqcoeff));
diff_stride, bs, shift);
vp9_quantize_rect(coeff + i, bs, 1, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff + i, dqcoeff + i,
- pd->dequant, logsizeby32, bs, i == 0);
+ pd->dequant, logsizeby32, bs, i == 0, 0);
vp9_tx_identity_add_rect(dqcoeff + i, dst + i, bs, 1,
bs, dst_stride, shift);
- if ( i < bs - 1 && 1)
+ if (i < bs - 1)
for (j = 0 ; j < bs; j++)
*(dst + j * dst_stride + i + 1) =
*(dst + j * dst_stride + i);
diff_stride, bs, shift);
vp9_quantize_rect(coeff + bs * i, 1, bs, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff + bs * i, dqcoeff + bs * i,
- pd->dequant, logsizeby32, bs, i == 0);
+ pd->dequant, logsizeby32, bs, i == 0, 0);
vp9_tx_identity_add_rect(dqcoeff + bs * i, dst + dst_stride * i,
1, bs, bs, dst_stride, shift);
if (i < bs - 1)
vp9_tx_identity_rect(src_diff, coeff, 1, bs, diff_stride, bs, shift);
vp9_quantize_rect(coeff, 1, bs, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff, pd->dequant,
- logsizeby32, bs, 1);
+ logsizeby32, bs, 1, 0);
vp9_tx_identity_add_rect(dqcoeff, dst, 1, bs, bs, dst_stride, shift);
vp9_subtract_block_c(bs -1, 1, src_diff + diff_stride, diff_stride,
diff_stride, bs, shift);
vp9_quantize_rect(coeff + bs, bs - 1, 1, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff + bs, dqcoeff + bs,
- pd->dequant, logsizeby32, bs, 0);
+ pd->dequant, logsizeby32, bs, 0, 0);
vp9_tx_identity_add_rect(dqcoeff + bs, dst + dst_stride, bs - 1, 1,
bs, dst_stride, shift);
vp9_quantize_rect(coeff + bs * i + j, 1, 1, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff + bs * i + j,
dqcoeff + bs * i + j, pd->dequant,
- logsizeby32, bs, 0);
+ logsizeby32, bs, 0, 0);
vp9_tx_identity_add_rect(dqcoeff + bs * i + j,
dst + dst_stride * i + j, 1, 1, bs,
dst_stride, shift);
eob = get_eob(qcoeff, bs * bs, scan_order->scan);
return eob;
}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+// High-bitdepth DPCM intra encode for tx-skip blocks (counterpart of
+// vp9_dpcm_intra). Encodes the bs x bs block one column (H_PRED), row
+// (V_PRED) or pixel (TM_PRED) at a time: subtract the prediction, apply
+// the identity transform with 'shift', quantize with vp9_quantize_rect
+// (highbd path selected via its trailing flag), and immediately
+// reconstruct into dst so subsequent rows/columns/pixels predict from
+// reconstructed values. qcoeff/dqcoeff are cleared up front and filled
+// in raster order; returns the eob computed over the scan order.
+// src/dst are packed pointers to uint16_t data; dst16 is the unpacked
+// view used for the prediction copies. 'logsizeby32' selects the
+// quantizer rounding variant, 'bd' the clamping bit depth. Modes other
+// than H/V/TM leave the coefficient arrays zeroed (eob from zeros).
+static int vp9_highbd_dpcm_intra(uint8_t *src, int src_stride,
+                                 uint8_t *dst, int dst_stride,
+                                 int16_t *src_diff, int diff_stride,
+                                 tran_low_t *coeff, tran_low_t *qcoeff,
+                                 tran_low_t *dqcoeff,
+                                 struct macroblock_plane *p,
+                                 struct macroblockd_plane *pd,
+                                 const scan_order *scan_order,
+                                 PREDICTION_MODE mode, TX_SIZE tx_size,
+                                 int shift, int logsizeby32, int bd) {
+  int i, j, eob, temp;
+  const int bs = 4 << tx_size;
+  uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
+
+  vpx_memset(qcoeff, 0, bs * bs * sizeof(*qcoeff));
+  vpx_memset(dqcoeff, 0, bs * bs * sizeof(*dqcoeff));
+
+  switch (mode) {
+    case H_PRED:
+      // Process left-to-right, one column per iteration. After column i
+      // is reconstructed it is copied into column i+1 as the predictor
+      // for the next iteration.
+      for (i = 0 ; i < bs; i++) {
+        vp9_highbd_subtract_block_c(bs, 1, src_diff + i, diff_stride,
+                                    src + i, src_stride, dst + i,
+                                    dst_stride, bd);
+        vp9_tx_identity_rect(src_diff + i, coeff + i, bs, 1,
+                             diff_stride, bs, shift);
+        // i == 0: first column may use the DC rounding path.
+        vp9_quantize_rect(coeff + i, bs, 1, p->zbin, p->round, p->quant,
+                          p->quant_shift, qcoeff + i, dqcoeff + i,
+                          pd->dequant, logsizeby32, bs, i == 0, 1);
+        vp9_highbd_tx_identity_add_rect(dqcoeff + i, dst + i, bs, 1,
+                                        bs, dst_stride, shift, bd);
+        if (i < bs - 1)
+          for (j = 0 ; j < bs; j++)
+            *(dst16 + j * dst_stride + i + 1) =
+                *(dst16 + j * dst_stride + i);
+      }
+      break;
+    case V_PRED:
+      // Process top-to-bottom, one row per iteration; the reconstructed
+      // row is copied down as the predictor for the next row.
+      for (i = 0 ; i < bs; i++) {
+        vp9_highbd_subtract_block_c(1, bs, src_diff + diff_stride * i,
+                                    diff_stride, src + src_stride * i,
+                                    src_stride, dst + dst_stride * i,
+                                    dst_stride, bd);
+        vp9_tx_identity_rect(src_diff + diff_stride * i, coeff + bs * i, 1, bs,
+                             diff_stride, bs, shift);
+        vp9_quantize_rect(coeff + bs * i, 1, bs, p->zbin, p->round, p->quant,
+                          p->quant_shift, qcoeff + bs * i, dqcoeff + bs * i,
+                          pd->dequant, logsizeby32, bs, i == 0, 1);
+        vp9_highbd_tx_identity_add_rect(dqcoeff + bs * i, dst + dst_stride * i,
+                                        1, bs, bs, dst_stride, shift, bd);
+        if (i < bs - 1)
+          vpx_memcpy(dst16 + (i + 1) * dst_stride,
+                     dst16 + i * dst_stride, bs * sizeof(dst16[0]));
+      }
+      break;
+    case TM_PRED:
+      // First the top row, then the remaining first column, each encoded
+      // and reconstructed against the existing prediction in dst.
+      vp9_highbd_subtract_block_c(1, bs, src_diff, diff_stride, src, src_stride,
+                                  dst, dst_stride, bd);
+      vp9_tx_identity_rect(src_diff, coeff, 1, bs, diff_stride, bs, shift);
+      vp9_quantize_rect(coeff, 1, bs, p->zbin, p->round, p->quant,
+                        p->quant_shift, qcoeff, dqcoeff, pd->dequant,
+                        logsizeby32, bs, 1, 1);
+      vp9_highbd_tx_identity_add_rect(dqcoeff, dst, 1, bs, bs, dst_stride,
+                                      shift, bd);
+      vp9_highbd_subtract_block_c(bs -1, 1, src_diff + diff_stride, diff_stride,
+                                  src + src_stride, src_stride,
+                                  dst + dst_stride, dst_stride, bd);
+      vp9_tx_identity_rect(src_diff + diff_stride, coeff + bs, bs - 1, 1,
+                           diff_stride, bs, shift);
+      vp9_quantize_rect(coeff + bs, bs - 1, 1, p->zbin, p->round, p->quant,
+                        p->quant_shift, qcoeff + bs, dqcoeff + bs,
+                        pd->dequant, logsizeby32, bs, 0, 1);
+      vp9_highbd_tx_identity_add_rect(dqcoeff + bs, dst + dst_stride, bs - 1, 1,
+                                      bs, dst_stride, shift, bd);
+
+      // Interior: form the TM gradient predictor from reconstructed
+      // neighbors, then subtract/quantize/reconstruct one pixel at a time
+      // so each pixel predicts from fully reconstructed values.
+      for (i = 1 ; i < bs; i++) {
+        for (j = 1 ; j < bs; j++) {
+          temp = dst16[(i - 1) * dst_stride + j] +
+                 dst16[i * dst_stride + j - 1] -
+                 dst16[(i - 1) * dst_stride + j - 1];
+          dst16[i * dst_stride + j] = clip_pixel_highbd(temp, bd);
+          vp9_highbd_subtract_block_c(1, 1, src_diff + diff_stride * i + j,
+                                      diff_stride, src + src_stride * i + j,
+                                      src_stride, dst + dst_stride * i + j,
+                                      dst_stride, bd);
+          vp9_tx_identity_rect(src_diff + i * diff_stride + j,
+                               coeff + bs * i + j, 1, 1, diff_stride,
+                               bs, shift);
+          vp9_quantize_rect(coeff + bs * i + j, 1, 1, p->zbin, p->round,
+                            p->quant, p->quant_shift, qcoeff + bs * i + j,
+                            dqcoeff + bs * i + j, pd->dequant,
+                            logsizeby32, bs, 0, 1);
+          vp9_highbd_tx_identity_add_rect(dqcoeff + bs * i + j,
+                                          dst + dst_stride * i + j, 1, 1, bs,
+                                          dst_stride, shift, bd);
+        }
+      }
+      break;
+    default:
+      break;
+  }
+
+  eob = get_eob(qcoeff, bs * bs, scan_order->scan);
+  return eob;
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_TX_SKIP
static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
if (mbmi->tx_skip[plane != 0]) {
int shift = mbmi->tx_skip_shift;
int bs = 4 << tx_size;
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_NEW_QUANT
band = vp9_coefband_tx_skip;
#endif // CONFIG_NEW_QUANT
dst, dst_stride, i, j, plane);
if (!x->skip_recode && tx_size <= TX_32X32 &&
- (mode == V_PRED || mode == H_PRED || mode == TM_PRED)) {
+ (mode == H_PRED || mode == V_PRED || mode == TM_PRED)) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ *eob = vp9_highbd_dpcm_intra(src, src_stride, dst, dst_stride,
+ src_diff, diff_stride,
+ coeff, qcoeff, dqcoeff, p, pd,
+ scan_order, mode, tx_size, shift,
+ tx_size > TX_16X16 ? 0 : -1, xd->bd);
+ else
+ *eob = vp9_dpcm_intra(src, src_stride, dst, dst_stride,
+ src_diff, diff_stride,
+ coeff, qcoeff, dqcoeff, p, pd,
+ scan_order, mode, tx_size, shift,
+ tx_size > TX_16X16 ? 0 : -1);
+#else
*eob = vp9_dpcm_intra(src, src_stride, dst, dst_stride,
src_diff, diff_stride,
coeff, qcoeff, dqcoeff, p, pd,
scan_order, mode, tx_size, shift,
tx_size > TX_16X16 ? 0 : -1);
+#endif // CONFIG_VP9_HIGHBITDEPTH
if (*eob)
*(args->skip) = 0;
return;
}
+
if (!x->skip_recode) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd) {
+ vp9_highbd_subtract_block(bs, bs, src_diff, diff_stride,
+ src, src_stride, dst, dst_stride, xd->bd);
+ vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
+ } else {
+ vp9_subtract_block(bs, bs, src_diff, diff_stride,
+ src, src_stride, dst, dst_stride);
+ vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
+ }
+#else
vp9_subtract_block(bs, bs, src_diff, diff_stride,
src, src_stride, dst, dst_stride);
vp9_tx_identity(src_diff, coeff, diff_stride, bs, shift);
+#endif // CONFIG_VP9_HIGHBITDEPTH
if (tx_size <= TX_16X16) {
#if CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd) {
+ if (x->quant_fp)
+ vp9_highbd_quantize_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_highbd_quantize_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ } else {
+ if (x->quant_fp)
+ vp9_quantize_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_quantize_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ }
+#else // CONFIG_VP9_HIGHBITDEPTH
if (x->quant_fp)
vp9_quantize_fp_nuq(coeff, bs * bs, x->skip_block,
p->quant_fp, pd->dequant,
pd->dequant_val_nuq,
qcoeff, dqcoeff, eob,
scan_order->scan, band);
-#else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#else // CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift, qcoeff,
+ dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+ vp9_quantize_b(coeff, bs * bs, x->skip_block, p->zbin, p->round,
+ p->quant, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
+#else // CONFIG_VP9_HIGHBITDEPTH
vp9_quantize_b(coeff, bs * bs, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
pd->dequant, eob, scan_order->scan,
scan_order->iscan);
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_NEW_QUANT
} else if (tx_size == TX_32X32) {
#if CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd) {
+ if (x->quant_fp)
+ vp9_highbd_quantize_32x32_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)p->
+ cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_highbd_quantize_32x32_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)
+ p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ } else {
+ if (x->quant_fp)
+ vp9_quantize_32x32_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_quantize_32x32_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ }
+#else // CONFIG_VP9_HIGHBITDEPTH
if (x->quant_fp)
vp9_quantize_32x32_fp_nuq(coeff, bs * bs, x->skip_block,
p->quant_fp, pd->dequant,
pd->dequant_val_nuq,
qcoeff, dqcoeff, eob,
scan_order->scan, band);
-#else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#else // CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+ vp9_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift, qcoeff,
+ dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+#else // CONFIG_VP9_HIGHBITDEPTH
vp9_quantize_b_32x32(coeff, bs * bs, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_NEW_QUANT
}
#if CONFIG_TX64X64
else if (tx_size == TX_64X64) {
#if CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd) {
+ if (x->quant_fp)
+ vp9_highbd_quantize_64x64_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)
+ p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_highbd_quantize_64x64_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)
+ p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ } else {
+ if (x->quant_fp)
+ vp9_quantize_64x64_fp_nuq(coeff, bs * bs, x->skip_block,
+ p->quant_fp, pd->dequant,
+ (const cumbins_type_nuq *)
+ p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ else
+ vp9_quantize_64x64_nuq(coeff, bs * bs, x->skip_block,
+ p->quant, p->quant_shift, pd->dequant,
+ (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const dequant_val_type_nuq *)
+ pd->dequant_val_nuq,
+ qcoeff, dqcoeff, eob,
+ scan_order->scan, band);
+ }
+#else
if (x->quant_fp)
vp9_quantize_64x64_fp_nuq(coeff, bs * bs, x->skip_block,
p->quant_fp, pd->dequant,
- (const cumbins_type_nuq *)p->cumbins_nuq,
+ (const cumbins_type_nuq *)
+ p->cumbins_nuq,
(const dequant_val_type_nuq *)
pd->dequant_val_nuq,
qcoeff, dqcoeff, eob,
pd->dequant_val_nuq,
qcoeff, dqcoeff, eob,
scan_order->scan, band);
-#else
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#else // CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ else
+ vp9_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift, qcoeff,
+ dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+#else // CONFIG_VP9_HIGHBITDEPTH
vp9_quantize_b_64x64(coeff, bs * bs, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_NEW_QUANT
}
#endif // CONFIG_TX64X64
}
- if (!x->skip_encode && *eob)
+ if (!x->skip_encode && *eob) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (use_hbd)
+ vp9_highbd_tx_identity_add(dqcoeff, dst, dst_stride, 4 << tx_size,
+ shift, xd->bd);
+ else
+ vp9_tx_identity_add(dqcoeff, dst, dst_stride, 4 << tx_size, shift);
+#else
vp9_tx_identity_add(dqcoeff, dst, dst_stride, 4 << tx_size, shift);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
if (*eob)
*(args->skip) = 0;