#define VP9_COMMON_VP9_BLOCKD_H_
#include "./vpx_config.h"
+
+#include "vpx_ports/mem.h"
#include "vpx_scale/yv12config.h"
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_common_data.h"
#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/vp9_enums.h"
#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_treecoder.h"
-#include "vpx_ports/mem.h"
-#include "vp9/common/vp9_common.h"
-#include "vp9/common/vp9_enums.h"
-#include "vp9/common/vp9_common_data.h"
#define BLOCK_SIZE_GROUPS 4
-#define MAX_MB_SEGMENTS 8
-#define MB_SEG_TREE_PROBS (MAX_MB_SEGMENTS-1)
#define PREDICTION_PROBS 3
#define MAX_MODE_LF_DELTAS 2
-/* Segment Feature Masks */
-#define SEGMENT_DELTADATA 0
-#define SEGMENT_ABSDATA 1
#define MAX_MV_REF_CANDIDATES 2
#define INTRA_INTER_CONTEXTS 4
return mode >= NEARESTMV && mode <= NEWMV;
}
-// Segment level features.
-typedef enum {
- SEG_LVL_ALT_Q = 0, // Use alternate Quantizer ....
- SEG_LVL_ALT_LF = 1, // Use alternate loop filter value...
- SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame
- SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode
- SEG_LVL_MAX = 4 // Number of MB level features supported
-} SEG_LVL_FEATURES;
-
// Transform sizes.
typedef enum {
TX_4X4 = 0, // 4x4 dct transform
int left_available;
int right_available;
+ struct segmentation seg;
+
// partition contexts
PARTITION_CONTEXT *above_seg_context;
PARTITION_CONTEXT *left_seg_context;
- /* 0 (disable) 1 (enable) segmentation */
- unsigned char segmentation_enabled;
-
- /* 0 (do not update) 1 (update) the macroblock segmentation map. */
- unsigned char update_mb_segmentation_map;
-
- /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
- unsigned char update_mb_segmentation_data;
-
- /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
- unsigned char mb_segment_abs_delta;
-
- /* Per frame flags that define which MB level features (such as quantizer or loop filter level) */
- /* are enabled and when enabled the proabilities used to decode the per MB flags in MB_MODE_INFO */
-
- // Probability Tree used to code Segment number
- vp9_prob mb_segment_tree_probs[MB_SEG_TREE_PROBS];
-
- // Segment features
- int16_t segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
- unsigned int segment_feature_mask[MAX_MB_SEGMENTS];
-
/* mode_based Loop filter adjustment */
unsigned char mode_ref_lf_delta_enabled;
unsigned char mode_ref_lf_delta_update;
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
int i;
- vp9_clearall_segfeatures(xd);
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ vp9_clearall_segfeatures(&xd->seg);
+ xd->seg.abs_delta = SEGMENT_DELTADATA;
if (cm->last_frame_seg_map)
vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
int lvl_seg = default_filt_lvl, ref, mode, intra_lvl;
// Set the baseline filter values for each segment
- if (vp9_segfeature_active(xd, seg, SEG_LVL_ALT_LF)) {
- const int data = vp9_get_segdata(xd, seg, SEG_LVL_ALT_LF);
- lvl_seg = xd->mb_segment_abs_delta == SEGMENT_ABSDATA
+ if (vp9_segfeature_active(&xd->seg, seg, SEG_LVL_ALT_LF)) {
+ const int data = vp9_get_segdata(&xd->seg, seg, SEG_LVL_ALT_LF);
+ lvl_seg = xd->seg.abs_delta == SEGMENT_ABSDATA
? data
: clamp(default_filt_lvl + data, 0, MAX_LOOP_FILTER);
}
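/* Worked example (editorial aside, not part of the patch): with delta coding,
 * i.e. xd->seg.abs_delta == SEGMENT_DELTADATA, a default_filt_lvl of 32 and a
 * SEG_LVL_ALT_LF value of -6 give
 *   lvl_seg = clamp(32 + (-6), 0, MAX_LOOP_FILTER) = 26,
 * whereas with SEGMENT_ABSDATA the stored value is used as the level directly. */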
#include "vpx_ports/mem.h"
#include "vpx_config.h"
+
#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_seg_common.h"
#define MAX_LOOP_FILTER 63
#define MAX_SHARPNESS 7
[VP9_INTRA_MODES - 1];
vp9_prob kf_uv_mode_prob[VP9_INTRA_MODES] [VP9_INTRA_MODES - 1];
- // Context probabilities when using predictive coding of segment id
- vp9_prob segment_pred_probs[PREDICTION_PROBS];
- unsigned char temporal_update;
-
// Context probabilities for reference frame prediction
int allow_comp_inter_inter;
MV_REFERENCE_FRAME comp_fixed_ref;
static INLINE vp9_prob vp9_get_pred_prob_seg_id(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
const int pred_context = vp9_get_pred_context_seg_id(cm, xd);
- return cm->segment_pred_probs[pred_context];
+ return xd->seg.pred_probs[pred_context];
}
static INLINE unsigned char vp9_get_pred_flag_seg_id(
const MACROBLOCKD * const xd) {
int vp9_get_qindex(MACROBLOCKD *xd, int segment_id, int base_qindex) {
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_ALT_Q)) {
- const int data = vp9_get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
- return xd->mb_segment_abs_delta == SEGMENT_ABSDATA ?
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_ALT_Q)) {
+ const int data = vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_ALT_Q);
+ return xd->seg.abs_delta == SEGMENT_ABSDATA ?
data : // Abs value
clamp(base_qindex + data, 0, MAXQ); // Delta value
} else {
*/
#include <assert.h>
+
#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_loopfilter.h"
#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_quant_common.h"
static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 };
// the coding mechanism is still subject to change so these provide a
// convenient single point of change.
-int vp9_segfeature_active(const MACROBLOCKD *xd, int segment_id,
+int vp9_segfeature_active(const struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
- return xd->segmentation_enabled &&
- (xd->segment_feature_mask[segment_id] & (1 << feature_id));
+ return seg->enabled &&
+ (seg->feature_mask[segment_id] & (1 << feature_id));
}
-void vp9_clearall_segfeatures(MACROBLOCKD *xd) {
- vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
- vpx_memset(xd->segment_feature_mask, 0, sizeof(xd->segment_feature_mask));
+void vp9_clearall_segfeatures(struct segmentation *seg) {
+ vpx_memset(seg->feature_data, 0, sizeof(seg->feature_data));
+ vpx_memset(seg->feature_mask, 0, sizeof(seg->feature_mask));
}
-void vp9_enable_segfeature(MACROBLOCKD *xd, int segment_id,
+void vp9_enable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
- xd->segment_feature_mask[segment_id] |= 1 << feature_id;
+ seg->feature_mask[segment_id] |= 1 << feature_id;
}
-void vp9_disable_segfeature(MACROBLOCKD *xd, int segment_id,
+void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
- xd->segment_feature_mask[segment_id] &= ~(1 << feature_id);
+ seg->feature_mask[segment_id] &= ~(1 << feature_id);
}
int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
  return seg_feature_data_max[feature_id];
}
-void vp9_clear_segdata(MACROBLOCKD *xd, int segment_id,
+void vp9_clear_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
- xd->segment_feature_data[segment_id][feature_id] = 0;
+ seg->feature_data[segment_id][feature_id] = 0;
}
-void vp9_set_segdata(MACROBLOCKD *xd, int segment_id,
+void vp9_set_segdata(struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id, int seg_data) {
assert(seg_data <= seg_feature_data_max[feature_id]);
if (seg_data < 0) {
assert(-seg_data <= seg_feature_data_max[feature_id]);
}
- xd->segment_feature_data[segment_id][feature_id] = seg_data;
+ seg->feature_data[segment_id][feature_id] = seg_data;
}
-int vp9_get_segdata(const MACROBLOCKD *xd, int segment_id,
+int vp9_get_segdata(const struct segmentation *seg, int segment_id,
SEG_LVL_FEATURES feature_id) {
- return xd->segment_feature_data[segment_id][feature_id];
+ return seg->feature_data[segment_id][feature_id];
}
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vp9/common/vp9_onyxc_int.h"
-#include "vp9/common/vp9_blockd.h"
-
#ifndef VP9_COMMON_VP9_SEG_COMMON_H_
#define VP9_COMMON_VP9_SEG_COMMON_H_
-int vp9_segfeature_active(const MACROBLOCKD *xd,
+#include "vp9/common/vp9_treecoder.h"
+
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+
+#define MAX_MB_SEGMENTS 8
+#define MB_SEG_TREE_PROBS (MAX_MB_SEGMENTS-1)
+
+#define PREDICTION_PROBS 3
+
+// Segment level features.
+typedef enum {
+ SEG_LVL_ALT_Q = 0, // Use alternate Quantizer ....
+ SEG_LVL_ALT_LF = 1, // Use alternate loop filter value...
+ SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame
+ SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode
+ SEG_LVL_MAX = 4 // Number of MB level features supported
+} SEG_LVL_FEATURES;
+
+struct segmentation {
+ uint8_t enabled;
+ uint8_t update_map;
+ uint8_t update_data;
+ uint8_t abs_delta;
+ uint8_t temporal_update;
+
+ vp9_prob tree_probs[MB_SEG_TREE_PROBS];
+ vp9_prob pred_probs[PREDICTION_PROBS];
+
+ int16_t feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
+ unsigned int feature_mask[MAX_MB_SEGMENTS];
+};
+
+int vp9_segfeature_active(const struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id);
-void vp9_clearall_segfeatures(MACROBLOCKD *xd);
+void vp9_clearall_segfeatures(struct segmentation *seg);
-void vp9_enable_segfeature(MACROBLOCKD *xd,
+void vp9_enable_segfeature(struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id);
-void vp9_disable_segfeature(MACROBLOCKD *xd,
+void vp9_disable_segfeature(struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id);
int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
-void vp9_clear_segdata(MACROBLOCKD *xd,
+void vp9_clear_segdata(struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id);
-void vp9_set_segdata(MACROBLOCKD *xd,
+void vp9_set_segdata(struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id,
int seg_data);
-int vp9_get_segdata(const MACROBLOCKD *xd,
+int vp9_get_segdata(const struct segmentation *seg,
int segment_id,
SEG_LVL_FEATURES feature_id);
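/* Illustrative sketch (editorial aside, not part of the patch): how a caller
 * might configure and query the new struct segmentation through the accessors
 * declared above. The function name and the delta values are hypothetical;
 * only the struct fields and the vp9_ segmentation helpers come from this
 * header. */
static void example_configure_segmentation(struct segmentation *seg) {
  vp9_clearall_segfeatures(seg);       // zero feature_data and feature_mask
  seg->enabled = 1;
  seg->abs_delta = SEGMENT_DELTADATA;  // feature values are deltas

  // Segment 1: quantizer delta of -8 and loop filter delta of -2.
  vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
  vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, -8);
  vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
  vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);

  // Consumers test the feature bit before reading its value.
  if (vp9_segfeature_active(seg, 1, SEG_LVL_ALT_Q)) {
    const int q_delta = vp9_get_segdata(seg, 1, SEG_LVL_ALT_Q);
    (void)q_delta;  // e.g. used as clamp(base_qindex + q_delta, 0, MAXQ)
  }
}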
return (MB_PREDICTION_MODE)treed_read(r, vp9_sb_mv_ref_tree, p);
}
-static int read_segment_id(vp9_reader *r, MACROBLOCKD *xd) {
- return treed_read(r, vp9_segment_tree, xd->mb_segment_tree_probs);
+static int read_segment_id(vp9_reader *r, const struct segmentation *seg) {
+ return treed_read(r, vp9_segment_tree, seg->tree_probs);
}
static TX_SIZE read_selected_txfm_size(VP9_COMMON *cm, MACROBLOCKD *xd,
static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_reader *r) {
- VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
+ struct segmentation *const seg = &xd->seg;
const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
- if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
- const int segment_id = read_segment_id(r, xd);
- set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
+ if (seg->enabled && seg->update_map) {
+ const int segment_id = read_segment_id(r, seg);
+ set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id);
return segment_id;
} else {
return 0;
static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- int skip_coeff = vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+ int skip_coeff = vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP);
if (!skip_coeff) {
const uint8_t ctx = vp9_get_pred_context_mbskip(cm, xd);
skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd));
MACROBLOCKD *const xd = &pbi->mb;
FRAME_CONTEXT *const fc = &cm->fc;
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
- ref_frame[0] = vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME);
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME)) {
+ ref_frame[0] = vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME);
ref_frame[1] = NONE;
} else {
const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd);
vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
+ struct segmentation *const seg = &xd->seg;
const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
int pred_segment_id;
int segment_id;
- if (!xd->segmentation_enabled)
+ if (!seg->enabled)
return 0; // Default for disabled segmentation
pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
- bsize, mi_row, mi_col);
- if (!xd->update_mb_segmentation_map)
+ bsize, mi_row, mi_col);
+ if (!seg->update_map)
return pred_segment_id;
- if (cm->temporal_update) {
+
+ if (seg->temporal_update) {
const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(cm, xd);
const int pred_flag = vp9_read(r, pred_prob);
vp9_set_pred_flag_seg_id(xd, bsize, pred_flag);
segment_id = pred_flag ? pred_segment_id
- : read_segment_id(r, xd);
+ : read_segment_id(r, seg);
} else {
- segment_id = read_segment_id(r, xd);
+ segment_id = read_segment_id(r, seg);
}
set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
return segment_id;
MACROBLOCKD *const xd = &pbi->mb;
MV_REFERENCE_FRAME ref;
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
+ if (!vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME)) {
const int ctx = vp9_get_pred_context_intra_inter(cm, xd);
ref = (MV_REFERENCE_FRAME)
vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd));
cm->fc.intra_inter_count[ctx][ref != INTRA_FRAME]++;
} else {
- ref = (MV_REFERENCE_FRAME)
- vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
+ ref = (MV_REFERENCE_FRAME) vp9_get_segdata(&xd->seg, segment_id,
+ SEG_LVL_REF_FRAME) != INTRA_FRAME;
}
return ref;
}
mv_ref_p = cm->fc.inter_mode_probs[mbmi->mb_mode_context[ref0]];
- if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
+ if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
} else if (bsize >= BLOCK_SIZE_SB8X8) {
mbmi->mode = read_inter_mode(r, mv_ref_p);
vp9_reset_sb_tokens_context(xd, bsize);
return -1;
} else {
- if (xd->segmentation_enabled)
+ if (xd->seg.enabled)
mb_init_dequantizer(&pbi->common, xd);
// TODO(dkovalev) if (!vp9_reader_has_error(r))
read_coef_probs_common(fc, TX_32X32, r);
}
-static void setup_segmentation(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+static void setup_segmentation(struct segmentation *seg,
+ struct vp9_read_bit_buffer *rb) {
int i, j;
- VP9_COMMON *const cm = &pbi->common;
- MACROBLOCKD *const xd = &pbi->mb;
-
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ seg->update_map = 0;
+ seg->update_data = 0;
- xd->segmentation_enabled = vp9_rb_read_bit(rb);
- if (!xd->segmentation_enabled)
+ seg->enabled = vp9_rb_read_bit(rb);
+ if (!seg->enabled)
return;
// Segmentation map update
- xd->update_mb_segmentation_map = vp9_rb_read_bit(rb);
- if (xd->update_mb_segmentation_map) {
+ seg->update_map = vp9_rb_read_bit(rb);
+ if (seg->update_map) {
for (i = 0; i < MB_SEG_TREE_PROBS; i++)
- xd->mb_segment_tree_probs[i] = vp9_rb_read_bit(rb) ?
- vp9_rb_read_literal(rb, 8) : MAX_PROB;
+ seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
+ : MAX_PROB;
- cm->temporal_update = vp9_rb_read_bit(rb);
- if (cm->temporal_update) {
+ seg->temporal_update = vp9_rb_read_bit(rb);
+ if (seg->temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++)
- cm->segment_pred_probs[i] = vp9_rb_read_bit(rb) ?
- vp9_rb_read_literal(rb, 8) : MAX_PROB;
+ seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
+ : MAX_PROB;
} else {
for (i = 0; i < PREDICTION_PROBS; i++)
- cm->segment_pred_probs[i] = MAX_PROB;
+ seg->pred_probs[i] = MAX_PROB;
}
}
// Segmentation data update
- xd->update_mb_segmentation_data = vp9_rb_read_bit(rb);
- if (xd->update_mb_segmentation_data) {
- xd->mb_segment_abs_delta = vp9_rb_read_bit(rb);
+ seg->update_data = vp9_rb_read_bit(rb);
+ if (seg->update_data) {
+ seg->abs_delta = vp9_rb_read_bit(rb);
- vp9_clearall_segfeatures(xd);
+ vp9_clearall_segfeatures(seg);
for (i = 0; i < MAX_MB_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
int data = 0;
const int feature_enabled = vp9_rb_read_bit(rb);
if (feature_enabled) {
- vp9_enable_segfeature(xd, i, j);
+ vp9_enable_segfeature(seg, i, j);
data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
if (vp9_is_segfeature_signed(j))
data = vp9_rb_read_bit(rb) ? -data : data;
}
- vp9_set_segdata(xd, i, j, data);
+ vp9_set_segdata(seg, i, j, data);
}
}
}
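/* Editorial summary of the segmentation header syntax read above (derived from
 * this function only):
 *   1 bit  seg->enabled; if set:
 *     1 bit  seg->update_map; if set:
 *       MB_SEG_TREE_PROBS x (1-bit update flag [+ 8-bit prob])  -> tree_probs
 *       1 bit  seg->temporal_update; if set:
 *         PREDICTION_PROBS x (1-bit update flag [+ 8-bit prob]) -> pred_probs
 *     1 bit  seg->update_data; if set:
 *       1 bit  seg->abs_delta
 *       MAX_MB_SEGMENTS x SEG_LVL_MAX x (1-bit enabled
 *         [+ unsigned value, + sign bit if the feature is signed]) */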
setup_loopfilter(pbi, rb);
setup_quantization(pbi, rb);
- setup_segmentation(pbi, rb);
+ setup_segmentation(&pbi->mb.seg, rb);
setup_tile_info(cm, rb);
return c;
}
-static int get_eob(MACROBLOCKD* const xd, int segment_id, int eob_max) {
- return vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
+static int get_eob(struct segmentation *seg, int segment_id, int eob_max) {
+ return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
}
struct decode_block_args {
struct macroblockd_plane* pd = &xd->plane[plane];
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const TX_SIZE ss_tx_size = ss_txfrm_size / 2;
- const int seg_eob = get_eob(xd, segment_id, 16 << ss_txfrm_size);
+ const int seg_eob = get_eob(&xd->seg, segment_id, 16 << ss_txfrm_size);
const int off = block >> ss_txfrm_size;
const int mod = bw - ss_tx_size - pd->subsampling_x;
const int aoff = (off & ((1 << mod) - 1)) << ss_tx_size;
static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
vp9_writer *w) {
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int skip_coeff = m->mbmi.mb_skip_coeff;
}
-static void write_segment_id(vp9_writer *w, const MACROBLOCKD *xd,
+static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
int segment_id) {
- if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
- treed_write(w, vp9_segment_tree, xd->mb_segment_tree_probs, segment_id, 3);
+ if (seg->enabled && seg->update_map)
+ treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3);
}
// This function encodes the reference frame
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
const int segment_id = mi->segment_id;
- int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ int seg_ref_active = vp9_segfeature_active(&xd->seg, segment_id,
SEG_LVL_REF_FRAME);
// If segment level coding of this signal is disabled...
// or the segment allows multiple reference frame options
}
} else {
assert(mi->ref_frame[1] <= INTRA_FRAME);
- assert(vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) ==
+ assert(vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME) ==
mi->ref_frame[0]);
}
const nmv_context *nmvc = &pc->fc.nmvc;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
+ struct segmentation *seg = &xd->seg;
MB_MODE_INFO *const mi = &m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
const MB_PREDICTION_MODE mode = mi->mode;
active_section = 9;
#endif
- if (cpi->mb.e_mbd.update_mb_segmentation_map) {
- // Is temporal coding of the segment map enabled
- if (pc->temporal_update) {
+ if (seg->update_map) {
+ if (seg->temporal_update) {
unsigned char prediction_flag = vp9_get_pred_flag_seg_id(xd);
vp9_prob pred_prob = vp9_get_pred_prob_seg_id(pc, xd);
-
- // Code the segment id prediction flag for this mb
vp9_write(bc, prediction_flag, pred_prob);
-
- // If the mb segment id wasn't predicted code explicitly
if (!prediction_flag)
- write_segment_id(bc, xd, mi->segment_id);
+ write_segment_id(bc, seg, mi->segment_id);
} else {
- // Normal unpredicted coding
- write_segment_id(bc, xd, mi->segment_id);
+ write_segment_id(bc, seg, mi->segment_id);
}
}
skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME))
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
vp9_write(bc, rf != INTRA_FRAME,
vp9_get_pred_prob_intra_inter(pc, xd));
if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
- (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
+ (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
write_selected_txfm_size(cpi, mi->txfm_size, mi->sb_type, bc);
}
#endif
// If segment skip is not enabled code the mode.
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
const int mis = c->mode_info_stride;
const int segment_id = m->mbmi.segment_id;
- if (xd->update_mb_segmentation_map)
- write_segment_id(bc, xd, m->mbmi.segment_id);
+ if (xd->seg.update_map)
+ write_segment_id(bc, &xd->seg, m->mbmi.segment_id);
write_skip_coeff(cpi, segment_id, m, bc);
static void encode_segmentation(VP9_COMP *cpi,
- struct vp9_write_bit_buffer *wb) {
+ struct vp9_write_bit_buffer *wb) {
int i, j;
- VP9_COMMON *const cm = &cpi->common;
- MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- vp9_wb_write_bit(wb, xd->segmentation_enabled);
- if (!xd->segmentation_enabled)
+ struct segmentation *seg = &cpi->mb.e_mbd.seg;
+
+ vp9_wb_write_bit(wb, seg->enabled);
+ if (!seg->enabled)
return;
// Segmentation map
- vp9_wb_write_bit(wb, xd->update_mb_segmentation_map);
- if (xd->update_mb_segmentation_map) {
+ vp9_wb_write_bit(wb, seg->update_map);
+ if (seg->update_map) {
// Select the coding strategy (temporal or spatial)
vp9_choose_segmap_coding_method(cpi);
// Write out probabilities used to decode unpredicted macro-block segments
for (i = 0; i < MB_SEG_TREE_PROBS; i++) {
- const int prob = xd->mb_segment_tree_probs[i];
+ const int prob = seg->tree_probs[i];
const int update = prob != MAX_PROB;
vp9_wb_write_bit(wb, update);
if (update)
}
// Write out the chosen coding method.
- vp9_wb_write_bit(wb, cm->temporal_update);
- if (cm->temporal_update) {
+ vp9_wb_write_bit(wb, seg->temporal_update);
+ if (seg->temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++) {
- const int prob = cm->segment_pred_probs[i];
+ const int prob = seg->pred_probs[i];
const int update = prob != MAX_PROB;
vp9_wb_write_bit(wb, update);
if (update)
}
// Segmentation data
- vp9_wb_write_bit(wb, xd->update_mb_segmentation_data);
- if (xd->update_mb_segmentation_data) {
- vp9_wb_write_bit(wb, xd->mb_segment_abs_delta);
+ vp9_wb_write_bit(wb, seg->update_data);
+ if (seg->update_data) {
+ vp9_wb_write_bit(wb, seg->abs_delta);
for (i = 0; i < MAX_MB_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
- const int active = vp9_segfeature_active(xd, i, j);
+ const int active = vp9_segfeature_active(seg, i, j);
vp9_wb_write_bit(wb, active);
if (active) {
- const int data = vp9_get_segdata(xd, i, j);
+ const int data = vp9_get_segdata(seg, i, j);
const int data_max = vp9_seg_feature_data_max(j);
if (vp9_is_segfeature_signed(j)) {
if (!output_enabled)
return;
- if (!vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
+ if (!vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
}
x->rdmult = cpi->RDMULT;
/* segment ID */
- if (xd->segmentation_enabled) {
- uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ if (xd->seg.enabled) {
+ uint8_t *map = xd->seg.update_map ? cpi->segmentation_map
+ : cm->last_frame_seg_map;
mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
vp9_mb_init_quantizer(cpi, x);
- if (xd->segmentation_enabled && cpi->seg0_cnt > 0
- && !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME)
- && vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
+ if (xd->seg.enabled && cpi->seg0_cnt > 0
+ && !vp9_segfeature_active(&xd->seg, 0, SEG_LVL_REF_FRAME)
+ && vp9_segfeature_active(&xd->seg, 1, SEG_LVL_REF_FRAME)) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
const int y = mb_row & ~3;
MB_MODE_INFO * const mbmi = &mi->mbmi;
if (cm->frame_type != KEY_FRAME) {
- int segment_id, seg_ref_active;
-
- segment_id = mbmi->segment_id;
- seg_ref_active = vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME);
+ const int seg_ref_active = vp9_segfeature_active(&xd->seg, mbmi->segment_id,
+ SEG_LVL_REF_FRAME);
if (!seg_ref_active)
cpi->intra_inter_count[vp9_get_pred_context_intra_inter(cm, xd)][mbmi
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
- if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
+ if (vp9_segfeature_active(&xd->seg, 1, SEG_LVL_REF_FRAME)) {
return 0;
} else {
return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
xd->mode_info_context = mi;
- assert(
- vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP) ||
- get_skip_flag(mi, mis, ymbs, xmbs));
+ assert(vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
+ get_skip_flag(mi, mis, ymbs, xmbs));
set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
}
}
vp9_set_pred_flag_mbskip(xd, bsize, mi->mbmi.mb_skip_coeff);
if (output_enabled) {
- if (cm->txfm_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_SIZE_SB8X8
- && !(mbmi->ref_frame[0] != INTRA_FRAME
- && (mbmi->mb_skip_coeff
- || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ mbmi->sb_type >= BLOCK_SIZE_SB8X8 &&
+ !(mbmi->ref_frame[0] != INTRA_FRAME &&
+ (mbmi->mb_skip_coeff ||
+ vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
const int context = vp9_get_pred_context_tx_size(cm, xd);
if (bsize >= BLOCK_SIZE_SB32X32) {
cm->fc.tx_count_32x32p[context][mbmi->txfm_size]++;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Set up default state for MB feature flags
- xd->segmentation_enabled = 0;
+ xd->seg.enabled = 0;
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
+ vpx_memset(xd->seg.tree_probs, 255, sizeof(xd->seg.tree_probs));
- vp9_clearall_segfeatures(xd);
+ vp9_clearall_segfeatures(&xd->seg);
xd->mode_ref_lf_delta_enabled = 0;
xd->mode_ref_lf_delta_update = 0;
if (cm->frame_type == KEY_FRAME) {
// Clear down the global segmentation map
vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
cpi->static_mb_pct = 0;
// Disable segmentation
vp9_disable_segmentation((VP9_PTR)cpi);
// Clear down the segment features.
- vp9_clearall_segfeatures(xd);
+ vp9_clearall_segfeatures(&xd->seg);
} else if (cpi->refresh_alt_ref_frame) {
// If this is an alt ref frame
// Clear down the global segmentation map
vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
vp9_disable_segmentation((VP9_PTR)cpi);
- vp9_clearall_segfeatures(xd);
+ vp9_clearall_segfeatures(&xd->seg);
// Scan frames from current to arf frame.
// This function re-enables segmentation if appropriate.
// If segmentation was enabled set those features needed for the
// arf itself.
- if (xd->segmentation_enabled) {
- xd->update_mb_segmentation_map = 1;
- xd->update_mb_segmentation_data = 1;
+ if (xd->seg.enabled) {
+ xd->seg.update_map = 1;
+ xd->seg.update_data = 1;
qi_delta = compute_qdelta(cpi, cpi->avg_q, (cpi->avg_q * 0.875));
- vp9_set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta - 2));
- vp9_set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_ALT_Q, (qi_delta - 2));
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_ALT_LF, -2);
- vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
- vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_ALT_Q);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_ALT_LF);
// Where relevant assume segment data is delta data
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ xd->seg.abs_delta = SEGMENT_DELTADATA;
}
- } else if (xd->segmentation_enabled) {
+ } else if (xd->seg.enabled) {
// All other frames if segmentation has been enabled
// First normal frame in a valid gf or alt ref group
if (cpi->common.frames_since_golden == 0) {
// Set up segment features for normal frames in an arf group
if (cpi->source_alt_ref_active) {
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 1;
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 1;
+ xd->seg.abs_delta = SEGMENT_DELTADATA;
qi_delta = compute_qdelta(cpi, cpi->avg_q,
(cpi->avg_q * 1.125));
- vp9_set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta + 2));
- vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_ALT_Q, (qi_delta + 2));
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_ALT_Q);
- vp9_set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
- vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_ALT_LF, -2);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_ALT_LF);
// Segment coding disabled for compred testing
if (high_q || (cpi->static_mb_pct == 100)) {
- vp9_set_segdata(xd, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp9_enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
- vp9_enable_segfeature(xd, 1, SEG_LVL_SKIP);
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_SKIP);
}
} else {
// Disable segmentation and clear down features if alt ref
vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
- vp9_clearall_segfeatures(xd);
+ vp9_clearall_segfeatures(&xd->seg);
}
} else if (cpi->is_src_frame_alt_ref) {
// Special case where we are coding over the top of a previous
// Segment coding disabled for compred testing
// Enable ref frame features for segment 0 as well
- vp9_enable_segfeature(xd, 0, SEG_LVL_REF_FRAME);
- vp9_enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(&xd->seg, 0, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_REF_FRAME);
// All mbs should use ALTREF_FRAME
- vp9_clear_segdata(xd, 0, SEG_LVL_REF_FRAME);
- vp9_set_segdata(xd, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
- vp9_clear_segdata(xd, 1, SEG_LVL_REF_FRAME);
- vp9_set_segdata(xd, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_clear_segdata(&xd->seg, 0, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(&xd->seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_clear_segdata(&xd->seg, 1, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(&xd->seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
// Skip all MBs if high Q (0,0 mv and skip coeffs)
if (high_q) {
- vp9_enable_segfeature(xd, 0, SEG_LVL_SKIP);
- vp9_enable_segfeature(xd, 1, SEG_LVL_SKIP);
+ vp9_enable_segfeature(&xd->seg, 0, SEG_LVL_SKIP);
+ vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_SKIP);
}
// Enable data update
- xd->update_mb_segmentation_data = 1;
+ xd->seg.update_data = 1;
} else {
// All other frames.
// No updates.. leave things as they are.
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
}
}
}
setup_features(cpi);
// If segmentation is enabled force a map update for key frames
- if (xd->segmentation_enabled) {
- xd->update_mb_segmentation_map = 1;
- xd->update_mb_segmentation_data = 1;
+ if (xd->seg.enabled) {
+ xd->seg.update_map = 1;
+ xd->seg.update_data = 1;
}
// The alternate reference frame cannot be active for a key frame
cpi->dummy_packing = 0;
vp9_pack_bitstream(cpi, dest, size);
- if (xd->update_mb_segmentation_map) {
+ if (xd->seg.update_map)
update_reference_segmentation_map(cpi);
- }
release_scaled_references(cpi);
update_reference_frames(cpi);
}
// Clear the one shot update flags for segmentation map and mode/ref loop filter deltas.
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ xd->seg.update_map = 0;
+ xd->seg.update_data = 0;
xd->mode_ref_lf_delta_update = 0;
// keep track of the last coded dimensions
cpi->refresh_alt_ref_frame ||
cm->refresh_frame_context ||
mb->mode_ref_lf_delta_update ||
- mb->update_mb_segmentation_map ||
- mb->update_mb_segmentation_data;
+ mb->seg.update_map ||
+ mb->seg.update_data;
}
#if CONFIG_MULTIPLE_ARF
// Enable the loop and quant changes in the feature mask
for (i = 0; i < MAX_MB_SEGMENTS; i++) {
if (delta_q[i])
- vp9_enable_segfeature(xd, i, SEG_LVL_ALT_Q);
+ vp9_enable_segfeature(&xd->seg, i, SEG_LVL_ALT_Q);
else
- vp9_disable_segfeature(xd, i, SEG_LVL_ALT_Q);
+ vp9_disable_segfeature(&xd->seg, i, SEG_LVL_ALT_Q);
if (delta_lf[i])
- vp9_enable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ vp9_enable_segfeature(&xd->seg, i, SEG_LVL_ALT_LF);
else
- vp9_disable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ vp9_disable_segfeature(&xd->seg, i, SEG_LVL_ALT_LF);
}
// Initialise the feature data structure
x->e_mbd.plane[3].dequant = cpi->common.a_dequant[qindex];
#endif
- x->skip_block = vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+ x->skip_block = vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP);
/* save this macroblock QIndex for vp9_update_zbin_extra() */
x->e_mbd.q_index = qindex;
vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
vp9_copy(cc->partition_prob, cm->fc.partition_prob);
- vp9_copy(cc->segment_pred_probs, cm->segment_pred_probs);
+ vp9_copy(cc->segment_pred_probs, xd->seg.pred_probs);
vp9_copy(cc->intra_inter_prob, cm->fc.intra_inter_prob);
vp9_copy(cc->comp_inter_prob, cm->fc.comp_inter_prob);
vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
vp9_copy(cm->fc.partition_prob, cc->partition_prob);
- vp9_copy(cm->segment_pred_probs, cc->segment_pred_probs);
+ vp9_copy(xd->seg.pred_probs, cc->segment_pred_probs);
vp9_copy(cm->fc.intra_inter_prob, cc->intra_inter_prob);
vp9_copy(cm->fc.comp_inter_prob, cc->comp_inter_prob);
pt = combine_entropy_contexts(above_ec, left_ec);
nb = vp9_get_coef_neighbors_handle(scan);
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP))
seg_eob = 0;
/* sanity check to ensure that we do not have spurious non-zero q values */
int segment_id = xd->mode_info_context->mbmi.segment_id;
// Don't account for mode here if segment skip is enabled.
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ if (!vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)) {
assert(NEARESTMV <= m && m <= NEWMV);
return x->inter_mode_cost[mode_context][m - NEARESTMV];
} else
vp9_prob *comp_mode_p) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ int seg_ref_active = vp9_segfeature_active(&xd->seg, segment_id,
SEG_LVL_REF_FRAME);
if (seg_ref_active) {
vpx_memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
// Do not allow compound prediction if the segment level reference
// frame feature is in use as in this case there can only be one reference.
if ((vp9_mode_order[mode_index].second_ref_frame > INTRA_FRAME) &&
- vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME))
+ vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME))
continue;
x->skip = 0;
set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
scale_factor);
- mode_excluded =
- mode_excluded ?
- mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ mode_excluded = mode_excluded
+ ? mode_excluded
+ : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
} else {
// mbmi->ref_frame[1] = vp9_mode_order[mode_index].ref_frame[1];
if (ref_frame != INTRA_FRAME) {
// If the segment reference frame feature is enabled....
// then do nothing if the current ref frame is not allowed..
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
- vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME) &&
+ vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME) !=
+ (int)ref_frame) {
continue;
// If the segment skip feature is enabled....
// then do nothing if the current mode is not allowed..
- } else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) &&
+ } else if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP) &&
(this_mode != ZEROMV && ref_frame != INTRA_FRAME)) {
continue;
// Disable this drop out case if the ref frame
// segment level feature is enabled for this segment. This is to
// prevent the possibility that we end up unable to pick any mode.
- } else if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
+ } else if (!vp9_segfeature_active(&xd->seg, segment_id,
+ SEG_LVL_REF_FRAME)) {
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
// unless ARNR filtering is enabled in which case we want
// an unfiltered alternative
// because there are no non zero coefficients and make any
// necessary adjustment for rate. Ignore if skip is coded at
// segment level as the cost won't have been added in.
- int mb_skip_allowed;
-
// Is Mb level skip allowed (i.e. not coded at segment level).
- mb_skip_allowed = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+ const int mb_skip_allowed = !vp9_segfeature_active(&xd->seg, segment_id,
+ SEG_LVL_SKIP);
if (skippable && bsize >= BLOCK_SIZE_SB8X8) {
// Back out the coefficient coding costs
// This code forces Altref,0,0 and skip for the frame that overlays
// an altref unless Altref is filtered. However, this is unsafe if
// segment level coding of ref frame is enabled for this segment.
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ if (!vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME) &&
cpi->is_src_frame_alt_ref &&
(cpi->oxcf.arnr_max_frames == 0) &&
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame[0] != ALTREF_FRAME)
void vp9_enable_segmentation(VP9_PTR ptr) {
VP9_COMP *cpi = (VP9_COMP *)ptr;
- cpi->mb.e_mbd.segmentation_enabled = 1;
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ cpi->mb.e_mbd.seg.enabled = 1;
+ cpi->mb.e_mbd.seg.update_map = 1;
+ cpi->mb.e_mbd.seg.update_data = 1;
}
void vp9_disable_segmentation(VP9_PTR ptr) {
VP9_COMP *cpi = (VP9_COMP *)ptr;
- cpi->mb.e_mbd.segmentation_enabled = 0;
+ cpi->mb.e_mbd.seg.enabled = 0;
}
void vp9_set_segmentation_map(VP9_PTR ptr,
(cpi->common.mi_rows * cpi->common.mi_cols));
// Signal that the map should be updated.
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ cpi->mb.e_mbd.seg.update_map = 1;
+ cpi->mb.e_mbd.seg.update_data = 1;
}
void vp9_set_segment_data(VP9_PTR ptr,
unsigned char abs_delta) {
VP9_COMP *cpi = (VP9_COMP *)(ptr);
- cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
+ cpi->mb.e_mbd.seg.abs_delta = abs_delta;
- vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
- sizeof(cpi->mb.e_mbd.segment_feature_data));
+ vpx_memcpy(cpi->mb.e_mbd.seg.feature_data, feature_data,
+ sizeof(cpi->mb.e_mbd.seg.feature_data));
// TBD ?? Set the feature mask
// vpx_memcpy(cpi->mb.e_mbd.seg.feature_mask, 0,
// Set default state for the segment tree probabilities and the
// temporal coding probabilities
- vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
- vpx_memset(cm->segment_pred_probs, 255, sizeof(cm->segment_pred_probs));
+ vpx_memset(xd->seg.tree_probs, 255, sizeof(xd->seg.tree_probs));
+ vpx_memset(xd->seg.pred_probs, 255, sizeof(xd->seg.pred_probs));
vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
// Now choose which coding method to use.
if (t_pred_cost < no_pred_cost) {
- cm->temporal_update = 1;
- vpx_memcpy(xd->mb_segment_tree_probs, t_pred_tree, sizeof(t_pred_tree));
- vpx_memcpy(cm->segment_pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
+ xd->seg.temporal_update = 1;
+ vpx_memcpy(xd->seg.tree_probs, t_pred_tree, sizeof(t_pred_tree));
+ vpx_memcpy(xd->seg.pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
} else {
- cm->temporal_update = 0;
- vpx_memcpy(xd->mb_segment_tree_probs, no_pred_tree, sizeof(no_pred_tree));
+ xd->seg.temporal_update = 0;
+ vpx_memcpy(xd->seg.tree_probs, no_pred_tree, sizeof(no_pred_tree));
}
}
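/* Editorial note: t_pred_cost accounts for coding the per-block prediction
 * flag plus tree-coding the ids of blocks whose prediction fails, while
 * no_pred_cost tree-codes every id explicitly; the cheaper of the two decides
 * seg.temporal_update and which probability set is kept. */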
pt = combine_entropy_contexts(above_ec, left_ec);
nb = vp9_get_coef_neighbors_handle(scan);
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP))
seg_eob = 0;
c = 0;
MB_MODE_INFO * const mbmi = &xd->mode_info_context->mbmi;
TOKENEXTRA *t_backup = *t;
const int mb_skip_context = vp9_get_pred_context_mbskip(cm, xd);
- const int segment_id = mbmi->segment_id;
- const int skip_inc = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+ const int skip_inc = !vp9_segfeature_active(&xd->seg, mbmi->segment_id,
+ SEG_LVL_SKIP);
const TX_SIZE txfm_size = mbmi->txfm_size;
- struct tokenize_b_args arg = {
- cpi, xd, t, txfm_size, dry_run
- };
+ struct tokenize_b_args arg = { cpi, xd, t, txfm_size, dry_run };
mbmi->mb_skip_coeff = vp9_sb_is_skippable(xd, bsize);