int row, col;
int64_t rdmult;
int64_t wiener_variance = 0;
+ KMEANS_DATA *kmeans_data;
+ vpx_clear_system_state();
assert(cpi->norm_wiener_variance > 0);
for (col = mb_col_start; col < mb_col_end; ++col)
wiener_variance += cpi->mb_wiener_variance[row * cm->mb_cols + col];
+ kmeans_data = &cpi->kmeans_data_arr[cpi->kmeans_data_size++];
+ kmeans_data->value = log(1 + wiener_variance);
+ kmeans_data->pos = mi_row * cpi->kmeans_data_stride + mi_col;
if (wiener_variance)
wiener_variance /=
(mb_row_end - mb_row_start) * (mb_col_end - mb_col_start);
-
rdmult = (orig_rdmult * wiener_variance) / cpi->norm_wiener_variance;
rdmult = VPXMIN(rdmult, orig_rdmult * 3);
}
#endif
-#define MAX_KMEANS_GROUPS 8
-
-typedef struct KMEANS_DATA {
- int64_t value;
- int pos;
- int group_idx;
-} KMEANS_DATA;
-
static int compare_kmeans_data(const void *a, const void *b) {
if (((const KMEANS_DATA *)a)->value > ((const KMEANS_DATA *)b)->value) {
return 1;
}
void vp9_kmeans(double *ctr_ls, int k, KMEANS_DATA *arr, int size) {
- int64_t min, max;
+ double min, max;
double step;
int i, j;
int itr;
}
#endif
+ if (cpi->sf.enable_wiener_variance && cm->show_frame) {
+ cpi->kmeans_data_size = 0;
+ cpi->kmeans_ctr_num = 5;
+ }
+
if (!cpi->row_mt) {
cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read_dummy;
cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write_dummy;
vp9_encode_tiles_row_mt(cpi);
}
+ if (cpi->sf.enable_wiener_variance && cm->show_frame) {
+ vp9_kmeans(cpi->kmeans_ctr_ls, cpi->kmeans_ctr_num, cpi->kmeans_data_arr,
+ cpi->kmeans_data_size);
+ }
+
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
}
sizeof(*cpi->mb_wiener_variance)));
}
+ cpi->kmeans_data_arr_alloc = 0;
#if CONFIG_NON_GREEDY_MV
cpi->feature_score_loc_alloc = 0;
cpi->tpl_ready = 0;
vp9_denoiser_free(&(cpi->denoiser));
#endif
+ if (cpi->kmeans_data_arr_alloc) {
+ vpx_free(cpi->kmeans_data_arr);
+ }
+
#if CONFIG_NON_GREEDY_MV
vpx_free(cpi->feature_score_loc_arr);
vpx_free(cpi->feature_score_loc_sort);
for (i = 0; i < REFS_PER_FRAME; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
}
+ if (cpi->kmeans_data_arr_alloc == 0) {
+ const int mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int mi_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ CHECK_MEM_ERROR(
+ cm, cpi->kmeans_data_arr,
+ vpx_calloc(mi_rows * mi_cols, sizeof(*cpi->kmeans_data_arr)));
+ cpi->kmeans_data_stride = mi_cols;
+ cpi->kmeans_data_arr_alloc = 1;
+ }
+
if (gf_group_index == 1 &&
cpi->twopass.gf_group.update_type[gf_group_index] == ARF_UPDATE &&
cpi->sf.enable_tpl_model) {
} FEATURE_SCORE_LOC;
#endif
+// Maximum number of clusters supported by vp9_kmeans().
+#define MAX_KMEANS_GROUPS 8
+
+// One k-means sample: per-block Wiener-variance feature plus its location.
+typedef struct KMEANS_DATA {
+  // Clustering feature: log(1 + wiener_variance) of the block.
+  double value;
+  // Linear position of the block: mi_row * kmeans_data_stride + mi_col.
+  int pos;
+  // NOTE(review): presumably the cluster index assigned by vp9_kmeans() —
+  // the assignment is not visible in this chunk; confirm against vp9_kmeans.
+  int group_idx;
+} KMEANS_DATA;
+
typedef struct VP9_COMP {
QUANTS quants;
ThreadData td;
TplDepFrame tpl_stats[MAX_ARF_GOP_SIZE];
YV12_BUFFER_CONFIG *tpl_recon_frames[REF_FRAMES];
EncFrameBuf enc_frame_buf[REF_FRAMES];
+ int kmeans_data_arr_alloc;
+ KMEANS_DATA *kmeans_data_arr;
+ int kmeans_data_size;
+ int kmeans_data_stride;
+ double kmeans_ctr_ls[MAX_KMEANS_GROUPS];
+ int kmeans_ctr_num;
#if CONFIG_NON_GREEDY_MV
int tpl_ready;
int feature_score_loc_alloc;