Diffstat (limited to 'chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c  123
1 file changed, 77 insertions(+), 46 deletions(-)
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
index ad30951afa3..98343f0d243 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
@@ -930,7 +930,9 @@ static int scale_partitioning_svc(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
PARTITION_TYPE partition_high;
if (mi_row_high >= cm->mi_rows || mi_col_high >= cm->mi_cols) return 0;
- if (mi_row >= (cm->mi_rows >> 1) || mi_col >= (cm->mi_cols >> 1)) return 0;
+ if (mi_row >= svc->mi_rows[svc->spatial_layer_id - 1] ||
+ mi_col >= svc->mi_cols[svc->spatial_layer_id - 1])
+ return 0;
// Find corresponding (mi_col/mi_row) block down-scaled by 2x2.
start_pos = mi_row * (svc->mi_stride[svc->spatial_layer_id - 1]) + mi_col;
@@ -1378,6 +1380,20 @@ static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
x->sb_use_mv_part = 1;
x->sb_mvcol_part = mi->mv[0].as_mv.col;
x->sb_mvrow_part = mi->mv[0].as_mv.row;
+ if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
+ cpi->svc.spatial_layer_id == 0 &&
+ cpi->rc.high_num_blocks_with_motion && !x->zero_temp_sad_source &&
+ cm->width > 640 && cm->height > 480) {
+ // Disable split below 16x16 block size when scroll motion is detected.
+ // TODO(marpan/jianj): Improve this condition: issue is that search
+ // range is hard-coded/limited in vp9_int_pro_motion_estimation() so
+ // scroll motion may not be detected here.
+ if ((abs(x->sb_mvrow_part) >= 48 && abs(x->sb_mvcol_part) <= 8) ||
+ y_sad < 100000) {
+ compute_minmax_variance = 0;
+ thresholds[2] = INT64_MAX;
+ }
+ }
}
y_sad_last = y_sad;
@@ -3183,7 +3199,7 @@ static int ml_pruning_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
#define FEATURES 4
// ML-based partition search breakout.
-static int ml_predict_breakout(const VP9_COMP *const cpi, BLOCK_SIZE bsize,
+static int ml_predict_breakout(VP9_COMP *const cpi, BLOCK_SIZE bsize,
const MACROBLOCK *const x,
const RD_COST *const rd_cost) {
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
@@ -3214,14 +3230,29 @@ static int ml_predict_breakout(const VP9_COMP *const cpi, BLOCK_SIZE bsize,
if (!linear_weights) return 0;
{ // Generate feature values.
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int ac_q =
+ vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
+#else
const int ac_q = vp9_ac_quant(qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
const int num_pels_log2 = num_pels_log2_lookup[bsize];
int feature_index = 0;
unsigned int var, sse;
float rate_f, dist_f;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ var =
+ vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, x->e_mbd.bd);
+ } else {
+ var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ vp9_64_zeros, 0, &sse);
+ }
+#else
var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, &sse);
+#endif
var = var >> num_pels_log2;
vpx_clear_system_state();
@@ -3288,7 +3319,12 @@ static void ml_prune_rect_partition(VP9_COMP *const cpi, MACROBLOCK *const x,
{
const int64_t none_rdcost = pc_tree->none.rdcost;
const VP9_COMMON *const cm = &cpi->common;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int dc_q =
+ vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
+#else
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
int feature_index = 0;
unsigned int block_var = 0;
unsigned int sub_block_var[4] = { 0 };
@@ -3404,31 +3440,38 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
const NN_CONFIG *nn_config = NULL;
- DECLARE_ALIGNED(16, uint8_t, pred_buf[64 * 64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64 * 2]);
+ uint8_t *const pred_buf = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+ ? (CONVERT_TO_BYTEPTR(pred_buffer))
+ : pred_buffer;
+#else
+ DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64]);
+ uint8_t *const pred_buf = pred_buffer;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const int speed = cpi->oxcf.speed;
int i;
- float thresh_low = -1.0f;
- float thresh_high = 0.0f;
+ float thresh = 0.0f;
switch (bsize) {
case BLOCK_64X64:
nn_config = &vp9_var_rd_part_nnconfig_64;
- thresh_low = -3.0f;
- thresh_high = 3.0f;
+ thresh = speed > 0 ? 3.5f : 3.0f;
break;
case BLOCK_32X32:
nn_config = &vp9_var_rd_part_nnconfig_32;
- thresh_low = -3.0;
- thresh_high = 3.0f;
+ thresh = speed > 0 ? 3.5f : 3.0f;
break;
case BLOCK_16X16:
nn_config = &vp9_var_rd_part_nnconfig_16;
- thresh_low = -4.0;
- thresh_high = 4.0f;
+ thresh = speed > 0 ? 3.5f : 4.0f;
break;
case BLOCK_8X8:
nn_config = &vp9_var_rd_part_nnconfig_8;
- thresh_low = -2.0;
- thresh_high = 2.0f;
+ if (cm->width >= 720 && cm->height >= 720)
+ thresh = speed > 0 ? 2.5f : 2.0f;
+ else
+ thresh = speed > 0 ? 3.5f : 2.0f;
break;
default: assert(0 && "Unexpected block size."); return;
}
@@ -3476,7 +3519,12 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
{
float features[FEATURES] = { 0.0f };
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int dc_q =
+ vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (xd->bd - 8);
+#else
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
int feature_idx = 0;
float score;
@@ -3520,8 +3568,8 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
// partition is better than the non-split partition. So if the score is
// high enough, we skip the none-split partition search; if the score is
// low enough, we skip the split partition search.
- if (score > thresh_high) *none = 0;
- if (score < thresh_low) *split = 0;
+ if (score > thresh) *none = 0;
+ if (score < -thresh) *split = 0;
}
}
#undef FEATURES
@@ -3529,7 +3577,8 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col,
int orig_rdmult) {
- TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
+ const int gf_group_index = cpi->twopass.gf_group.index;
+ TplDepFrame *tpl_frame = &cpi->tpl_stats[gf_group_index];
TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
int tpl_stride = tpl_frame->stride;
int64_t intra_cost = 0;
@@ -3544,9 +3593,9 @@ int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col,
if (tpl_frame->is_valid == 0) return orig_rdmult;
- if (cpi->common.show_frame) return orig_rdmult;
+ if (cpi->twopass.gf_group.layer_depth[gf_group_index] > 1) return orig_rdmult;
- if (cpi->twopass.gf_group.index >= MAX_LAG_BUFFERS) return orig_rdmult;
+ if (gf_group_index >= MAX_ARF_GOP_SIZE) return orig_rdmult;
for (row = mi_row; row < mi_row + mi_high; ++row) {
for (col = mi_col; col < mi_col + mi_wide; ++col) {
@@ -3759,14 +3808,10 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
pc_tree->partitioning = PARTITION_NONE;
if (cpi->sf.ml_var_partition_pruning) {
- int do_ml_var_partition_pruning =
+ const int do_ml_var_partition_pruning =
!frame_is_intra_only(cm) && partition_none_allowed && do_split &&
mi_row + num_8x8_blocks_high_lookup[bsize] <= cm->mi_rows &&
mi_col + num_8x8_blocks_wide_lookup[bsize] <= cm->mi_cols;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- do_ml_var_partition_pruning = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
if (do_ml_var_partition_pruning) {
ml_predict_var_rd_paritioning(cpi, x, bsize, mi_row, mi_col,
&partition_none_allowed, &do_split);
@@ -3814,13 +3859,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
}
if ((do_split || do_rect) && !x->e_mbd.lossless && ctx->skippable) {
- int use_ml_based_breakout =
+ const int use_ml_based_breakout =
cpi->sf.use_ml_partition_search_breakout &&
cm->base_qindex >= 100;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- use_ml_based_breakout = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
if (use_ml_based_breakout) {
if (ml_predict_breakout(cpi, bsize, x, &this_rdc)) {
do_split = 0;
@@ -4019,13 +4060,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
}
{
- int do_ml_rect_partition_pruning =
+ const int do_ml_rect_partition_pruning =
!frame_is_intra_only(cm) && !force_horz_split && !force_vert_split &&
(partition_horz_allowed || partition_vert_allowed) && bsize > BLOCK_8X8;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- do_ml_rect_partition_pruning = 0;
-#endif
if (do_ml_rect_partition_pruning) {
ml_prune_rect_partition(cpi, x, bsize, pc_tree, &partition_horz_allowed,
&partition_vert_allowed, best_rdc.rdcost, mi_row,
@@ -4505,15 +4542,9 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
int mi_col) {
VP9_COMMON *const cm = &cpi->common;
const NN_CONFIG *nn_config = NULL;
- float thresh_low = -0.2f;
- float thresh_high = 0.0f;
switch (bsize) {
- case BLOCK_64X64:
- nn_config = &vp9_var_part_nnconfig_64;
- thresh_low = -0.3f;
- thresh_high = -0.1f;
- break;
+ case BLOCK_64X64: nn_config = &vp9_var_part_nnconfig_64; break;
case BLOCK_32X32: nn_config = &vp9_var_part_nnconfig_32; break;
case BLOCK_16X16: nn_config = &vp9_var_part_nnconfig_16; break;
case BLOCK_8X8: break;
@@ -4525,6 +4556,7 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
vpx_clear_system_state();
{
+ const float thresh = cpi->oxcf.speed <= 5 ? 1.25f : 0.0f;
float features[FEATURES] = { 0.0f };
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
int feature_idx = 0;
@@ -4565,8 +4597,8 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
assert(feature_idx == FEATURES);
nn_predict(features, nn_config, score);
- if (score[0] > thresh_high) return 3;
- if (score[0] < thresh_low) return 0;
+ if (score[0] > thresh) return PARTITION_SPLIT;
+ if (score[0] < -thresh) return PARTITION_NONE;
return -1;
}
}
@@ -4644,8 +4676,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
if (partition_none_allowed && do_split) {
const int ml_predicted_partition =
ml_predict_var_paritioning(cpi, x, bsize, mi_row, mi_col);
- if (ml_predicted_partition == 0) do_split = 0;
- if (ml_predicted_partition == 3) partition_none_allowed = 0;
+ if (ml_predicted_partition == PARTITION_NONE) do_split = 0;
+ if (ml_predicted_partition == PARTITION_SPLIT) partition_none_allowed = 0;
}
}
#endif // CONFIG_ML_VAR_PARTITION
@@ -5628,7 +5660,6 @@ static void encode_frame_internal(VP9_COMP *cpi) {
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
-
vp9_zero(*td->counts);
vp9_zero(cpi->td.rd_counts);
@@ -5693,7 +5724,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
source_var_based_partition_search_method(cpi);
- } else if (gf_group_index && gf_group_index < MAX_LAG_BUFFERS &&
+ } else if (gf_group_index && gf_group_index < MAX_ARF_GOP_SIZE &&
cpi->sf.enable_tpl_model) {
TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;