Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/arm/arm_csystemdependent.c | 188
-rw-r--r--  vp8/encoder/arm/boolhuff_arm.c | 35
-rw-r--r--  vp8/encoder/arm/dct_arm.c | 7
-rw-r--r--  vp8/encoder/arm/encodemb_arm.h | 18
-rw-r--r--  vp8/encoder/arm/neon/picklpf_arm.c | 39
-rw-r--r--  vp8/encoder/arm/quantize_arm.c | 53
-rw-r--r--  vp8/encoder/arm/variance_arm.c | 136
-rw-r--r--  vp8/encoder/arm/variance_arm.h | 30
-rw-r--r--  vp8/encoder/bitstream.c | 4154
-rw-r--r--  vp8/encoder/bitstream.h | 2
-rw-r--r--  vp8/encoder/block.h | 260
-rw-r--r--  vp8/encoder/boolhuff.c | 224
-rw-r--r--  vp8/encoder/boolhuff.h | 106
-rw-r--r--  vp8/encoder/dct.c | 817
-rw-r--r--  vp8/encoder/dct.h | 17
-rw-r--r--  vp8/encoder/encodeframe.c | 2709
-rw-r--r--  vp8/encoder/encodeintra.c | 442
-rw-r--r--  vp8/encoder/encodeintra.h | 2
-rw-r--r--  vp8/encoder/encodemb.c | 1924
-rw-r--r--  vp8/encoder/encodemb.h | 40
-rw-r--r--  vp8/encoder/encodemv.c | 1097
-rw-r--r--  vp8/encoder/find_rotation.c | 67
-rw-r--r--  vp8/encoder/firstpass.c | 4359
-rw-r--r--  vp8/encoder/generic/csystemdependent.c | 151
-rw-r--r--  vp8/encoder/lookahead.c | 286
-rw-r--r--  vp8/encoder/lookahead.h | 19
-rw-r--r--  vp8/encoder/mbgraph.c | 932
-rw-r--r--  vp8/encoder/mcomp.c | 3606
-rw-r--r--  vp8/encoder/mcomp.h | 113
-rw-r--r--  vp8/encoder/modecosts.c | 67
-rw-r--r--  vp8/encoder/onyx_if.c | 7218
-rw-r--r--  vp8/encoder/onyx_int.h | 1083
-rw-r--r--  vp8/encoder/picklpf.c | 870
-rw-r--r--  vp8/encoder/ppc/csystemdependent.c | 75
-rw-r--r--  vp8/encoder/psnr.c | 19
-rw-r--r--  vp8/encoder/quantize.c | 787
-rw-r--r--  vp8/encoder/quantize.h | 6
-rw-r--r--  vp8/encoder/ratectrl.c | 1067
-rw-r--r--  vp8/encoder/ratectrl.h | 6
-rw-r--r--  vp8/encoder/rdopt.c | 6539
-rw-r--r--  vp8/encoder/rdopt.h | 16
-rw-r--r--  vp8/encoder/sad_c.c | 583
-rw-r--r--  vp8/encoder/satd_c.c | 51
-rw-r--r--  vp8/encoder/segmentation.c | 502
-rw-r--r--  vp8/encoder/segmentation.h | 6
-rw-r--r--  vp8/encoder/ssim.c | 341
-rw-r--r--  vp8/encoder/temporal_filter.c | 849
-rw-r--r--  vp8/encoder/temporal_filter.h | 27
-rw-r--r--  vp8/encoder/tokenize.c | 1591
-rw-r--r--  vp8/encoder/tokenize.h | 22
-rw-r--r--  vp8/encoder/treewriter.c | 43
-rw-r--r--  vp8/encoder/treewriter.h | 106
-rw-r--r--  vp8/encoder/variance.h | 284
-rw-r--r--  vp8/encoder/variance_c.c | 589
-rw-r--r--  vp8/encoder/x86/variance_mmx.c | 630
-rw-r--r--  vp8/encoder/x86/variance_sse2.c | 825
-rw-r--r--  vp8/encoder/x86/variance_ssse3.c | 228
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c | 329
58 files changed, 22104 insertions, 24488 deletions
diff --git a/vp8/encoder/arm/arm_csystemdependent.c b/vp8/encoder/arm/arm_csystemdependent.c
index e66835ae0..1166efd96 100644
--- a/vp8/encoder/arm/arm_csystemdependent.c
+++ b/vp8/encoder/arm/arm_csystemdependent.c
@@ -18,116 +18,112 @@ extern void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
-void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
-{
+void vp8_arch_arm_encoder_init(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
- int flags = cpi->common.rtcd.flags;
+ int flags = cpi->common.rtcd.flags;
#if HAVE_ARMV5TE
- if (flags & HAS_EDSP)
- {
- }
+ if (flags & HAS_EDSP) {
+ }
#endif
#if HAVE_ARMV6
- if (flags & HAS_MEDIA)
- {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
- /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
-
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
- /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
-
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_armv6;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_armv6;
-
- /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
- cpi->rtcd.encodemb.subb = vp8_subtract_b_armv6;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_armv6;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_armv6;
-
- /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;*/
- cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_armv6;
- }
+ if (flags & HAS_MEDIA) {
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
+ /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
+ cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
+ cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
+ cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
+
+ /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
+ cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
+ /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
+ cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
+ cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
+
+ /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
+ cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
+ /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
+ cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
+ cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
+ cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
+
+ cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
+ /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
+
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_armv6;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_armv6;
+
+ /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
+ cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
+ cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
+ cpi->rtcd.encodemb.subb = vp8_subtract_b_armv6;
+ cpi->rtcd.encodemb.submby = vp8_subtract_mby_armv6;
+ cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_armv6;
+
+ /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;*/
+ cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_armv6;
+ }
#endif
#if HAVE_ARMV7
- if (flags & HAS_NEON)
- {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
-
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
-
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_neon;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_neon;
-
- /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
- cpi->rtcd.encodemb.subb = vp8_subtract_b_neon;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_neon;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_neon;
-
- /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;
- cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;*/
- cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_neon;
- cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_neon;
- }
+ if (flags & HAS_NEON) {
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
+ cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
+ cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
+ cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
+ cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
+
+ /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
+ cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
+ cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
+ cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
+ cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
+
+ /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
+ cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
+ /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
+ cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
+ cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
+ cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
+
+ cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
+ /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
+
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_neon;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_neon;
+
+ /*cpi->rtcd.encodemb.berr = vp8_block_error_c;
+ cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
+ cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;*/
+ cpi->rtcd.encodemb.subb = vp8_subtract_b_neon;
+ cpi->rtcd.encodemb.submby = vp8_subtract_mby_neon;
+ cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_neon;
+
+ /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b;
+ cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;*/
+ cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_neon;
+ cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_neon;
+ }
#endif
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (flags & HAS_NEON)
+ if (flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
- }
+ {
+ vp8_yv12_copy_partial_frame_ptr = vpxyv12_copy_partial_frame_neon;
+ }
#endif
#endif
}
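
Note: the file above is a textbook instance of libvpx's runtime CPU detection (RTCD) pattern: probe the feature flags once, then bind each rtcd table entry to the fastest kernel present. The NEON block runs after the ARMv6 block, so the more capable implementation wins when both flags are set. A standalone C sketch of the pattern (stand-in names, not the real libvpx API):

/* RTCD sketch: probe CPU flags once, then bind function pointers to the
 * best kernel. All names below are illustrative stand-ins. */
#include <stdio.h>

#define HAS_MEDIA (1 << 0)  /* ARMv6 media extensions (assumed flag bit) */
#define HAS_NEON  (1 << 1)  /* NEON SIMD (assumed flag bit) */

typedef unsigned int (*sad_fn)(const unsigned char *, int,
                               const unsigned char *, int);

static unsigned int sad16x16_c(const unsigned char *a, int as,
                               const unsigned char *b, int bs) {
  unsigned int sad = 0;
  int r, c;
  for (r = 0; r < 16; r++, a += as, b += bs)
    for (c = 0; c < 16; c++)
      sad += (a[c] > b[c]) ? (a[c] - b[c]) : (b[c] - a[c]);
  return sad;
}

/* Stand-ins for the ARMv6/NEON assembly kernels bound in the real code. */
static unsigned int sad16x16_armv6(const unsigned char *a, int as,
                                   const unsigned char *b, int bs)
{ return sad16x16_c(a, as, b, bs); }
static unsigned int sad16x16_neon(const unsigned char *a, int as,
                                  const unsigned char *b, int bs)
{ return sad16x16_c(a, as, b, bs); }

static sad_fn sad16x16 = sad16x16_c;  /* default: generic C kernel */

static void rtcd_init(int flags) {
  if (flags & HAS_MEDIA) sad16x16 = sad16x16_armv6;
  if (flags & HAS_NEON)  sad16x16 = sad16x16_neon;  /* NEON wins if present */
}

int main(void) {
  static unsigned char a[16 * 16], b[16 * 16];
  b[0] = 1;
  rtcd_init(HAS_MEDIA | HAS_NEON);
  printf("sad = %u\n", sad16x16(a, 16, b, 16));  /* prints sad = 1 */
  return 0;
}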
diff --git a/vp8/encoder/arm/boolhuff_arm.c b/vp8/encoder/arm/boolhuff_arm.c
index 9089663ca..07042071a 100644
--- a/vp8/encoder/arm/boolhuff_arm.c
+++ b/vp8/encoder/arm/boolhuff_arm.c
@@ -12,23 +12,22 @@
#include "vp8/encoder/boolhuff.h"
#include "vp8/common/blockd.h"
-const unsigned int vp8_prob_cost[256] =
-{
- 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
- 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
- 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
- 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
- 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
- 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
- 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
- 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
- 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
- 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
- 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
- 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
- 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
- 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
- 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
- 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
+const unsigned int vp8_prob_cost[256] = {
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
+ 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
+ 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
+ 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
+ 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
+ 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
+ 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
+ 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
+ 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
+ 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
+ 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
+ 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
+ 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
+ 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
+ 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
};
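
Note: each entry of vp8_prob_cost above is, to within a unit or two, the ideal cost in 1/256-bit units of coding a branch whose probability is p/256, i.e. -256*log2(p/256) clamped to [1, 2047]; p = 128 costs about one bit (256 units) and p = 1 about eight bits. A C sketch that computes that ideal cost (the exact rounding used to generate the real table is not shown in the diff, so treat small differences as expected):

/* Ideal branch cost in 1/256-bit units; build with -lm. */
#include <math.h>
#include <stdio.h>

int main(void) {
  int p;
  for (p = 1; p < 256; p++) {
    int cost = (int)(-256.0 * log2(p / 256.0));
    if (cost > 2047) cost = 2047;  /* clamp as the table does */
    if (cost < 1) cost = 1;
    printf("%4d%s", cost, (p % 16) ? ", " : ",\n");
  }
  return 0;
}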
diff --git a/vp8/encoder/arm/dct_arm.c b/vp8/encoder/arm/dct_arm.c
index 2692acb49..913d5c0f0 100644
--- a/vp8/encoder/arm/dct_arm.c
+++ b/vp8/encoder/arm/dct_arm.c
@@ -13,10 +13,9 @@
#if HAVE_ARMV6
-void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch)
-{
- vp8_short_fdct4x4_armv6(input, output, pitch);
- vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch);
+void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch) {
+ vp8_short_fdct4x4_armv6(input, output, pitch);
+ vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch);
}
#endif /* HAVE_ARMV6 */
diff --git a/vp8/encoder/arm/encodemb_arm.h b/vp8/encoder/arm/encodemb_arm.h
index bf417fe1d..7bcc747fc 100644
--- a/vp8/encoder/arm/encodemb_arm.h
+++ b/vp8/encoder/arm/encodemb_arm.h
@@ -31,22 +31,22 @@ extern prototype_submbuv(vp8_subtract_mbuv_armv6);
#endif /* HAVE_ARMV6 */
#if HAVE_ARMV7
-//extern prototype_berr(vp8_block_error_c);
-//extern prototype_mberr(vp8_mbblock_error_c);
-//extern prototype_mbuverr(vp8_mbuverror_c);
+// extern prototype_berr(vp8_block_error_c);
+// extern prototype_mberr(vp8_mbblock_error_c);
+// extern prototype_mbuverr(vp8_mbuverror_c);
extern prototype_subb(vp8_subtract_b_neon);
extern prototype_submby(vp8_subtract_mby_neon);
extern prototype_submbuv(vp8_subtract_mbuv_neon);
-//#undef vp8_encodemb_berr
-//#define vp8_encodemb_berr vp8_block_error_c
+// #undef vp8_encodemb_berr
+// #define vp8_encodemb_berr vp8_block_error_c
-//#undef vp8_encodemb_mberr
-//#define vp8_encodemb_mberr vp8_mbblock_error_c
+// #undef vp8_encodemb_mberr
+// #define vp8_encodemb_mberr vp8_mbblock_error_c
-//#undef vp8_encodemb_mbuverr
-//#define vp8_encodemb_mbuverr vp8_mbuverror_c
+// #undef vp8_encodemb_mbuverr
+// #define vp8_encodemb_mbuverr vp8_mbuverror_c
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_encodemb_subb
diff --git a/vp8/encoder/arm/neon/picklpf_arm.c b/vp8/encoder/arm/neon/picklpf_arm.c
index 3fb370c3d..44cb6a674 100644
--- a/vp8/encoder/arm/neon/picklpf_arm.c
+++ b/vp8/encoder/arm/neon/picklpf_arm.c
@@ -21,30 +21,29 @@ extern void vp8_memcpy_neon(unsigned char *dst_ptr, unsigned char *src_ptr, int
void
-vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction)
-{
- unsigned char *src_y, *dst_y;
- int yheight;
- int ystride;
- int border;
- int yoffset;
- int linestocopy;
+vpxyv12_copy_partial_frame_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction) {
+ unsigned char *src_y, *dst_y;
+ int yheight;
+ int ystride;
+ int border;
+ int yoffset;
+ int linestocopy;
- border = src_ybc->border;
- yheight = src_ybc->y_height;
- ystride = src_ybc->y_stride;
+ border = src_ybc->border;
+ yheight = src_ybc->y_height;
+ ystride = src_ybc->y_stride;
- linestocopy = (yheight >> (Fraction + 4));
+ linestocopy = (yheight >> (Fraction + 4));
- if (linestocopy < 1)
- linestocopy = 1;
+ if (linestocopy < 1)
+ linestocopy = 1;
- linestocopy <<= 4;
+ linestocopy <<= 4;
- yoffset = ystride * ((yheight >> 5) * 16 - 8);
- src_y = src_ybc->y_buffer + yoffset;
- dst_y = dst_ybc->y_buffer + yoffset;
+ yoffset = ystride * ((yheight >> 5) * 16 - 8);
+ src_y = src_ybc->y_buffer + yoffset;
+ dst_y = dst_ybc->y_buffer + yoffset;
- //vpx_memcpy (dst_y, src_y, ystride * (linestocopy +16));
- vp8_memcpy_neon((unsigned char *)dst_y, (unsigned char *)src_y, (int)(ystride *(linestocopy + 16)));
+ // vpx_memcpy (dst_y, src_y, ystride * (linestocopy +16));
+ vp8_memcpy_neon((unsigned char *)dst_y, (unsigned char *)src_y, (int)(ystride * (linestocopy + 16)));
}
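
Note: the geometry above copies a horizontal band around the frame's midpoint: linestocopy is yheight / 2^(Fraction+4) rounded down, clamped to at least 1, then multiplied by 16, and yoffset starts the copy 8 rows above the 16-aligned midpoint. A worked example with made-up dimensions:

#include <stdio.h>

/* Worked example of the partial-frame copy geometry (inputs made up). */
int main(void) {
  int yheight = 480, Fraction = 1;

  int linestocopy = yheight >> (Fraction + 4);   /* 480 >> 5 = 15 */
  if (linestocopy < 1) linestocopy = 1;
  linestocopy <<= 4;                             /* 240 rows */

  int start_row = (yheight >> 5) * 16 - 8;       /* 232: 8 rows above the
                                                    16-aligned midpoint */
  printf("copy %d rows starting at row %d\n", linestocopy + 16, start_row);
  return 0;
}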
diff --git a/vp8/encoder/arm/quantize_arm.c b/vp8/encoder/arm/quantize_arm.c
index 52d84013e..b78c2534b 100644
--- a/vp8/encoder/arm/quantize_arm.c
+++ b/vp8/encoder/arm/quantize_arm.c
@@ -21,42 +21,39 @@
/* vp8_quantize_mbX functions here differs from corresponding ones in
* quantize.c only by using quantize_b_pair function pointer instead of
* the regular quantize_b function pointer */
-void vp8_quantize_mby_neon(MACROBLOCK *x)
-{
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
-
- for (i = 0; i < 16; i+=2)
- x->quantize_b_pair(&x->block[i], &x->block[i+1],
- &x->e_mbd.block[i], &x->e_mbd.block[i+1]);
-
- if(has_2nd_order)
- x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
+void vp8_quantize_mby_neon(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+
+ for (i = 0; i < 16; i += 2)
+ x->quantize_b_pair(&x->block[i], &x->block[i + 1],
+ &x->e_mbd.block[i], &x->e_mbd.block[i + 1]);
+
+ if (has_2nd_order)
+ x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}
-void vp8_quantize_mb_neon(MACROBLOCK *x)
-{
- int i;
- int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+void vp8_quantize_mb_neon(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for (i = 0; i < 24; i+=2)
- x->quantize_b_pair(&x->block[i], &x->block[i+1],
- &x->e_mbd.block[i], &x->e_mbd.block[i+1]);
+ for (i = 0; i < 24; i += 2)
+ x->quantize_b_pair(&x->block[i], &x->block[i + 1],
+ &x->e_mbd.block[i], &x->e_mbd.block[i + 1]);
- if (has_2nd_order)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ if (has_2nd_order)
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
-void vp8_quantize_mbuv_neon(MACROBLOCK *x)
-{
- int i;
+void vp8_quantize_mbuv_neon(MACROBLOCK *x) {
+ int i;
- for (i = 16; i < 24; i+=2)
- x->quantize_b_pair(&x->block[i], &x->block[i+1],
- &x->e_mbd.block[i], &x->e_mbd.block[i+1]);
+ for (i = 16; i < 24; i += 2)
+ x->quantize_b_pair(&x->block[i], &x->block[i + 1],
+ &x->e_mbd.block[i], &x->e_mbd.block[i + 1]);
}
#endif /* HAVE_ARMV7 */
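
Note: the NEON quantizer processes two blocks per call, so the 16 luma and 8 chroma blocks pair off evenly and only the unpaired second-order block takes the scalar path. In vp8_quantize_mb_neon the loop exits with i == 24, so the final quantize_b call targets exactly the Y2 block. A runnable stand-in for that control flow:

#include <stdio.h>

static void quantize_pair(int a, int b) { printf("pair %2d,%2d\n", a, b); }
static void quantize_one(int a)         { printf("one  %2d\n", a); }

int main(void) {
  int i, has_2nd_order = 1;
  for (i = 0; i < 24; i += 2)  /* 16 Y + 8 UV blocks, two per SIMD call */
    quantize_pair(i, i + 1);
  if (has_2nd_order)           /* i == 24 here: the Y2 block, scalar path */
    quantize_one(i);
  return 0;
}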
diff --git a/vp8/encoder/arm/variance_arm.c b/vp8/encoder/arm/variance_arm.c
index 6e83c6e7b..b268f3e19 100644
--- a/vp8/encoder/arm/variance_arm.c
+++ b/vp8/encoder/arm/variance_arm.c
@@ -23,78 +23,69 @@
unsigned int vp8_sub_pixel_variance8x8_armv6
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short first_pass[10*8];
- unsigned char second_pass[8*8];
- const short *HFilter, *VFilter;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short first_pass[10 * 8];
+ unsigned char second_pass[8 * 8];
+ const short *HFilter, *VFilter;
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ vp8_filter_block2d_bil_first_pass_armv6(src_ptr, first_pass,
+ src_pixels_per_line,
+ 9, 8, HFilter);
+ vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
+ 8, 8, 8, VFilter);
+
+ return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
+ dst_pixels_per_line, sse);
+}
+unsigned int vp8_sub_pixel_variance16x16_armv6
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short first_pass[36 * 16];
+ unsigned char second_pass[20 * 16];
+ const short *HFilter, *VFilter;
+ unsigned int var;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ var = vp8_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, sse);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ var = vp8_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, sse);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ var = vp8_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, sse);
+ } else {
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
vp8_filter_block2d_bil_first_pass_armv6(src_ptr, first_pass,
src_pixels_per_line,
- 9, 8, HFilter);
+ 17, 16, HFilter);
vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
- 8, 8, 8, VFilter);
+ 16, 16, 16, VFilter);
- return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
- dst_pixels_per_line, sse);
-}
-
-unsigned int vp8_sub_pixel_variance16x16_armv6
-(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short first_pass[36*16];
- unsigned char second_pass[20*16];
- const short *HFilter, *VFilter;
- unsigned int var;
-
- if (xoffset == HALFNDX && yoffset == 0)
- {
- var = vp8_variance_halfpixvar16x16_h_armv6(src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, sse);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- var = vp8_variance_halfpixvar16x16_v_armv6(src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, sse);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- var = vp8_variance_halfpixvar16x16_hv_armv6(src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, sse);
- }
- else
- {
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
-
- vp8_filter_block2d_bil_first_pass_armv6(src_ptr, first_pass,
- src_pixels_per_line,
- 17, 16, HFilter);
- vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
- 16, 16, 16, VFilter);
-
- var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
- dst_pixels_per_line, sse);
- }
- return var;
+ var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
+ dst_pixels_per_line, sse);
+ }
+ return var;
}
#endif /* HAVE_ARMV6 */
@@ -104,15 +95,14 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
unsigned int vp8_sub_pixel_variance16x16_neon
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
if (xoffset == HALFNDX && yoffset == 0)
return vp8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
else if (xoffset == 0 && yoffset == HALFNDX)
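
Note: both the ARMv6 and NEON sub-pixel variance routines special-case exact half-pel offsets, where dedicated kernels avoid the generic two-pass bilinear filter. A stand-in for the dispatch (the value of HALFNDX, the half-pel index into vp8_bilinear_filters, is assumed here):

#include <stdio.h>

#define HALFNDX 4  /* assumed half-pel index into the bilinear filter table */

static unsigned int var_halfpix_h(void)  { puts("h kernel");  return 0; }
static unsigned int var_halfpix_v(void)  { puts("v kernel");  return 0; }
static unsigned int var_halfpix_hv(void) { puts("hv kernel"); return 0; }
static unsigned int var_generic(void)    { puts("two-pass bilinear"); return 0; }

static unsigned int subpel_var(int xoffset, int yoffset) {
  if (xoffset == HALFNDX && yoffset == 0)       return var_halfpix_h();
  if (xoffset == 0 && yoffset == HALFNDX)       return var_halfpix_v();
  if (xoffset == HALFNDX && yoffset == HALFNDX) return var_halfpix_hv();
  return var_generic();               /* filter, then full-pel variance */
}

int main(void) { subpel_var(HALFNDX, 0); return 0; }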
diff --git a/vp8/encoder/arm/variance_arm.h b/vp8/encoder/arm/variance_arm.h
index fdb7289b1..8deb72d4a 100644
--- a/vp8/encoder/arm/variance_arm.h
+++ b/vp8/encoder/arm/variance_arm.h
@@ -65,23 +65,23 @@ extern prototype_sad(vp8_sad8x16_neon);
extern prototype_sad(vp8_sad16x8_neon);
extern prototype_sad(vp8_sad16x16_neon);
-//extern prototype_variance(vp8_variance4x4_c);
+// extern prototype_variance(vp8_variance4x4_c);
extern prototype_variance(vp8_variance8x8_neon);
extern prototype_variance(vp8_variance8x16_neon);
extern prototype_variance(vp8_variance16x8_neon);
extern prototype_variance(vp8_variance16x16_neon);
-//extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
+// extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
-//extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
-//extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
+// extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
+// extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
-//extern prototype_getmbss(vp8_get_mb_ss_c);
+// extern prototype_getmbss(vp8_get_mb_ss_c);
extern prototype_variance(vp8_mse16x16_neon);
#if !CONFIG_RUNTIME_CPU_DETECT
@@ -100,8 +100,8 @@ extern prototype_variance(vp8_mse16x16_neon);
#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_neon
-//#undef vp8_variance_var4x4
-//#define vp8_variance_var4x4 vp8_variance4x4_c
+// #undef vp8_variance_var4x4
+// #define vp8_variance_var4x4 vp8_variance4x4_c
#undef vp8_variance_var8x8
#define vp8_variance_var8x8 vp8_variance8x8_neon
@@ -115,17 +115,17 @@ extern prototype_variance(vp8_mse16x16_neon);
#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_neon
-//#undef vp8_variance_subpixvar4x4
-//#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
+// #undef vp8_variance_subpixvar4x4
+// #define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
#undef vp8_variance_subpixvar8x8
#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
-//#undef vp8_variance_subpixvar8x16
-//#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
+// #undef vp8_variance_subpixvar8x16
+// #define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
-//#undef vp8_variance_subpixvar16x8
-//#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
+// #undef vp8_variance_subpixvar16x8
+// #define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
@@ -139,8 +139,8 @@ extern prototype_variance(vp8_mse16x16_neon);
#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
-//#undef vp8_variance_getmbss
-//#define vp8_variance_getmbss vp8_get_mb_ss_c
+// #undef vp8_variance_getmbss
+// #define vp8_variance_getmbss vp8_get_mb_ss_c
#undef vp8_variance_mse16x16
#define vp8_variance_mse16x16 vp8_mse16x16_neon
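
Note: when CONFIG_RUNTIME_CPU_DETECT is off, the header above binds kernels at compile time with #undef/#define instead of function pointers: the generic name simply becomes the NEON symbol. A minimal stand-in:

#include <stdio.h>

static int mse16x16_c(void)    { return 1; }
static int mse16x16_neon(void) { return 2; }

#define vp8_variance_mse16x16 mse16x16_c     /* default binding */
#undef  vp8_variance_mse16x16
#define vp8_variance_mse16x16 mse16x16_neon  /* rebound, as in the header */

int main(void) {
  printf("%d\n", vp8_variance_mse16x16());   /* prints 2 */
  (void)mse16x16_c;                          /* silence unused warning */
  return 0;
}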
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 99201ba78..3d0119572 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -34,16 +34,16 @@ unsigned __int64 Sectionbits[500];
#ifdef ENTROPY_STATS
int intra_mode_stats[VP8_BINTRAMODES]
- [VP8_BINTRAMODES]
- [VP8_BINTRAMODES];
+[VP8_BINTRAMODES]
+[VP8_BINTRAMODES];
unsigned int tree_update_hist [BLOCK_TYPES]
- [COEF_BANDS]
- [PREV_COEF_CONTEXTS]
- [ENTROPY_NODES][2];
+[COEF_BANDS]
+[PREV_COEF_CONTEXTS]
+[ENTROPY_NODES][2];
unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
- [COEF_BANDS]
- [PREV_COEF_CONTEXTS]
- [ENTROPY_NODES] [2];
+[COEF_BANDS]
+[PREV_COEF_CONTEXTS]
+[ENTROPY_NODES] [2];
extern unsigned int active_section;
#endif
@@ -59,2792 +59,2462 @@ int count_mb_seg[4] = { 0, 0, 0, 0 };
#define SEARCH_NEWP
static int update_bits[255];
-static void compute_update_table()
-{
- int i;
- for (i=0; i<255; i++)
- update_bits[i] = vp8_count_term_subexp(i, SUBEXP_PARAM, 255);
+static void compute_update_table() {
+ int i;
+ for (i = 0; i < 255; i++)
+ update_bits[i] = vp8_count_term_subexp(i, SUBEXP_PARAM, 255);
}
-static int split_index(int i, int n, int modulus)
-{
- int max1 = (n-1 - modulus/2)/modulus + 1;
- if (i%modulus == modulus/2) i = i/modulus;
- else i = max1 + i - (i + modulus-modulus/2)/modulus;
- return i;
+static int split_index(int i, int n, int modulus) {
+ int max1 = (n - 1 - modulus / 2) / modulus + 1;
+ if (i % modulus == modulus / 2) i = i / modulus;
+ else i = max1 + i - (i + modulus - modulus / 2) / modulus;
+ return i;
}
-static int remap_prob(int v, int m)
-{
- const int n = 256;
- const int modulus = MODULUS_PARAM;
- const int max1 = (n-2-modulus/2+modulus-1)/modulus;
- int i;
- if ((m<<1)<=n)
- i = recenter_nonneg(v, m) - 1;
- else
- i = recenter_nonneg(n-1-v, n-1-m) - 1;
-
- i = split_index(i, n-1, modulus);
- return i;
+static int remap_prob(int v, int m) {
+ const int n = 256;
+ const int modulus = MODULUS_PARAM;
+ const int max1 = (n - 2 - modulus / 2 + modulus - 1) / modulus;
+ int i;
+ if ((m << 1) <= n)
+ i = recenter_nonneg(v, m) - 1;
+ else
+ i = recenter_nonneg(n - 1 - v, n - 1 - m) - 1;
+
+ i = split_index(i, n - 1, modulus);
+ return i;
}
static void write_prob_diff_update(vp8_writer *const w,
- vp8_prob newp, vp8_prob oldp)
-{
- int delp = remap_prob(newp, oldp);
- vp8_encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
+ vp8_prob newp, vp8_prob oldp) {
+ int delp = remap_prob(newp, oldp);
+ vp8_encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
}
-static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp)
-{
- int delp = remap_prob(newp, oldp);
- return update_bits[delp]*256;
+static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
+ int delp = remap_prob(newp, oldp);
+ return update_bits[delp] * 256;
}
#endif
static void update_mode(
- vp8_writer *const w,
- int n,
- vp8_token tok [/* n */],
- vp8_tree tree,
- vp8_prob Pnew [/* n-1 */],
- vp8_prob Pcur [/* n-1 */],
- unsigned int bct [/* n-1 */] [2],
- const unsigned int num_events[/* n */]
-)
-{
- unsigned int new_b = 0, old_b = 0;
+ vp8_writer *const w,
+ int n,
+ vp8_token tok [/* n */],
+ vp8_tree tree,
+ vp8_prob Pnew [/* n-1 */],
+ vp8_prob Pcur [/* n-1 */],
+ unsigned int bct [/* n-1 */] [2],
+ const unsigned int num_events[/* n */]
+) {
+ unsigned int new_b = 0, old_b = 0;
+ int i = 0;
+
+ vp8_tree_probs_from_distribution(
+ n--, tok, tree,
+ Pnew, bct, num_events,
+ 256, 1
+ );
+
+ do {
+ new_b += vp8_cost_branch(bct[i], Pnew[i]);
+ old_b += vp8_cost_branch(bct[i], Pcur[i]);
+ } while (++i < n);
+
+ if (new_b + (n << 8) < old_b) {
int i = 0;
- vp8_tree_probs_from_distribution(
- n--, tok, tree,
- Pnew, bct, num_events,
- 256, 1
- );
-
- do
- {
- new_b += vp8_cost_branch(bct[i], Pnew[i]);
- old_b += vp8_cost_branch(bct[i], Pcur[i]);
- }
- while (++i < n);
-
- if (new_b + (n << 8) < old_b)
- {
- int i = 0;
-
- vp8_write_bit(w, 1);
+ vp8_write_bit(w, 1);
- do
- {
- const vp8_prob p = Pnew[i];
+ do {
+ const vp8_prob p = Pnew[i];
- vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
- }
- while (++i < n);
- }
- else
- vp8_write_bit(w, 0);
+ vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
+ } while (++i < n);
+ } else
+ vp8_write_bit(w, 0);
}
-static void update_mbintra_mode_probs(VP8_COMP *cpi)
-{
- VP8_COMMON *const x = & cpi->common;
+static void update_mbintra_mode_probs(VP8_COMP *cpi) {
+ VP8_COMMON *const x = & cpi->common;
- vp8_writer *const w = & cpi->bc;
+ vp8_writer *const w = & cpi->bc;
- {
- vp8_prob Pnew [VP8_YMODES-1];
- unsigned int bct [VP8_YMODES-1] [2];
+ {
+ vp8_prob Pnew [VP8_YMODES - 1];
+ unsigned int bct [VP8_YMODES - 1] [2];
- update_mode(
- w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
- Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
- );
- }
+ update_mode(
+ w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
+ Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
+ );
+ }
}
-void update_skip_probs(VP8_COMP *cpi)
-{
+void update_skip_probs(VP8_COMP *cpi) {
#if CONFIG_NEWENTROPY
- VP8_COMMON *const pc = & cpi->common;
- int prob_skip_false[3] = {0, 0, 0};
- int k;
+ VP8_COMMON *const pc = & cpi->common;
+ int prob_skip_false[3] = {0, 0, 0};
+ int k;
- for (k=0;k<MBSKIP_CONTEXTS;++k)
- {
- if ( (cpi->skip_false_count[k] + cpi->skip_true_count[k]) )
- {
- prob_skip_false[k] =
- cpi->skip_false_count[k] * 256 /
- (cpi->skip_false_count[k] + cpi->skip_true_count[k]);
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
+ if ((cpi->skip_false_count[k] + cpi->skip_true_count[k])) {
+ prob_skip_false[k] =
+ cpi->skip_false_count[k] * 256 /
+ (cpi->skip_false_count[k] + cpi->skip_true_count[k]);
- if (prob_skip_false[k] <= 1)
- prob_skip_false[k] = 1;
+ if (prob_skip_false[k] <= 1)
+ prob_skip_false[k] = 1;
- if (prob_skip_false[k] > 255)
- prob_skip_false[k] = 255;
- }
- else
- prob_skip_false[k] = 128;
+ if (prob_skip_false[k] > 255)
+ prob_skip_false[k] = 255;
+ } else
+ prob_skip_false[k] = 128;
- pc->mbskip_pred_probs[k] = prob_skip_false[k];
- }
+ pc->mbskip_pred_probs[k] = prob_skip_false[k];
+ }
#else
- int prob_skip_false = 0;
+ int prob_skip_false = 0;
- if ( (cpi->skip_false_count + cpi->skip_true_count) )
- {
- prob_skip_false = cpi->skip_false_count * 256 /
- (cpi->skip_false_count + cpi->skip_true_count);
+ if ((cpi->skip_false_count + cpi->skip_true_count)) {
+ prob_skip_false = cpi->skip_false_count * 256 /
+ (cpi->skip_false_count + cpi->skip_true_count);
- if (prob_skip_false <= 1)
- prob_skip_false = 1;
+ if (prob_skip_false <= 1)
+ prob_skip_false = 1;
- if (prob_skip_false > 255)
- prob_skip_false = 255;
- }
- else
- prob_skip_false = 128;
+ if (prob_skip_false > 255)
+ prob_skip_false = 255;
+ } else
+ prob_skip_false = 128;
- cpi->prob_skip_false = prob_skip_false;
+ cpi->prob_skip_false = prob_skip_false;
#endif
}
// This function updates the reference frame prediction stats
-static void update_refpred_stats( VP8_COMP *cpi )
-{
- VP8_COMMON *const cm = & cpi->common;
- int i;
- int tot_count;
- vp8_prob new_pred_probs[PREDICTION_PROBS];
- int old_cost, new_cost;
-
- // Set the prediction probability structures to defaults
- if ( cm->frame_type == KEY_FRAME )
- {
- // Set the prediction probabilities to defaults
- cm->ref_pred_probs[0] = 120;
- cm->ref_pred_probs[1] = 80;
- cm->ref_pred_probs[2] = 40;
-
- vpx_memset(cpi->ref_pred_probs_update, 0,
- sizeof(cpi->ref_pred_probs_update) );
- }
- else
- {
- // From the prediction counts set the probabilities for each context
- for ( i = 0; i < PREDICTION_PROBS; i++ )
- {
- tot_count = cpi->ref_pred_count[i][0] + cpi->ref_pred_count[i][1];
- if ( tot_count )
- {
- new_pred_probs[i] =
- ( cpi->ref_pred_count[i][0] * 255 + (tot_count >> 1)) / tot_count;
-
- // Clamp to minimum allowed value
- new_pred_probs[i] += !new_pred_probs[i];
- }
- else
- new_pred_probs[i] = 128;
-
- // Decide whether or not to update the reference frame probs.
- // Returned costs are in 1/256 bit units.
- old_cost =
- (cpi->ref_pred_count[i][0] * vp8_cost_zero(cm->ref_pred_probs[i])) +
- (cpi->ref_pred_count[i][1] * vp8_cost_one(cm->ref_pred_probs[i]));
-
- new_cost =
- (cpi->ref_pred_count[i][0] * vp8_cost_zero(new_pred_probs[i])) +
- (cpi->ref_pred_count[i][1] * vp8_cost_one(new_pred_probs[i]));
-
- // Cost saving must be >= 8 bits (2048 in these units)
- if ( (old_cost - new_cost) >= 2048 )
- {
- cpi->ref_pred_probs_update[i] = 1;
- cm->ref_pred_probs[i] = new_pred_probs[i];
- }
- else
- cpi->ref_pred_probs_update[i] = 0;
+static void update_refpred_stats(VP8_COMP *cpi) {
+ VP8_COMMON *const cm = & cpi->common;
+ int i;
+ int tot_count;
+ vp8_prob new_pred_probs[PREDICTION_PROBS];
+ int old_cost, new_cost;
+
+ // Set the prediction probability structures to defaults
+ if (cm->frame_type == KEY_FRAME) {
+ // Set the prediction probabilities to defaults
+ cm->ref_pred_probs[0] = 120;
+ cm->ref_pred_probs[1] = 80;
+ cm->ref_pred_probs[2] = 40;
+
+ vpx_memset(cpi->ref_pred_probs_update, 0,
+ sizeof(cpi->ref_pred_probs_update));
+ } else {
+ // From the prediction counts set the probabilities for each context
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ tot_count = cpi->ref_pred_count[i][0] + cpi->ref_pred_count[i][1];
+ if (tot_count) {
+ new_pred_probs[i] =
+ (cpi->ref_pred_count[i][0] * 255 + (tot_count >> 1)) / tot_count;
+
+ // Clamp to minimum allowed value
+ new_pred_probs[i] += !new_pred_probs[i];
+ } else
+ new_pred_probs[i] = 128;
+
+ // Decide whether or not to update the reference frame probs.
+ // Returned costs are in 1/256 bit units.
+ old_cost =
+ (cpi->ref_pred_count[i][0] * vp8_cost_zero(cm->ref_pred_probs[i])) +
+ (cpi->ref_pred_count[i][1] * vp8_cost_one(cm->ref_pred_probs[i]));
+
+ new_cost =
+ (cpi->ref_pred_count[i][0] * vp8_cost_zero(new_pred_probs[i])) +
+ (cpi->ref_pred_count[i][1] * vp8_cost_one(new_pred_probs[i]));
+
+ // Cost saving must be >= 8 bits (2048 in these units)
+ if ((old_cost - new_cost) >= 2048) {
+ cpi->ref_pred_probs_update[i] = 1;
+ cm->ref_pred_probs[i] = new_pred_probs[i];
+ } else
+ cpi->ref_pred_probs_update[i] = 0;
- }
}
+ }
}
-static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p)
-{
- vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
+static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}
-static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p)
-{
- vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
+static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
}
-static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p)
-{
- vp8_write_token(bc,vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m);
+static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m);
}
-static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p)
-{
- vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
+static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
}
-static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p)
-{
- vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
+static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
+ vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
}
-static void write_split(vp8_writer *bc, int x, const vp8_prob *p)
-{
- vp8_write_token(
- bc, vp8_mbsplit_tree, p, vp8_mbsplit_encodings + x
- );
+static void write_split(vp8_writer *bc, int x, const vp8_prob *p) {
+ vp8_write_token(
+ bc, vp8_mbsplit_tree, p, vp8_mbsplit_encodings + x
+ );
}
static int prob_update_savings(const unsigned int *ct,
const vp8_prob oldp, const vp8_prob newp,
- const vp8_prob upd)
-{
- const int old_b = vp8_cost_branch256(ct, oldp);
- const int new_b = vp8_cost_branch256(ct, newp);
- const int update_b = 2048 + vp8_cost_upd256;
- return (old_b - new_b - update_b);
+ const vp8_prob upd) {
+ const int old_b = vp8_cost_branch256(ct, oldp);
+ const int new_b = vp8_cost_branch256(ct, newp);
+ const int update_b = 2048 + vp8_cost_upd256;
+ return (old_b - new_b - update_b);
}
#if CONFIG_NEWUPDATE
static int prob_diff_update_savings(const unsigned int *ct,
- const vp8_prob oldp, const vp8_prob newp,
- const vp8_prob upd)
-{
- const int old_b = vp8_cost_branch256(ct, oldp);
- const int new_b = vp8_cost_branch256(ct, newp);
- const int update_b = (newp == oldp ? 0 :
- prob_diff_update_cost(newp, oldp) + vp8_cost_upd256);
- return (old_b - new_b - update_b);
+ const vp8_prob oldp, const vp8_prob newp,
+ const vp8_prob upd) {
+ const int old_b = vp8_cost_branch256(ct, oldp);
+ const int new_b = vp8_cost_branch256(ct, newp);
+ const int update_b = (newp == oldp ? 0 :
+ prob_diff_update_cost(newp, oldp) + vp8_cost_upd256);
+ return (old_b - new_b - update_b);
}
static int prob_diff_update_savings_search(const unsigned int *ct,
- const vp8_prob oldp, vp8_prob *bestp,
- const vp8_prob upd)
-{
- const int old_b = vp8_cost_branch256(ct, oldp);
- int new_b, update_b, savings, bestsavings, step;
- vp8_prob newp, bestnewp;
-
- bestsavings = 0;
- bestnewp = oldp;
-
- step = (*bestp > oldp ? -1 : 1);
- for (newp = *bestp; newp != oldp; newp+=step)
- {
- new_b = vp8_cost_branch256(ct, newp);
- update_b = prob_diff_update_cost(newp, oldp) + vp8_cost_upd256;
- savings = old_b - new_b - update_b;
- if (savings > bestsavings)
- {
- bestsavings = savings;
- bestnewp = newp;
- }
+ const vp8_prob oldp, vp8_prob *bestp,
+ const vp8_prob upd) {
+ const int old_b = vp8_cost_branch256(ct, oldp);
+ int new_b, update_b, savings, bestsavings, step;
+ vp8_prob newp, bestnewp;
+
+ bestsavings = 0;
+ bestnewp = oldp;
+
+ step = (*bestp > oldp ? -1 : 1);
+ for (newp = *bestp; newp != oldp; newp += step) {
+ new_b = vp8_cost_branch256(ct, newp);
+ update_b = prob_diff_update_cost(newp, oldp) + vp8_cost_upd256;
+ savings = old_b - new_b - update_b;
+ if (savings > bestsavings) {
+ bestsavings = savings;
+ bestnewp = newp;
}
- *bestp = bestnewp;
- return bestsavings;
+ }
+ *bestp = bestnewp;
+ return bestsavings;
}
#endif
-static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
-{
- const TOKENEXTRA *const stop = p + xcount;
- unsigned int split;
- unsigned int shift;
- int count = w->count;
- unsigned int range = w->range;
- unsigned int lowvalue = w->lowvalue;
+static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
+ const TOKENEXTRA *const stop = p + xcount;
+ unsigned int split;
+ unsigned int shift;
+ int count = w->count;
+ unsigned int range = w->range;
+ unsigned int lowvalue = w->lowvalue;
- while (p < stop)
- {
- const int t = p->Token;
- vp8_token *const a = vp8_coef_encodings + t;
- const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
- int i = 0;
- const unsigned char *pp = p->context_tree;
- int v = a->value;
- int n = a->Len;
-
- /* skip one or two nodes */
- if (p->skip_eob_node)
- {
- n-=p->skip_eob_node;
- i = 2*p->skip_eob_node;
- }
+ while (p < stop) {
+ const int t = p->Token;
+ vp8_token *const a = vp8_coef_encodings + t;
+ const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
+ int i = 0;
+ const unsigned char *pp = p->context_tree;
+ int v = a->value;
+ int n = a->Len;
+
+ /* skip one or two nodes */
+ if (p->skip_eob_node) {
+ n -= p->skip_eob_node;
+ i = 2 * p->skip_eob_node;
+ }
- do
- {
- const int bb = (v >> --n) & 1;
- split = 1 + (((range - 1) * pp[i>>1]) >> 8);
- i = vp8_coef_tree[i+bb];
+ do {
+ const int bb = (v >> --n) & 1;
+ split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
+ i = vp8_coef_tree[i + bb];
- if (bb)
- {
- lowvalue += split;
- range = range - split;
- }
- else
- {
- range = split;
- }
+ if (bb) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
- shift = vp8_norm[range];
- range <<= shift;
- count += shift;
+ shift = vp8_norm[range];
+ range <<= shift;
+ count += shift;
- if (count >= 0)
- {
- int offset = shift - count;
+ if (count >= 0) {
+ int offset = shift - count;
- if ((lowvalue << (offset - 1)) & 0x80000000)
- {
- int x = w->pos - 1;
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = w->pos - 1;
- while (x >= 0 && w->buffer[x] == 0xff)
- {
- w->buffer[x] = (unsigned char)0;
- x--;
- }
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
- w->buffer[x] += 1;
- }
+ w->buffer[x] += 1;
+ }
- w->buffer[w->pos++] = (lowvalue >> (24 - offset));
- lowvalue <<= offset;
- shift = count;
- lowvalue &= 0xffffff;
- count -= 8 ;
- }
+ w->buffer[w->pos++] = (lowvalue >> (24 - offset));
+ lowvalue <<= offset;
+ shift = count;
+ lowvalue &= 0xffffff;
+ count -= 8;
+ }
- lowvalue <<= shift;
- }
- while (n);
+ lowvalue <<= shift;
+ } while (n);
- if (b->base_val)
- {
- const int e = p->Extra, L = b->Len;
+ if (b->base_val) {
+ const int e = p->Extra, L = b->Len;
- if (L)
- {
- const unsigned char *pp = b->prob;
- int v = e >> 1;
- int n = L; /* number of bits in v, assumed nonzero */
- int i = 0;
+ if (L) {
+ const unsigned char *pp = b->prob;
+ int v = e >> 1;
+ int n = L; /* number of bits in v, assumed nonzero */
+ int i = 0;
- do
- {
- const int bb = (v >> --n) & 1;
- split = 1 + (((range - 1) * pp[i>>1]) >> 8);
- i = b->tree[i+bb];
+ do {
+ const int bb = (v >> --n) & 1;
+ split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
+ i = b->tree[i + bb];
- if (bb)
- {
- lowvalue += split;
- range = range - split;
- }
- else
- {
- range = split;
- }
+ if (bb) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
- shift = vp8_norm[range];
- range <<= shift;
- count += shift;
+ shift = vp8_norm[range];
+ range <<= shift;
+ count += shift;
- if (count >= 0)
- {
- int offset = shift - count;
-
- if ((lowvalue << (offset - 1)) & 0x80000000)
- {
- int x = w->pos - 1;
-
- while (x >= 0 && w->buffer[x] == 0xff)
- {
- w->buffer[x] = (unsigned char)0;
- x--;
- }
-
- w->buffer[x] += 1;
- }
-
- w->buffer[w->pos++] = (lowvalue >> (24 - offset));
- lowvalue <<= offset;
- shift = count;
- lowvalue &= 0xffffff;
- count -= 8 ;
- }
+ if (count >= 0) {
+ int offset = shift - count;
- lowvalue <<= shift;
- }
- while (n);
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = w->pos - 1;
+
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
+
+ w->buffer[x] += 1;
}
+ w->buffer[w->pos++] = (lowvalue >> (24 - offset));
+ lowvalue <<= offset;
+ shift = count;
+ lowvalue &= 0xffffff;
+ count -= 8;
+ }
- {
+ lowvalue <<= shift;
+ } while (n);
+ }
- split = (range + 1) >> 1;
- if (e & 1)
- {
- lowvalue += split;
- range = range - split;
- }
- else
- {
- range = split;
- }
+ {
- range <<= 1;
+ split = (range + 1) >> 1;
- if ((lowvalue & 0x80000000))
- {
- int x = w->pos - 1;
+ if (e & 1) {
+ lowvalue += split;
+ range = range - split;
+ } else {
+ range = split;
+ }
- while (x >= 0 && w->buffer[x] == 0xff)
- {
- w->buffer[x] = (unsigned char)0;
- x--;
- }
+ range <<= 1;
- w->buffer[x] += 1;
+ if ((lowvalue & 0x80000000)) {
+ int x = w->pos - 1;
- }
+ while (x >= 0 && w->buffer[x] == 0xff) {
+ w->buffer[x] = (unsigned char)0;
+ x--;
+ }
- lowvalue <<= 1;
+ w->buffer[x] += 1;
- if (!++count)
- {
- count = -8;
- w->buffer[w->pos++] = (lowvalue >> 24);
- lowvalue &= 0xffffff;
- }
- }
+ }
+ lowvalue <<= 1;
+
+ if (!++count) {
+ count = -8;
+ w->buffer[w->pos++] = (lowvalue >> 24);
+ lowvalue &= 0xffffff;
}
+ }
- ++p;
}
- w->count = count;
- w->lowvalue = lowvalue;
- w->range = range;
+ ++p;
+ }
+
+ w->count = count;
+ w->lowvalue = lowvalue;
+ w->range = range;
}
-static void write_partition_size(unsigned char *cx_data, int size)
-{
- signed char csize;
+static void write_partition_size(unsigned char *cx_data, int size) {
+ signed char csize;
- csize = size & 0xff;
- *cx_data = csize;
- csize = (size >> 8) & 0xff;
- *(cx_data + 1) = csize;
- csize = (size >> 16) & 0xff;
- *(cx_data + 2) = csize;
+ csize = size & 0xff;
+ *cx_data = csize;
+ csize = (size >> 8) & 0xff;
+ *(cx_data + 1) = csize;
+ csize = (size >> 16) & 0xff;
+ *(cx_data + 2) = csize;
}
static void write_mv_ref
(
- vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p
-)
-{
+ vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p
+) {
#if CONFIG_DEBUG
- assert(NEARESTMV <= m && m <= SPLITMV);
+ assert(NEARESTMV <= m && m <= SPLITMV);
#endif
- vp8_write_token(w, vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_write_token(w, vp8_mv_ref_tree, p,
+ vp8_mv_ref_encoding_array - NEARESTMV + m);
}
static void write_sub_mv_ref
(
- vp8_writer *w, B_PREDICTION_MODE m, const vp8_prob *p
-)
-{
+ vp8_writer *w, B_PREDICTION_MODE m, const vp8_prob *p
+) {
#if CONFIG_DEBUG
- assert(LEFT4X4 <= m && m <= NEW4X4);
+ assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
- vp8_write_token(w, vp8_sub_mv_ref_tree, p,
- vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
+ vp8_write_token(w, vp8_sub_mv_ref_tree, p,
+ vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
static void write_mv
(
- vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
-)
-{
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_motion_vector(w, &e, mvc);
+ vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
+) {
+ MV e;
+ e.row = mv->row - ref->as_mv.row;
+ e.col = mv->col - ref->as_mv.col;
+
+ vp8_encode_motion_vector(w, &e, mvc);
}
#if CONFIG_HIGH_PRECISION_MV
static void write_mv_hp
(
- vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
-)
-{
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_motion_vector_hp(w, &e, mvc);
+ vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
+) {
+ MV e;
+ e.row = mv->row - ref->as_mv.row;
+ e.col = mv->col - ref->as_mv.col;
+
+ vp8_encode_motion_vector_hp(w, &e, mvc);
}
#endif
// This function writes the current macro block's segnment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *w,
- const MB_MODE_INFO *mi, const MACROBLOCKD *x)
-{
- // Encode the MB segment id.
- if (x->segmentation_enabled && x->update_mb_segmentation_map)
- {
- switch (mi->segment_id)
- {
- case 0:
- vp8_write(w, 0, x->mb_segment_tree_probs[0]);
- vp8_write(w, 0, x->mb_segment_tree_probs[1]);
- break;
- case 1:
- vp8_write(w, 0, x->mb_segment_tree_probs[0]);
- vp8_write(w, 1, x->mb_segment_tree_probs[1]);
- break;
- case 2:
- vp8_write(w, 1, x->mb_segment_tree_probs[0]);
- vp8_write(w, 0, x->mb_segment_tree_probs[2]);
- break;
- case 3:
- vp8_write(w, 1, x->mb_segment_tree_probs[0]);
- vp8_write(w, 1, x->mb_segment_tree_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- vp8_write(w, 0, x->mb_segment_tree_probs[0]);
- vp8_write(w, 0, x->mb_segment_tree_probs[1]);
- break;
- }
+ const MB_MODE_INFO *mi, const MACROBLOCKD *x) {
+ // Encode the MB segment id.
+ if (x->segmentation_enabled && x->update_mb_segmentation_map) {
+ switch (mi->segment_id) {
+ case 0:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ break;
+ case 1:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 1, x->mb_segment_tree_probs[1]);
+ break;
+ case 2:
+ vp8_write(w, 1, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[2]);
+ break;
+ case 3:
+ vp8_write(w, 1, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 1, x->mb_segment_tree_probs[2]);
+ break;
+
+ // TRAP.. This should not happen
+ default:
+ vp8_write(w, 0, x->mb_segment_tree_probs[0]);
+ vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+ break;
}
+ }
}
// This function encodes the reference frame
-static void encode_ref_frame( vp8_writer *const w,
- VP8_COMMON *const cm,
- MACROBLOCKD *xd,
- int segment_id,
- MV_REFERENCE_FRAME rf )
-{
- int seg_ref_active;
- int seg_ref_count = 0;
- seg_ref_active = segfeature_active( xd,
- segment_id,
- SEG_LVL_REF_FRAME );
-
- if ( seg_ref_active )
- {
- seg_ref_count = check_segref( xd, segment_id, INTRA_FRAME ) +
- check_segref( xd, segment_id, LAST_FRAME ) +
- check_segref( xd, segment_id, GOLDEN_FRAME ) +
- check_segref( xd, segment_id, ALTREF_FRAME );
- }
-
- // If segment level coding of this signal is disabled...
- // or the segment allows multiple reference frame options
- if ( !seg_ref_active || (seg_ref_count > 1) )
- {
- // Values used in prediction model coding
- unsigned char prediction_flag;
- vp8_prob pred_prob;
- MV_REFERENCE_FRAME pred_rf;
-
- // Get the context probability the prediction flag
- pred_prob = get_pred_prob( cm, xd, PRED_REF );
-
- // Get the predicted value.
- pred_rf = get_pred_ref( cm, xd );
-
- // Did the chosen reference frame match its predicted value.
- prediction_flag =
- ( xd->mode_info_context->mbmi.ref_frame == pred_rf );
-
- set_pred_flag( xd, PRED_REF, prediction_flag );
- vp8_write( w, prediction_flag, pred_prob );
-
- // If not predicted correctly then code value explicitly
- if ( !prediction_flag )
- {
- vp8_prob mod_refprobs[PREDICTION_PROBS];
-
- vpx_memcpy( mod_refprobs,
- cm->mod_refprobs[pred_rf], sizeof(mod_refprobs) );
-
- // If segment coding enabled blank out options that cant occur by
- // setting the branch probability to 0.
- if ( seg_ref_active )
- {
- mod_refprobs[INTRA_FRAME] *=
- check_segref( xd, segment_id, INTRA_FRAME );
- mod_refprobs[LAST_FRAME] *=
- check_segref( xd, segment_id, LAST_FRAME );
- mod_refprobs[GOLDEN_FRAME] *=
- ( check_segref( xd, segment_id, GOLDEN_FRAME ) *
- check_segref( xd, segment_id, ALTREF_FRAME ) );
- }
-
- if ( mod_refprobs[0] )
- {
- vp8_write(w, (rf != INTRA_FRAME), mod_refprobs[0] );
- }
-
- // Inter coded
- if (rf != INTRA_FRAME)
- {
- if ( mod_refprobs[1] )
- {
- vp8_write(w, (rf != LAST_FRAME), mod_refprobs[1] );
- }
+static void encode_ref_frame(vp8_writer *const w,
+ VP8_COMMON *const cm,
+ MACROBLOCKD *xd,
+ int segment_id,
+ MV_REFERENCE_FRAME rf) {
+ int seg_ref_active;
+ int seg_ref_count = 0;
+ seg_ref_active = segfeature_active(xd,
+ segment_id,
+ SEG_LVL_REF_FRAME);
+
+ if (seg_ref_active) {
+ seg_ref_count = check_segref(xd, segment_id, INTRA_FRAME) +
+ check_segref(xd, segment_id, LAST_FRAME) +
+ check_segref(xd, segment_id, GOLDEN_FRAME) +
+ check_segref(xd, segment_id, ALTREF_FRAME);
+ }
+
+ // If segment level coding of this signal is disabled...
+ // or the segment allows multiple reference frame options
+ if (!seg_ref_active || (seg_ref_count > 1)) {
+ // Values used in prediction model coding
+ unsigned char prediction_flag;
+ vp8_prob pred_prob;
+ MV_REFERENCE_FRAME pred_rf;
+
+ // Get the context probability the prediction flag
+ pred_prob = get_pred_prob(cm, xd, PRED_REF);
+
+ // Get the predicted value.
+ pred_rf = get_pred_ref(cm, xd);
+
+ // Did the chosen reference frame match its predicted value.
+ prediction_flag =
+ (xd->mode_info_context->mbmi.ref_frame == pred_rf);
+
+ set_pred_flag(xd, PRED_REF, prediction_flag);
+ vp8_write(w, prediction_flag, pred_prob);
+
+ // If not predicted correctly then code value explicitly
+ if (!prediction_flag) {
+ vp8_prob mod_refprobs[PREDICTION_PROBS];
+
+ vpx_memcpy(mod_refprobs,
+ cm->mod_refprobs[pred_rf], sizeof(mod_refprobs));
+
+ // If segment coding enabled blank out options that cant occur by
+ // setting the branch probability to 0.
+ if (seg_ref_active) {
+ mod_refprobs[INTRA_FRAME] *=
+ check_segref(xd, segment_id, INTRA_FRAME);
+ mod_refprobs[LAST_FRAME] *=
+ check_segref(xd, segment_id, LAST_FRAME);
+ mod_refprobs[GOLDEN_FRAME] *=
+ (check_segref(xd, segment_id, GOLDEN_FRAME) *
+ check_segref(xd, segment_id, ALTREF_FRAME));
+ }
+
+ if (mod_refprobs[0]) {
+ vp8_write(w, (rf != INTRA_FRAME), mod_refprobs[0]);
+ }
+
+ // Inter coded
+ if (rf != INTRA_FRAME) {
+ if (mod_refprobs[1]) {
+ vp8_write(w, (rf != LAST_FRAME), mod_refprobs[1]);
+ }
- if (rf != LAST_FRAME)
- {
- if ( mod_refprobs[2] )
- {
- vp8_write(w, (rf != GOLDEN_FRAME), mod_refprobs[2] );
- }
- }
- }
+ if (rf != LAST_FRAME) {
+ if (mod_refprobs[2]) {
+ vp8_write(w, (rf != GOLDEN_FRAME), mod_refprobs[2]);
+ }
}
+ }
}
+ }
- // if using the prediction mdoel we have nothing further to do because
- // the reference frame is fully coded by the segment
+ // if using the prediction mdoel we have nothing further to do because
+ // the reference frame is fully coded by the segment
}
// Update the probabilities used to encode reference frame data
-static void update_ref_probs( VP8_COMP *const cpi )
-{
- VP8_COMMON *const cm = & cpi->common;
+static void update_ref_probs(VP8_COMP *const cpi) {
+ VP8_COMMON *const cm = & cpi->common;
- const int *const rfct = cpi->count_mb_ref_frame_usage;
- const int rf_intra = rfct[INTRA_FRAME];
- const int rf_inter = rfct[LAST_FRAME] +
- rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
+ const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int rf_intra = rfct[INTRA_FRAME];
+ const int rf_inter = rfct[LAST_FRAME] +
+ rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
- cm->prob_intra_coded = (rf_intra + rf_inter)
- ? rf_intra * 255 / (rf_intra + rf_inter) : 1;
+ cm->prob_intra_coded = (rf_intra + rf_inter)
+ ? rf_intra * 255 / (rf_intra + rf_inter) : 1;
- if (!cm->prob_intra_coded)
- cm->prob_intra_coded = 1;
+ if (!cm->prob_intra_coded)
+ cm->prob_intra_coded = 1;
- cm->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
+ cm->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
- if (!cm->prob_last_coded)
- cm->prob_last_coded = 1;
+ if (!cm->prob_last_coded)
+ cm->prob_last_coded = 1;
- cm->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
- ? (rfct[GOLDEN_FRAME] * 255) /
- (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
+ cm->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
+ ? (rfct[GOLDEN_FRAME] * 255) /
+ (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
- if (!cm->prob_gf_coded)
- cm->prob_gf_coded = 1;
+ if (!cm->prob_gf_coded)
+ cm->prob_gf_coded = 1;
- // Compute a modified set of probabilities to use when prediction of the
- // reference frame fails
- compute_mod_refprobs( cm );
+ // Compute a modified set of probabilities to use when prediction of the
+ // reference frame fails
+ compute_mod_refprobs(cm);
}
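All three probabilities above follow the same recipe: scale one reference count against a total into an 8-bit probability, then clamp it away from zero because the bool coder cannot represent a zero probability. A small sketch of that normalization (the zero-total fallback differs per field in the code above, 1 or 128; 128 is used here):

/* Sketch only: counts -> 8-bit probability in [1, 255]. */
static int count_to_prob(unsigned int n0, unsigned int n1) {
  unsigned int total = n0 + n1;
  int p = total ? (int)((n0 * 255) / total) : 128;  /* no data: assume even */
  return (p < 1) ? 1 : p;                           /* 0 is not codable */
}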
-static void pack_inter_mode_mvs(VP8_COMP *const cpi)
-{
- int i;
- VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const w = & cpi->bc;
- const MV_CONTEXT *mvc = pc->fc.mvc;
+static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
+ int i;
+ VP8_COMMON *const pc = & cpi->common;
+ vp8_writer *const w = & cpi->bc;
+ const MV_CONTEXT *mvc = pc->fc.mvc;
#if CONFIG_HIGH_PRECISION_MV
- const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
+ const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
#endif
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- MODE_INFO *m;
- MODE_INFO *prev_m;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ MODE_INFO *m;
+ MODE_INFO *prev_m;
- const int mis = pc->mode_info_stride;
- int mb_row, mb_col;
- int row, col;
+ const int mis = pc->mode_info_stride;
+ int mb_row, mb_col;
+ int row, col;
- // Values used in prediction model coding
- vp8_prob pred_prob;
- unsigned char prediction_flag;
+ // Values used in prediction model coding
+ vp8_prob pred_prob;
+ unsigned char prediction_flag;
- int row_delta[4] = { 0, +1, 0, -1};
- int col_delta[4] = {+1, -1, +1, +1};
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = { +1, -1, +1, +1};
- cpi->mb.partition_info = cpi->mb.pi;
+ cpi->mb.partition_info = cpi->mb.pi;
- // Update the probabilities used to encode reference frame data
- update_ref_probs( cpi );
+ // Update the probabilities used to encode reference frame data
+ update_ref_probs(cpi);
#ifdef ENTROPY_STATS
- active_section = 1;
+ active_section = 1;
#endif
- if (pc->mb_no_coeff_skip)
- {
+ if (pc->mb_no_coeff_skip) {
#if CONFIG_NEWENTROPY
- int k;
+ int k;
- update_skip_probs( cpi );
- for (k=0;k<MBSKIP_CONTEXTS;++k)
- vp8_write_literal(w, pc->mbskip_pred_probs[k], 8);
+ update_skip_probs(cpi);
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ vp8_write_literal(w, pc->mbskip_pred_probs[k], 8);
#else
- update_skip_probs( cpi );
- vp8_write_literal(w, cpi->prob_skip_false, 8);
+ update_skip_probs(cpi);
+ vp8_write_literal(w, cpi->prob_skip_false, 8);
#endif
- }
+ }
#if CONFIG_PRED_FILTER
- // Write the prediction filter mode used for this frame
- vp8_write_literal(w, pc->pred_filter_mode, 2);
-
- // Write prediction filter on/off probability if signaling at MB level
- if (pc->pred_filter_mode == 2)
- vp8_write_literal(w, pc->prob_pred_filter_off, 8);
-
- //printf("pred_filter_mode:%d prob_pred_filter_off:%d\n",
- // pc->pred_filter_mode, pc->prob_pred_filter_off);
-#endif
-
- vp8_write_literal(w, pc->prob_intra_coded, 8);
- vp8_write_literal(w, pc->prob_last_coded, 8);
- vp8_write_literal(w, pc->prob_gf_coded, 8);
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w, 1, 128);
- vp8_write(w, 1, 128);
- for (i = 0; i < COMP_PRED_CONTEXTS; i++)
- {
- if (cpi->single_pred_count[i] + cpi->comp_pred_count[i])
- {
- pc->prob_comppred[i] = cpi->single_pred_count[i] * 255 /
- (cpi->single_pred_count[i] + cpi->comp_pred_count[i]);
- if (pc->prob_comppred[i] < 1)
- pc->prob_comppred[i] = 1;
- }
- else
- {
- pc->prob_comppred[i] = 128;
- }
- vp8_write_literal(w, pc->prob_comppred[i], 8);
- }
- }
- else if (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY)
- {
- vp8_write(w, 0, 128);
- }
- else /* compound prediction only */
- {
- vp8_write(w, 1, 128);
- vp8_write(w, 0, 128);
+ // Write the prediction filter mode used for this frame
+ vp8_write_literal(w, pc->pred_filter_mode, 2);
+
+ // Write prediction filter on/off probability if signaling at MB level
+ if (pc->pred_filter_mode == 2)
+ vp8_write_literal(w, pc->prob_pred_filter_off, 8);
+
+ // printf("pred_filter_mode:%d prob_pred_filter_off:%d\n",
+ // pc->pred_filter_mode, pc->prob_pred_filter_off);
+#endif
+
+ vp8_write_literal(w, pc->prob_intra_coded, 8);
+ vp8_write_literal(w, pc->prob_last_coded, 8);
+ vp8_write_literal(w, pc->prob_gf_coded, 8);
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ vp8_write(w, 1, 128);
+ vp8_write(w, 1, 128);
+ for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
+ if (cpi->single_pred_count[i] + cpi->comp_pred_count[i]) {
+ pc->prob_comppred[i] = cpi->single_pred_count[i] * 255 /
+ (cpi->single_pred_count[i] + cpi->comp_pred_count[i]);
+ if (pc->prob_comppred[i] < 1)
+ pc->prob_comppred[i] = 1;
+ } else {
+ pc->prob_comppred[i] = 128;
+ }
+ vp8_write_literal(w, pc->prob_comppred[i], 8);
}
+ } else if (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY) {
+ vp8_write(w, 0, 128);
+ } else { /* compound prediction only */
+ vp8_write(w, 1, 128);
+ vp8_write(w, 0, 128);
+ }
- update_mbintra_mode_probs(cpi);
+ update_mbintra_mode_probs(cpi);
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- vp8_write_mvprobs_hp(cpi);
- else
-#endif
- vp8_write_mvprobs(cpi);
-
- mb_row = 0;
- for (row=0; row < pc->mb_rows; row += 2)
- {
- m = pc->mi + row * mis;
- prev_m = pc->prev_mi + row * mis;
-
- mb_col = 0;
- for (col=0; col < pc->mb_cols; col += 2)
- {
- int i;
-
- // Process the 4 MBs in the order:
- // top-left, top-right, bottom-left, bottom-right
- for (i=0; i<4; i++)
- {
- MB_MODE_INFO *mi;
- MV_REFERENCE_FRAME rf;
- MB_PREDICTION_MODE mode;
- int segment_id;
-
- int dy = row_delta[i];
- int dx = col_delta[i];
- int offset_extended = dy * mis + dx;
-
- if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols))
- {
- // MB lies outside frame, move on
- mb_row += dy;
- mb_col += dx;
- m += offset_extended;
- prev_m += offset_extended;
- cpi->mb.partition_info += offset_extended;
- continue;
- }
+ if (xd->allow_high_precision_mv)
+ vp8_write_mvprobs_hp(cpi);
+ else
+#endif
+ vp8_write_mvprobs(cpi);
+
+ mb_row = 0;
+ for (row = 0; row < pc->mb_rows; row += 2) {
+ m = pc->mi + row * mis;
+ prev_m = pc->prev_mi + row * mis;
+
+ mb_col = 0;
+ for (col = 0; col < pc->mb_cols; col += 2) {
+ int i;
+
+ // Process the 4 MBs in the order:
+ // top-left, top-right, bottom-left, bottom-right
+ for (i = 0; i < 4; i++) {
+ MB_MODE_INFO *mi;
+ MV_REFERENCE_FRAME rf;
+ MB_PREDICTION_MODE mode;
+ int segment_id;
+
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_extended = dy * mis + dx;
+
+ if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols)) {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+ m += offset_extended;
+ prev_m += offset_extended;
+ cpi->mb.partition_info += offset_extended;
+ continue;
+ }
- mi = & m->mbmi;
- rf = mi->ref_frame;
- mode = mi->mode;
- segment_id = mi->segment_id;
+ mi = & m->mbmi;
+ rf = mi->ref_frame;
+ mode = mi->mode;
+ segment_id = mi->segment_id;
- // Distance of Mb to the various image edges.
- // These specified to 8th pel as they are always compared to MV
- // values that are in 1/8th pel units
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
- xd->mb_to_top_edge = -((mb_row * 16)) << 3;
- xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
+        // Distance of MB to the various image edges.
+        // These are specified to 1/8th pel as they are always compared
+        // to MV values that are in 1/8th pel units.
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+ xd->mb_to_top_edge = -((mb_row * 16)) << 3;
+ xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
- // Make sure the MacroBlockD mode info pointer is set correctly
- xd->mode_info_context = m;
- xd->prev_mode_info_context = prev_m;
+ // Make sure the MacroBlockD mode info pointer is set correctly
+ xd->mode_info_context = m;
+ xd->prev_mode_info_context = prev_m;
#ifdef ENTROPY_STATS
- active_section = 9;
+ active_section = 9;
#endif
- if (cpi->mb.e_mbd.update_mb_segmentation_map)
- {
- // Is temporal coding of the segment map enabled
- if (pc->temporal_update)
- {
- prediction_flag = get_pred_flag( xd, PRED_SEG_ID );
- pred_prob = get_pred_prob( pc, xd, PRED_SEG_ID);
+ if (cpi->mb.e_mbd.update_mb_segmentation_map) {
+ // Is temporal coding of the segment map enabled
+ if (pc->temporal_update) {
+ prediction_flag = get_pred_flag(xd, PRED_SEG_ID);
+ pred_prob = get_pred_prob(pc, xd, PRED_SEG_ID);
- // Code the segment id prediction flag for this mb
- vp8_write( w, prediction_flag, pred_prob );
+ // Code the segment id prediction flag for this mb
+ vp8_write(w, prediction_flag, pred_prob);
- // If the mb segment id wasn't predicted code explicitly
- if (!prediction_flag)
- write_mb_segid(w, mi, &cpi->mb.e_mbd);
- }
- else
- {
- // Normal unpredicted coding
- write_mb_segid(w, mi, &cpi->mb.e_mbd);
- }
- }
+ // If the mb segment id wasn't predicted code explicitly
+ if (!prediction_flag)
+ write_mb_segid(w, mi, &cpi->mb.e_mbd);
+ } else {
+ // Normal unpredicted coding
+ write_mb_segid(w, mi, &cpi->mb.e_mbd);
+ }
+ }
- if ( pc->mb_no_coeff_skip &&
- ( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
- ( get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0 ) ) )
- {
+ if (pc->mb_no_coeff_skip &&
+ (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
#if CONFIG_NEWENTROPY
- vp8_encode_bool(w, mi->mb_skip_coeff,
- get_pred_prob(pc, xd, PRED_MBSKIP));
+ vp8_encode_bool(w, mi->mb_skip_coeff,
+ get_pred_prob(pc, xd, PRED_MBSKIP));
#else
- vp8_encode_bool(w, mi->mb_skip_coeff, cpi->prob_skip_false);
+ vp8_encode_bool(w, mi->mb_skip_coeff, cpi->prob_skip_false);
#endif
- }
+ }
- // Encode the reference frame.
- encode_ref_frame( w, pc, xd, segment_id, rf );
+ // Encode the reference frame.
+ encode_ref_frame(w, pc, xd, segment_id, rf);
- if (rf == INTRA_FRAME)
- {
+ if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
- active_section = 6;
+ active_section = 6;
#endif
- if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
- {
- write_ymode(w, mode, pc->fc.ymode_prob);
- }
+ if (!segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
+ write_ymode(w, mode, pc->fc.ymode_prob);
+ }
- if (mode == B_PRED)
- {
- int j = 0;
+ if (mode == B_PRED) {
+ int j = 0;
#if CONFIG_COMP_INTRA_PRED
- int uses_second =
- m->bmi[0].as_mode.second !=
- (B_PREDICTION_MODE) (B_DC_PRED - 1);
- vp8_write(w, uses_second, 128);
+ int uses_second =
+ m->bmi[0].as_mode.second !=
+ (B_PREDICTION_MODE)(B_DC_PRED - 1);
+ vp8_write(w, uses_second, 128);
#endif
- do {
+ do {
#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE mode2 = m->bmi[j].as_mode.second;
+ B_PREDICTION_MODE mode2 = m->bmi[j].as_mode.second;
#endif
- write_bmode(w, m->bmi[j].as_mode.first,
- pc->fc.bmode_prob);
+ write_bmode(w, m->bmi[j].as_mode.first,
+ pc->fc.bmode_prob);
#if CONFIG_COMP_INTRA_PRED
- if (uses_second)
- {
- write_bmode(w, mode2, pc->fc.bmode_prob);
- }
-#endif
- } while (++j < 16);
- }
- if(mode == I8X8_PRED)
- {
- write_i8x8_mode(w, m->bmi[0].as_mode.first,
- pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[2].as_mode.first,
- pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[8].as_mode.first,
- pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[10].as_mode.first,
- pc->fc.i8x8_mode_prob);
- }
- else
- {
- write_uv_mode(w, mi->uv_mode,
- pc->fc.uv_mode_prob[mode]);
- }
- }
- else
- {
- int_mv best_mv, best_second_mv;
- int ct[4];
-
- vp8_prob mv_ref_p [VP8_MVREFS-1];
-
- {
- int_mv n1, n2;
-
- vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
- rf, cpi->common.ref_frame_sign_bias);
- vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
+ if (uses_second) {
+ write_bmode(w, mode2, pc->fc.bmode_prob);
+ }
+#endif
+ } while (++j < 16);
+ }
+ if (mode == I8X8_PRED) {
+ write_i8x8_mode(w, m->bmi[0].as_mode.first,
+ pc->fc.i8x8_mode_prob);
+ write_i8x8_mode(w, m->bmi[2].as_mode.first,
+ pc->fc.i8x8_mode_prob);
+ write_i8x8_mode(w, m->bmi[8].as_mode.first,
+ pc->fc.i8x8_mode_prob);
+ write_i8x8_mode(w, m->bmi[10].as_mode.first,
+ pc->fc.i8x8_mode_prob);
+ } else {
+ write_uv_mode(w, mi->uv_mode,
+ pc->fc.uv_mode_prob[mode]);
+ }
+ } else {
+ int_mv best_mv, best_second_mv;
+ int ct[4];
+
+ vp8_prob mv_ref_p [VP8_MVREFS - 1];
+
+ {
+ int_mv n1, n2;
+
+ vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
+ rf, cpi->common.ref_frame_sign_bias);
+ vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
- accum_mv_refs(mode, ct);
+ accum_mv_refs(mode, ct);
#endif
- }
+ }
#ifdef ENTROPY_STATS
- active_section = 3;
+ active_section = 3;
#endif
- // Is the segment coding of mode enabled
- if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
- {
- write_mv_ref(w, mode, mv_ref_p);
- vp8_accum_mv_refs(&cpi->common, mode, ct);
- }
+ // Is the segment coding of mode enabled
+ if (!segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
+ write_mv_ref(w, mode, mv_ref_p);
+ vp8_accum_mv_refs(&cpi->common, mode, ct);
+ }
#if CONFIG_PRED_FILTER
- // Is the prediction filter enabled
- if (mode >= NEARESTMV && mode < SPLITMV)
- {
- if (cpi->common.pred_filter_mode == 2 )
- vp8_write(w, mi->pred_filter_enabled,
- pc->prob_pred_filter_off);
- else
- assert (mi->pred_filter_enabled ==
- cpi->common.pred_filter_mode);
- }
-#endif
- if (mi->second_ref_frame &&
- (mode == NEWMV || mode == SPLITMV))
- {
- int_mv n1, n2;
-
- vp8_find_near_mvs(xd, m,
- prev_m,
- &n1, &n2, &best_second_mv, ct,
- mi->second_ref_frame, cpi->common.ref_frame_sign_bias);
- }
-
- // does the feature use compound prediction or not
- // (if not specified at the frame/segment level)
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob( pc, xd, PRED_COMP ) );
- }
-
- {
- switch (mode) /* new, split require MVs */
- {
- case NEWMV:
+ // Is the prediction filter enabled
+ if (mode >= NEARESTMV && mode < SPLITMV) {
+ if (cpi->common.pred_filter_mode == 2)
+ vp8_write(w, mi->pred_filter_enabled,
+ pc->prob_pred_filter_off);
+ else
+ assert(mi->pred_filter_enabled ==
+ cpi->common.pred_filter_mode);
+ }
+#endif
+ if (mi->second_ref_frame &&
+ (mode == NEWMV || mode == SPLITMV)) {
+ int_mv n1, n2;
+
+ vp8_find_near_mvs(xd, m,
+ prev_m,
+ &n1, &n2, &best_second_mv, ct,
+ mi->second_ref_frame, cpi->common.ref_frame_sign_bias);
+ }
+
+        // Does this MB use compound prediction or not
+        // (if not specified at the frame/segment level)?
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
+ get_pred_prob(pc, xd, PRED_COMP));
+ }
+
+ {
+ switch (mode) { /* new, split require MVs */
+ case NEWMV:
#ifdef ENTROPY_STATS
- active_section = 5;
+ active_section = 5;
#endif
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- write_mv_hp(w, &mi->mv.as_mv, &best_mv, mvc_hp);
- }
- else
-#endif
- {
- write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
- }
-
- if (mi->second_ref_frame)
- {
+ if (xd->allow_high_precision_mv) {
+ write_mv_hp(w, &mi->mv.as_mv, &best_mv, mvc_hp);
+ } else
+#endif
+ {
+ write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
+ }
+
+ if (mi->second_ref_frame) {
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- write_mv_hp(w, &mi->second_mv.as_mv,
- &best_second_mv, mvc_hp);
- }
- else
-#endif
- {
- write_mv(w, &mi->second_mv.as_mv,
- &best_second_mv, mvc);
- }
- }
- break;
- case SPLITMV:
- {
- int j = 0;
+ if (xd->allow_high_precision_mv) {
+ write_mv_hp(w, &mi->second_mv.as_mv,
+ &best_second_mv, mvc_hp);
+ } else
+#endif
+ {
+ write_mv(w, &mi->second_mv.as_mv,
+ &best_second_mv, mvc);
+ }
+ }
+ break;
+ case SPLITMV: {
+ int j = 0;
#ifdef MODE_STATS
- ++count_mb_seg [mi->partitioning];
+ ++count_mb_seg [mi->partitioning];
#endif
- write_split(w, mi->partitioning, cpi->common.fc.mbsplit_prob);
+ write_split(w, mi->partitioning, cpi->common.fc.mbsplit_prob);
#if CONFIG_ADAPTIVE_ENTROPY
- cpi->mbsplit_count[mi->partitioning]++;
+ cpi->mbsplit_count[mi->partitioning]++;
#endif
- do
- {
- B_PREDICTION_MODE blockmode;
- int_mv blockmv;
- const int *const L =
- vp8_mbsplits [mi->partitioning];
- int k = -1; /* first block in subset j */
- int mv_contz;
- int_mv leftmv, abovemv;
+ do {
+ B_PREDICTION_MODE blockmode;
+ int_mv blockmv;
+ const int *const L =
+ vp8_mbsplits [mi->partitioning];
+ int k = -1; /* first block in subset j */
+ int mv_contz;
+ int_mv leftmv, abovemv;
- blockmode = cpi->mb.partition_info->bmi[j].mode;
- blockmv = cpi->mb.partition_info->bmi[j].mv;
+ blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmv = cpi->mb.partition_info->bmi[j].mv;
#if CONFIG_DEBUG
- while (j != L[++k])
- if (k >= 16)
- assert(0);
+ while (j != L[++k])
+ if (k >= 16)
+ assert(0);
#else
- while (j != L[++k]);
+ while (j != L[++k]);
#endif
- leftmv.as_int = left_block_mv(m, k);
- abovemv.as_int = above_block_mv(m, k, mis);
- mv_contz = vp8_mv_cont(&leftmv, &abovemv);
+ leftmv.as_int = left_block_mv(m, k);
+ abovemv.as_int = above_block_mv(m, k, mis);
+ mv_contz = vp8_mv_cont(&leftmv, &abovemv);
- write_sub_mv_ref(w, blockmode,
- cpi->common.fc.sub_mv_ref_prob [mv_contz]);
+ write_sub_mv_ref(w, blockmode,
+ cpi->common.fc.sub_mv_ref_prob [mv_contz]);
#if CONFIG_ADAPTIVE_ENTROPY
- cpi->sub_mv_ref_count[mv_contz][blockmode-LEFT4X4]++;
+ cpi->sub_mv_ref_count[mv_contz][blockmode - LEFT4X4]++;
#endif
- if (blockmode == NEW4X4)
- {
+ if (blockmode == NEW4X4) {
#ifdef ENTROPY_STATS
- active_section = 11;
+ active_section = 11;
#endif
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- write_mv_hp(w, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT_HP *) mvc_hp);
- }
- else
-#endif
- {
- write_mv(w, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT *) mvc);
- }
-
- if (mi->second_ref_frame)
- {
-#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
- }
- else
-#endif
- {
- write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT *) mvc);
- }
- }
- }
- }
- while (++j < cpi->mb.partition_info->count);
- }
- break;
- default:
- break;
- }
+ if (xd->allow_high_precision_mv) {
+ write_mv_hp(w, &blockmv.as_mv, &best_mv,
+ (const MV_CONTEXT_HP *) mvc_hp);
+ } else
+#endif
+ {
+ write_mv(w, &blockmv.as_mv, &best_mv,
+ (const MV_CONTEXT *) mvc);
}
- }
- // Next MB
- mb_row += dy;
- mb_col += dx;
- m += offset_extended;
- prev_m += offset_extended;
- cpi->mb.partition_info += offset_extended;
-#if CONFIG_DEBUG
- assert((prev_m-cpi->common.prev_mip)==(m-cpi->common.mip));
- assert((prev_m-cpi->common.prev_mi)==(m-cpi->common.mi));
-#endif
+ if (mi->second_ref_frame) {
+#if CONFIG_HIGH_PRECISION_MV
+ if (xd->allow_high_precision_mv) {
+ write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
+ } else
+#endif
+ {
+ write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT *) mvc);
+ }
+ }
+ }
+ } while (++j < cpi->mb.partition_info->count);
+ }
+ break;
+ default:
+ break;
}
+ }
}
- // Next SB
- mb_row += 2;
- m += mis + (1 - (pc->mb_cols & 0x1));
- prev_m += mis + (1 - (pc->mb_cols & 0x1));
- cpi->mb.partition_info += mis + (1 - (pc->mb_cols & 0x1));
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
+ m += offset_extended;
+ prev_m += offset_extended;
+ cpi->mb.partition_info += offset_extended;
+#if CONFIG_DEBUG
+ assert((prev_m - cpi->common.prev_mip) == (m - cpi->common.mip));
+ assert((prev_m - cpi->common.prev_mi) == (m - cpi->common.mi));
+#endif
+ }
}
+
+ // Next SB
+ mb_row += 2;
+ m += mis + (1 - (pc->mb_cols & 0x1));
+ prev_m += mis + (1 - (pc->mb_cols & 0x1));
+ cpi->mb.partition_info += mis + (1 - (pc->mb_cols & 0x1));
+ }
}
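The loops above advance two macroblock rows and columns at a time, and the row_delta/col_delta tables walk each 2x2 group in the order top-left, top-right, bottom-left, bottom-right; each delta is applied after an MB is processed (or skipped as out of frame), leaving the cursor on the next group's origin. A stand-alone sketch of the walk:

#include <stdio.h>

/* Sketch only: visiting order produced by the delta tables. */
int main(void) {
  const int row_delta[4] = { 0, +1, 0, -1 };
  const int col_delta[4] = { +1, -1, +1, +1 };
  int mb_row = 0, mb_col = 0;   /* origin of a 2x2 MB group */
  int i;

  for (i = 0; i < 4; i++) {
    printf("visit (%d, %d)\n", mb_row, mb_col);
    mb_row += row_delta[i];
    mb_col += col_delta[i];
  }
  /* Prints (0,0) (0,1) (1,0) (1,1) and ends at (0,2), the origin of
   * the next group to the right. */
  return 0;
}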
-static void write_kfmodes(VP8_COMP *cpi)
-{
- vp8_writer *const bc = & cpi->bc;
- VP8_COMMON *const c = & cpi->common;
- const int mis = c->mode_info_stride;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- MODE_INFO *m;
- int i;
- int row, col;
- int mb_row, mb_col;
+static void write_kfmodes(VP8_COMP *cpi) {
+ vp8_writer *const bc = & cpi->bc;
+ VP8_COMMON *const c = & cpi->common;
+ const int mis = c->mode_info_stride;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ MODE_INFO *m;
+ int i;
+ int row, col;
+ int mb_row, mb_col;
#if CONFIG_NEWENTROPY
- int prob_skip_false[3] = {0, 0, 0};
+ int prob_skip_false[3] = {0, 0, 0};
#else
- int prob_skip_false = 0;
+ int prob_skip_false = 0;
#endif
- int row_delta[4] = { 0, +1, 0, -1};
- int col_delta[4] = {+1, -1, +1, +1};
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = { +1, -1, +1, +1};
- //printf("write_kfmodes\n");
- if (c->mb_no_coeff_skip)
- {
- // Divide by 0 check. 0 case possible with segment features
+ // printf("write_kfmodes\n");
+ if (c->mb_no_coeff_skip) {
+ // Divide by 0 check. 0 case possible with segment features
#if CONFIG_NEWENTROPY
- int k;
- for (k=0;k<MBSKIP_CONTEXTS;++k)
- {
- if ( (cpi->skip_false_count[k] + cpi->skip_true_count[k]) )
- {
- prob_skip_false[k] = cpi->skip_false_count[k] * 256 /
- (cpi->skip_false_count[k] + cpi->skip_true_count[k]);
-
- if (prob_skip_false[k] <= 1)
- prob_skip_false[k] = 1;
-
- if (prob_skip_false[k] > 255)
- prob_skip_false[k] = 255;
- }
- else
- prob_skip_false[k] = 255;
-
- c->mbskip_pred_probs[k] = prob_skip_false[k];
- vp8_write_literal(bc, prob_skip_false[k], 8);
- }
-#else
- if ( (cpi->skip_false_count + cpi->skip_true_count) )
- {
- prob_skip_false = cpi->skip_false_count * 256 /
- (cpi->skip_false_count + cpi->skip_true_count);
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
+ if ((cpi->skip_false_count[k] + cpi->skip_true_count[k])) {
+ prob_skip_false[k] = cpi->skip_false_count[k] * 256 /
+ (cpi->skip_false_count[k] + cpi->skip_true_count[k]);
- if (prob_skip_false <= 1)
- prob_skip_false = 1;
+ if (prob_skip_false[k] <= 1)
+ prob_skip_false[k] = 1;
- if (prob_skip_false > 255)
- prob_skip_false = 255;
- }
- else
- prob_skip_false = 255;
+ if (prob_skip_false[k] > 255)
+ prob_skip_false[k] = 255;
+ } else
+ prob_skip_false[k] = 255;
- cpi->prob_skip_false = prob_skip_false;
- vp8_write_literal(bc, prob_skip_false, 8);
-#endif
+ c->mbskip_pred_probs[k] = prob_skip_false[k];
+ vp8_write_literal(bc, prob_skip_false[k], 8);
}
+#else
+ if ((cpi->skip_false_count + cpi->skip_true_count)) {
+ prob_skip_false = cpi->skip_false_count * 256 /
+ (cpi->skip_false_count + cpi->skip_true_count);
- if(!c->kf_ymode_probs_update)
- {
- vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
- }
+ if (prob_skip_false <= 1)
+ prob_skip_false = 1;
- mb_row = 0;
- for (row=0; row < c->mb_rows; row += 2)
- {
- m = c->mi + row * mis;
-
- mb_col = 0;
- for (col=0; col < c->mb_cols; col += 2)
- {
- // Process the 4 MBs in the order:
- // top-left, top-right, bottom-left, bottom-right
- for (i=0; i<4; i++)
- {
- int ym;
- int segment_id;
- int dy = row_delta[i];
- int dx = col_delta[i];
- int offset_extended = dy * mis + dx;
-
- if ((mb_row >= c->mb_rows) || (mb_col >= c->mb_cols))
- {
- // MB lies outside frame, move on
- mb_row += dy;
- mb_col += dx;
- m += offset_extended;
- continue;
- }
+ if (prob_skip_false > 255)
+ prob_skip_false = 255;
+ } else
+ prob_skip_false = 255;
- // Make sure the MacroBlockD mode info pointer is set correctly
- xd->mode_info_context = m;
+ cpi->prob_skip_false = prob_skip_false;
+ vp8_write_literal(bc, prob_skip_false, 8);
+#endif
+ }
+
+ if (!c->kf_ymode_probs_update) {
+ vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
+ }
+
+ mb_row = 0;
+ for (row = 0; row < c->mb_rows; row += 2) {
+ m = c->mi + row * mis;
+
+ mb_col = 0;
+ for (col = 0; col < c->mb_cols; col += 2) {
+ // Process the 4 MBs in the order:
+ // top-left, top-right, bottom-left, bottom-right
+ for (i = 0; i < 4; i++) {
+ int ym;
+ int segment_id;
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_extended = dy * mis + dx;
+
+ if ((mb_row >= c->mb_rows) || (mb_col >= c->mb_cols)) {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+ m += offset_extended;
+ continue;
+ }
- ym = m->mbmi.mode;
- segment_id = m->mbmi.segment_id;
+ // Make sure the MacroBlockD mode info pointer is set correctly
+ xd->mode_info_context = m;
- if (cpi->mb.e_mbd.update_mb_segmentation_map)
- {
- write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
- }
+ ym = m->mbmi.mode;
+ segment_id = m->mbmi.segment_id;
- if ( c->mb_no_coeff_skip &&
- ( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
- (get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0) ) )
- {
+ if (cpi->mb.e_mbd.update_mb_segmentation_map) {
+ write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
+ }
+
+ if (c->mb_no_coeff_skip &&
+ (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
#if CONFIG_NEWENTROPY
- vp8_encode_bool(bc, m->mbmi.mb_skip_coeff,
- get_pred_prob(c, xd, PRED_MBSKIP));
+ vp8_encode_bool(bc, m->mbmi.mb_skip_coeff,
+ get_pred_prob(c, xd, PRED_MBSKIP));
#else
- vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
+ vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
#endif
- }
- kfwrite_ymode(bc, ym,
- c->kf_ymode_prob[c->kf_ymode_probs_index]);
+ }
+ kfwrite_ymode(bc, ym,
+ c->kf_ymode_prob[c->kf_ymode_probs_index]);
- if (ym == B_PRED)
- {
- const int mis = c->mode_info_stride;
- int i = 0;
+ if (ym == B_PRED) {
+ const int mis = c->mode_info_stride;
+ int i = 0;
#if CONFIG_COMP_INTRA_PRED
- int uses_second =
- m->bmi[0].as_mode.second !=
- (B_PREDICTION_MODE) (B_DC_PRED - 1);
- vp8_write(bc, uses_second, 128);
-#endif
- do
- {
- const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
- const B_PREDICTION_MODE L = left_block_mode(m, i);
- const int bm = m->bmi[i].as_mode.first;
+ int uses_second =
+ m->bmi[0].as_mode.second !=
+ (B_PREDICTION_MODE)(B_DC_PRED - 1);
+ vp8_write(bc, uses_second, 128);
+#endif
+ do {
+ const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(m, i);
+ const int bm = m->bmi[i].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- const int bm2 = m->bmi[i].as_mode.second;
+ const int bm2 = m->bmi[i].as_mode.second;
#endif
#ifdef ENTROPY_STATS
- ++intra_mode_stats [A] [L] [bm];
+ ++intra_mode_stats [A] [L] [bm];
#endif
- write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
- //printf(" mode: %d\n", bm);
+ write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
+ // printf(" mode: %d\n", bm);
#if CONFIG_COMP_INTRA_PRED
- if (uses_second)
- {
- write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
- }
-#endif
- }
- while (++i < 16);
- }
- if(ym == I8X8_PRED)
- {
- write_i8x8_mode(bc, m->bmi[0].as_mode.first,
- c->fc.i8x8_mode_prob);
- //printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[2].as_mode.first,
- c->fc.i8x8_mode_prob);
- //printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[8].as_mode.first,
- c->fc.i8x8_mode_prob);
- //printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[10].as_mode.first,
- c->fc.i8x8_mode_prob);
- //printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
- }
- else
- write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
-
- // Next MB
- mb_row += dy;
- mb_col += dx;
- m += offset_extended;
+ if (uses_second) {
+ write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
}
+#endif
+ } while (++i < 16);
}
- mb_row += 2;
+ if (ym == I8X8_PRED) {
+ write_i8x8_mode(bc, m->bmi[0].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[2].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[8].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[10].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
+ } else
+ write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
+ m += offset_extended;
+ }
}
+ mb_row += 2;
+ }
}
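For B_PRED macroblocks on a key frame, each 4x4 sub-block mode is coded against kf_bmode_prob[A][L], where A and L are the already-coded modes of the blocks above and to the left, so the decoder derives the identical context with no side information. A rough sketch of how those contexts chain within one macroblock (names hypothetical; the real above_block_mode()/left_block_mode() also reach into neighboring macroblocks):

typedef int b_mode;   /* stand-in for B_PREDICTION_MODE */

/* Sketch only: derive the (above, left) context of each of the 16
 * sub-blocks, given the bottom row / right column of the neighbors. */
static void sketch_bmode_contexts(const b_mode blk[4][4],
                                  const b_mode above_row[4],
                                  const b_mode left_col[4]) {
  int r, c;
  for (r = 0; r < 4; r++) {
    for (c = 0; c < 4; c++) {
      b_mode A = (r == 0) ? above_row[c] : blk[r - 1][c];
      b_mode L = (c == 0) ? left_col[r] : blk[r][c - 1];
      /* write_bmode(bc, blk[r][c], c->kf_bmode_prob[A][L]); */
      (void)A;
      (void)L;
    }
  }
}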
/* This function is used for debugging probability trees. */
static void print_prob_tree(vp8_prob
- coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES])
-{
- /* print coef probability tree */
- int i,j,k,l;
- FILE* f = fopen("enc_tree_probs.txt", "a");
- fprintf(f, "{\n");
- for (i = 0; i < BLOCK_TYPES; i++)
- {
- fprintf(f, " {\n");
- for (j = 0; j < COEF_BANDS; j++)
- {
- fprintf(f, " {\n");
- for (k = 0; k < PREV_COEF_CONTEXTS; k++)
- {
- fprintf(f, " {");
- for (l = 0; l < ENTROPY_NODES; l++)
- {
- fprintf(f, "%3u, ",
- (unsigned int)(coef_probs [i][j][k][l]));
- }
- fprintf(f, " }\n");
- }
- fprintf(f, " }\n");
+ coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]) {
+ /* print coef probability tree */
+ int i, j, k, l;
+ FILE *f = fopen("enc_tree_probs.txt", "a");
+ fprintf(f, "{\n");
+ for (i = 0; i < BLOCK_TYPES; i++) {
+ fprintf(f, " {\n");
+ for (j = 0; j < COEF_BANDS; j++) {
+ fprintf(f, " {\n");
+ for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
+ fprintf(f, " {");
+ for (l = 0; l < ENTROPY_NODES; l++) {
+ fprintf(f, "%3u, ",
+ (unsigned int)(coef_probs [i][j][k][l]));
}
- fprintf(f, " }\n");
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
}
- fprintf(f, "}\n");
- fclose(f);
+ fprintf(f, " }\n");
+ }
+ fprintf(f, "}\n");
+ fclose(f);
}
-void build_coeff_contexts(VP8_COMP *cpi)
-{
- int i = 0;
- do
- {
- int j = 0;
- do
- {
- int k = 0;
- do
- {
+void build_coeff_contexts(VP8_COMP *cpi) {
+ int i = 0;
+ do {
+ int j = 0;
+ do {
+ int k = 0;
+ do {
#ifdef ENTROPY_STATS
- int t;
+ int t;
#endif
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-#endif
- vp8_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- cpi->frame_coef_probs [i][j][k],
- cpi->frame_branch_ct [i][j][k],
- cpi->coef_counts [i][j][k],
- 256, 1
- );
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+#endif
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_coef_probs [i][j][k],
+ cpi->frame_branch_ct [i][j][k],
+ cpi->coef_counts [i][j][k],
+ 256, 1
+ );
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- {
- t = 0;
- do
- {
- context_counters [i][j][k][t] +=
- cpi->coef_counts [i][j][k][t];
- }
- while (++t < MAX_ENTROPY_TOKENS);
- }
-#endif
- }
- while (++k < PREV_COEF_CONTEXTS);
+ if (!cpi->dummy_packing) {
+ t = 0;
+ do {
+ context_counters [i][j][k][t] +=
+ cpi->coef_counts [i][j][k][t];
+ } while (++t < MAX_ENTROPY_TOKENS);
}
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES);
-
-
- i= 0;
- if(cpi->common.txfm_mode == ALLOW_8X8)
- {
- do
- {
- int j = 0; /* token/prob index */
- do
- {
- int k = 0;
- do
- {
- /* at every context */
- /* calc probs and branch cts for this frame only */
- //vp8_prob new_p [ENTROPY_NODES];
- //unsigned int branch_ct [ENTROPY_NODES] [2];
+#endif
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+
+
+ i = 0;
+ if (cpi->common.txfm_mode == ALLOW_8X8) {
+ do {
+ int j = 0; /* token/prob index */
+ do {
+ int k = 0;
+ do {
+ /* at every context */
+ /* calc probs and branch cts for this frame only */
+ // vp8_prob new_p [ENTROPY_NODES];
+ // unsigned int branch_ct [ENTROPY_NODES] [2];
#ifdef ENTROPY_STATS
- int t = 0; /* token/prob index */
+ int t = 0; /* token/prob index */
#endif
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
-#endif
- vp8_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- cpi->frame_coef_probs_8x8 [i][j][k],
- cpi->frame_branch_ct_8x8 [i][j][k],
- cpi->coef_counts_8x8 [i][j][k],
- 256, 1
- );
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+#endif
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_coef_probs_8x8 [i][j][k],
+ cpi->frame_branch_ct_8x8 [i][j][k],
+ cpi->coef_counts_8x8 [i][j][k],
+ 256, 1
+ );
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- {
- t = 0;
- do
- {
- context_counters_8x8 [i][j][k][t] +=
- cpi->coef_counts_8x8 [i][j][k][t];
- }
- while (++t < MAX_ENTROPY_TOKENS);
- }
+ if (!cpi->dummy_packing) {
+ t = 0;
+ do {
+ context_counters_8x8 [i][j][k][t] +=
+ cpi->coef_counts_8x8 [i][j][k][t];
+ } while (++t < MAX_ENTROPY_TOKENS);
+ }
#endif
- }
- while (++k < PREV_COEF_CONTEXTS);
- }
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES_8X8);
- }
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES_8X8);
+ }
}
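vp8_tree_probs_from_distribution() reduces the token counts gathered in the encode loop to one probability per internal node of the coefficient token tree, and keeps the per-branch counts so the update passes below can price candidate probabilities against actual usage. What happens at a single binary node, sketched with the usual 8-bit clamping (the real routine recurses over vp8_coef_tree):

/* Sketch only: probability of taking the 0 (left) branch at one node. */
static unsigned char branch_prob(unsigned int left_ct, unsigned int right_ct) {
  unsigned int total = left_ct + right_ct;
  unsigned int p = total ? (left_ct * 256) / total : 128;
  if (p < 1) p = 1;       /* the bool coder cannot code probability 0 */
  if (p > 255) p = 255;   /* ...nor certainty (256/256) */
  return (unsigned char)p;
}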
#if CONFIG_NEWUPDATE
-static void update_coef_probs3(VP8_COMP *cpi)
-{
- const vp8_prob grpupd = 216;
- int i, j, k, t;
- vp8_writer *const w = & cpi->bc;
- int update[2];
- int savings;
- int bestupdndx[2*ENTROPY_NODES];
-
- vp8_clear_system_state(); //__asm emms;
- // Build the cofficient contexts based on counts collected in encode loop
- build_coeff_contexts(cpi);
-
- i = 0;
- for (i = 0; i < BLOCK_TYPES; ++i)
- {
- for (t = 0; t < ENTROPY_NODES; ++t)
- {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
+static void update_coef_probs3(VP8_COMP *cpi) {
+ const vp8_prob grpupd = 216;
+ int i, j, k, t;
+ vp8_writer *const w = & cpi->bc;
+ int update[2];
+ int savings;
+ int bestupdndx[2 * ENTROPY_NODES];
+
+ vp8_clear_system_state(); // __asm emms;
+  // Build the coefficient contexts based on counts collected in the encode loop
+ build_coeff_contexts(cpi);
+
+ i = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+      /* dry run to see if there is any update at all needed */
+ savings = 0;
+ update[0] = update[1] = 0;
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold) u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
- if (u)
- savings += s;
-#endif
- //printf(" %d %d %d: %d\n", i, j, k, u);
- update[u]++;
- }
- }
- if (update[1] == 0 || savings < 0)
- {
- vp8_write(w, 0, grpupd);
- continue;
- }
- vp8_write(w, 1, grpupd);
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
+ if (s > 0) u = 1;
+ if (u)
+ savings += s;
+#endif
+ // printf(" %d %d %d: %d\n", i, j, k, u);
+ update[u]++;
+ }
+ }
+ if (update[1] == 0 || savings < 0) {
+ vp8_write(w, 0, grpupd);
+ continue;
+ }
+ vp8_write(w, 1, grpupd);
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold) u = 1;
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
+ if (s > 0) u = 1;
#endif
- //printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
- vp8_write(w, u, upd);
+ // printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- ++ tree_update_hist [i][j][k][t] [u];
+ if (!cpi->dummy_packing)
+ ++ tree_update_hist [i][j][k][t] [u];
#endif
- if (u)
- { /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, *Pold);
+ *Pold = newp;
+ }
- }
- }
}
+ }
}
-
- if(cpi->common.txfm_mode != ALLOW_8X8) return;
-
- for (i = 0; i < BLOCK_TYPES_8X8; ++i)
- {
- for (t = 0; t < ENTROPY_NODES; ++t)
- {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
+ }
+
+ if (cpi->common.txfm_mode != ALLOW_8X8) return;
+
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+      /* dry run to see if there is any update at all needed */
+ savings = 0;
+ update[0] = update[1] = 0;
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
- if (u)
- savings += s;
-#endif
- update[u]++;
- }
- }
- if (update[1] == 0 || savings < 0)
- {
- vp8_write(w, 0, grpupd);
- continue;
- }
- vp8_write(w, 1, grpupd);
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
+ s = prob_update_savings(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
+ if (u)
+ savings += s;
+#endif
+ update[u]++;
+ }
+ }
+ if (update[1] == 0 || savings < 0) {
+ vp8_write(w, 0, grpupd);
+ continue;
+ }
+ vp8_write(w, 1, grpupd);
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
+ s = prob_update_savings(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
#endif
- vp8_write(w, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- ++ tree_update_hist_8x8 [i][j][k][t] [u];
-#endif
- if (u)
- {
- /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
- }
- }
+ if (!cpi->dummy_packing)
+ ++ tree_update_hist_8x8 [i][j][k][t] [u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, *Pold);
+ *Pold = newp;
+ }
}
+ }
}
+ }
}
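Both the dry run and the coded pass above reduce to one comparison: the bits the frame's symbols would save under the new probability, against the header bits spent signaling it. A self-contained sketch of that decision, approximating the codec's table-driven vp8_cost_zero()/vp8_cost_one() with a log2 (costs in 1/256-bit units, as in the real tables):

#include <math.h>

static int cost_zero(int p) {   /* cost of coding a 0 with probability p */
  return (int)(-256.0 * log2(p / 256.0));
}
static int cost_one(int p) {    /* cost of coding a 1 with probability p */
  return cost_zero(256 - p);
}

/* Sketch only: is sending newp worth its overhead? ct[0]/ct[1] are the
 * branch counts collected for this frame. */
static int update_is_worthwhile(const unsigned int ct[2],
                                int oldp, int newp, int header_bits) {
  int old_cost = (int)(ct[0] * cost_zero(oldp) + ct[1] * cost_one(oldp));
  int new_cost = (int)(ct[0] * cost_zero(newp) + ct[1] * cost_one(newp));
  return (old_cost - new_cost) > header_bits;
}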
-static void update_coef_probs2(VP8_COMP *cpi)
-{
- const vp8_prob grpupd = 192;
- int i, j, k, t;
- vp8_writer *const w = & cpi->bc;
- int update[2];
- int savings;
- int bestupdndx[2*ENTROPY_NODES];
+static void update_coef_probs2(VP8_COMP *cpi) {
+ const vp8_prob grpupd = 192;
+ int i, j, k, t;
+ vp8_writer *const w = & cpi->bc;
+ int update[2];
+ int savings;
+ int bestupdndx[2 * ENTROPY_NODES];
- vp8_clear_system_state(); //__asm emms;
- // Build the cofficient contexts based on counts collected in encode loop
- build_coeff_contexts(cpi);
+ vp8_clear_system_state(); // __asm emms;
+  // Build the coefficient contexts based on counts collected in the encode loop
+ build_coeff_contexts(cpi);
- for (t = 0; t < ENTROPY_NODES; ++t)
- {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (i = 0; i < BLOCK_TYPES; ++i)
- {
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+    /* dry run to see if there is any update at all needed */
+ savings = 0;
+ update[0] = update[1] = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold) u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
- if (u)
- savings += s;
-#endif
- //printf(" %d %d %d: %d\n", i, j, k, u);
- update[u]++;
- }
- }
- }
- if (update[1] == 0 || savings < 0)
- {
- vp8_write(w, 0, grpupd);
- continue;
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
+ if (s > 0) u = 1;
+ if (u)
+ savings += s;
+#endif
+ // printf(" %d %d %d: %d\n", i, j, k, u);
+ update[u]++;
}
- vp8_write(w, 1, grpupd);
- for (i = 0; i < BLOCK_TYPES; ++i)
- {
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s;
- int u = 0;
+ }
+ }
+ if (update[1] == 0 || savings < 0) {
+ vp8_write(w, 0, grpupd);
+ continue;
+ }
+ vp8_write(w, 1, grpupd);
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
- if (s > 0 && newp != *Pold) u = 1;
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold) u = 1;
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
- if (s > 0) u = 1;
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
+ if (s > 0) u = 1;
#endif
- //printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
- vp8_write(w, u, upd);
+ // printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- ++ tree_update_hist [i][j][k][t] [u];
+ ++ tree_update_hist [i][j][k][t] [u];
#endif
- if (u)
- { /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
- }
- }
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, *Pold);
+ *Pold = newp;
+ }
}
+ }
}
+ }
- if(cpi->common.txfm_mode != ALLOW_8X8) return;
+ if (cpi->common.txfm_mode != ALLOW_8X8) return;
- for (t = 0; t < ENTROPY_NODES; ++t)
- {
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- update[0] = update[1] = 0;
- for (i = 0; i < BLOCK_TYPES_8X8; ++i)
- {
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+    /* dry run to see if there is any update at all needed */
+ savings = 0;
+ update[0] = update[1] = 0;
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
- if (u)
- savings += s;
-#endif
- update[u]++;
- }
- }
- }
- if (update[1] == 0 || savings < 0)
- {
- vp8_write(w, 0, grpupd);
- continue;
+ s = prob_update_savings(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
+ if (u)
+ savings += s;
+#endif
+ update[u]++;
}
- vp8_write(w, 1, grpupd);
- for (i = 0; i < BLOCK_TYPES_8X8; ++i)
- {
- for (j = !i; j < COEF_BANDS; ++j)
- {
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
- {
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s;
- int u = 0;
+ }
+ }
+ if (update[1] == 0 || savings < 0) {
+ vp8_write(w, 0, grpupd);
+ continue;
+ }
+ vp8_write(w, 1, grpupd);
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s;
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
#else
- s = prob_update_savings(
- cpi->frame_branch_ct_8x8 [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
+ s = prob_update_savings(
+ cpi->frame_branch_ct_8x8 [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
#endif
- vp8_write(w, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- ++ tree_update_hist_8x8 [i][j][k][t] [u];
-#endif
- if (u)
- {
- /* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
- *Pold = newp;
- }
- }
- }
+ if (!cpi->dummy_packing)
+ ++ tree_update_hist_8x8 [i][j][k][t] [u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, *Pold);
+ *Pold = newp;
+ }
}
+ }
}
+ }
}
#endif
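What separates update_coef_probs2/3 from the plain update_coef_probs below is the gating layer: each group of per-probability update flags is fronted by a single bool coded with probability grpupd, so a group with nothing worth sending costs one bool instead of one flag per probability. A sketch of just that signaling pattern, reusing this file's writer API (the probability diff sent after each set flag is omitted):

/* Sketch only: gate a group of update flags behind one "changed" bool. */
static void sketch_write_group(vp8_writer *w, const int *changed, int n,
                               vp8_prob grpupd, vp8_prob upd) {
  int any = 0, i;
  for (i = 0; i < n; i++)
    any |= changed[i];

  vp8_write(w, any, grpupd);    /* a clean group costs a single bool */
  if (!any)
    return;

  for (i = 0; i < n; i++)
    vp8_write(w, changed[i], upd);
}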
-static void update_coef_probs(VP8_COMP *cpi)
-{
- int i = 0;
- vp8_writer *const w = & cpi->bc;
- int update[2] = {0, 0};
- int savings;
+static void update_coef_probs(VP8_COMP *cpi) {
+ int i = 0;
+ vp8_writer *const w = & cpi->bc;
+ int update[2] = {0, 0};
+ int savings;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state(); // __asm emms;
- // Build the cofficient contexts based on counts collected in encode loop
+  // Build the coefficient contexts based on counts collected in the encode loop
- build_coeff_contexts(cpi);
+ build_coeff_contexts(cpi);
- //vp8_prob bestupd = find_coef_update_prob(cpi);
+ // vp8_prob bestupd = find_coef_update_prob(cpi);
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- do
- {
+  /* dry run to see if there is any update at all needed */
+ savings = 0;
+ do {
#if CONFIG_NEWUPDATE
- int j = !i;
+ int j = !i;
#else
- int j = 0; /* token/prob index */
-#endif
- do
- {
- int k = 0;
- int prev_coef_savings[ENTROPY_NODES] = {0};
- do
- {
- int t = 0; /* token/prob index */
- do
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s = prev_coef_savings[t];
- int u = 0;
+ int j = 0; /* token/prob index */
+#endif
+ do {
+ int k = 0;
+ int prev_coef_savings[ENTROPY_NODES] = {0};
+ do {
+ int t = 0; /* token/prob index */
+ do {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s = prev_coef_savings[t];
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if CONFIG_NEWUPDATE && defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
- if (u)
- savings += s;
-#endif
-
- update[u]++;
- }
- while (++t < ENTROPY_NODES);
- }
- while (++k < PREV_COEF_CONTEXTS);
- }
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES);
-
- //printf("Update %d %d, savings %d\n", update[0], update[1], savings);
- /* Is coef updated at all */
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
+ if (u)
+ savings += s;
+#endif
+
+ update[u]++;
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+
+ // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
+  /* Is any coef prob updated at all? */
#if CONFIG_NEWUPDATE
- if(update[1] == 0 || savings < 0)
+ if (update[1] == 0 || savings < 0)
#else
- if(update[1] == 0)
+ if (update[1] == 0)
#endif
- {
- vp8_write_bit(w, 0);
- }
- else
- {
- vp8_write_bit(w, 1);
- i=0;
- do
- {
+ {
+ vp8_write_bit(w, 0);
+ } else {
+ vp8_write_bit(w, 1);
+ i = 0;
+ do {
#if CONFIG_NEWUPDATE
- int j = !i;
+ int j = !i;
#else
- int j = 0; /* token/prob index */
-#endif
- do
- {
- int k = 0;
- int prev_coef_savings[ENTROPY_NODES] = {0};
-
- do
- {
- // calc probs and branch cts for this frame only
- int t = 0; /* token/prob index */
- do
- {
- vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
- const vp8_prob upd = COEF_UPDATE_PROB;
- int s = prev_coef_savings[t];
- int u = 0;
+ int j = 0; /* token/prob index */
+#endif
+ do {
+ int k = 0;
+ int prev_coef_savings[ENTROPY_NODES] = {0};
+
+ do {
+ // calc probs and branch cts for this frame only
+ int t = 0; /* token/prob index */
+ do {
+ vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s = prev_coef_savings[t];
+ int u = 0;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if CONFIG_NEWUPDATE && defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(
- cpi->frame_branch_ct [i][j][k][t],
- *Pold, &newp, upd);
- if (s > 0 && newp != *Pold)
- u = 1;
+ s = prob_diff_update_savings_search(
+ cpi->frame_branch_ct [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
#else
- s = prob_update_savings(
- cpi->frame_branch_ct [i][j][k][t],
- *Pold, newp, upd);
- if (s > 0)
- u = 1;
+ s = prob_update_savings(
+ cpi->frame_branch_ct [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
#endif
- vp8_write(w, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- ++ tree_update_hist [i][j][k][t] [u];
+ if (!cpi->dummy_packing)
+ ++ tree_update_hist [i][j][k][t] [u];
#endif
- if (u)
- {
- /* send/use new probability */
+ if (u) {
+ /* send/use new probability */
#if CONFIG_NEWUPDATE
- write_prob_diff_update(w, newp, *Pold);
+ write_prob_diff_update(w, newp, *Pold);
#else
- vp8_write_literal(w, newp, 8);
+ vp8_write_literal(w, newp, 8);
#endif
- *Pold = newp;
- }
- }
- while (++t < ENTROPY_NODES);
-
- }
- while (++k < PREV_COEF_CONTEXTS);
+ *Pold = newp;
}
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES);
- }
+ } while (++t < ENTROPY_NODES);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES);
+ }
- /* do not do this if not evena allowed */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- {
- /* dry run to see if update is necessary */
- update[0] = update[1] = 0;
- savings = 0;
- i = 0;
- do
- {
+
+ /* do not do this if it is not even allowed */
+ if (cpi->common.txfm_mode == ALLOW_8X8) {
+ /* dry run to see if update is necessary */
+ update[0] = update[1] = 0;
+ savings = 0;
+ i = 0;
+ do {
#if CONFIG_NEWUPDATE
- int j = !i;
+ int j = !i;
#else
- int j = 0; /* token/prob index */
-#endif
- do
- {
- int k = 0;
- do
- {
- // calc probs and branch cts for this frame only
- int t = 0; /* token/prob index */
- do
- {
- const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob oldp = *Pold;
- int s,u;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int j = 0; /* token/prob index */
+#endif
+ do {
+ int k = 0;
+ do {
+ // calc probs and branch cts for this frame only
+ int t = 0; /* token/prob index */
+ do {
+ const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ int s, u;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
#endif
#if CONFIG_NEWUPDATE && defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
- u = s > 0 && newp != oldp ? 1 : 0;
- if (u)
- savings += s - (int)(vp8_cost_zero(upd));
- else
- savings -= (int)(vp8_cost_zero(upd));
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
#else
- s = prob_update_savings(ct, oldp, newp, upd);
- u = s > 0 ? 1 : 0;
- if (u)
- savings += s;
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
+ if (u)
+ savings += s;
#endif
- update[u]++;
- }
- while (++t < MAX_ENTROPY_TOKENS - 1);
- }
- while (++k < PREV_COEF_CONTEXTS);
- }
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES_8X8);
+ update[u]++;
+ } while (++t < MAX_ENTROPY_TOKENS - 1);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES_8X8);
#if CONFIG_NEWUPDATE
- if (update[1] == 0 || savings < 0)
+ if (update[1] == 0 || savings < 0)
#else
- if (update[1] == 0)
+ if (update[1] == 0)
#endif
- {
- vp8_write_bit(w, 0);
- }
- else
- {
- vp8_write_bit(w, 1);
- i = 0;
- do
- {
+ {
+ vp8_write_bit(w, 0);
+ } else {
+ vp8_write_bit(w, 1);
+ i = 0;
+ do {
#if CONFIG_NEWUPDATE
- int j = !i;
+ int j = !i;
#else
- int j = 0; /* token/prob index */
+ int j = 0; /* token/prob index */
#endif
- do
- {
- int k = 0;
- do
- {
- int t = 0; /* token/prob index */
- do
- {
- const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
- vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
- vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
- const vp8_prob oldp = *Pold;
- const vp8_prob upd = COEF_UPDATE_PROB_8X8;
- int s, u;
+ do {
+ int k = 0;
+ do {
+ int t = 0; /* token/prob index */
+ do {
+ const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
+ vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s, u;
#if CONFIG_EXPANDED_COEF_CONTEXT
- if (k >=3 && ((i == 0 && j == 1) ||
- (i > 0 && j == 0)))
- continue;
+ if (k >= 3 && ((i == 0 && j == 1) ||
+ (i > 0 && j == 0)))
+ continue;
#endif
#if CONFIG_NEWUPDATE && defined(SEARCH_NEWP)
- s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
- u = s > 0 && newp != oldp ? 1 : 0;
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
#else
- s = prob_update_savings(ct, oldp, newp, upd);
- u = s > 0 ? 1 : 0;
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
#endif
- vp8_write(w, u, upd);
+ vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
- if(!cpi->dummy_packing)
- ++ tree_update_hist_8x8 [i][j][k][t] [u];
+ if (!cpi->dummy_packing)
+ ++ tree_update_hist_8x8 [i][j][k][t] [u];
#endif
- if (u)
- {
- /* send/use new probability */
+ if (u) {
+ /* send/use new probability */
#if CONFIG_NEWUPDATE
- write_prob_diff_update(w, newp, oldp);
+ write_prob_diff_update(w, newp, oldp);
#else
- vp8_write_literal(w, newp, 8);
-#endif
- *Pold = newp;
- }
- }
- while (++t < MAX_ENTROPY_TOKENS - 1);
- }
- while (++k < PREV_COEF_CONTEXTS);
- }
- while (++j < COEF_BANDS);
- }
- while (++i < BLOCK_TYPES_8X8);
- }
+ vp8_write_literal(w, newp, 8);
+#endif
+ *Pold = newp;
+ }
+ } while (++t < MAX_ENTROPY_TOKENS - 1);
+ } while (++k < PREV_COEF_CONTEXTS);
+ } while (++j < COEF_BANDS);
+ } while (++i < BLOCK_TYPES_8X8);
}
+ }
}
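
The loops above gate each probability update on a rate tradeoff: a node is flagged for update only when its measured savings s beat the cost of signaling the update, and the whole pass is skipped when the aggregate savings go negative. A minimal standalone sketch of that accounting in the CONFIG_NEWUPDATE/SEARCH_NEWP branch, with assumed savings values and an assumed flag cost standing in for vp8_cost_zero(upd):

#include <stdio.h>

int main(void) {
  /* Assumed per-node savings and flag cost, in 1/256-bit units; these
   * stand in for prob_diff_update_savings_search() results and
   * vp8_cost_zero(upd). */
  const int s[4] = {35, -10, 5, 120};
  const int cost_zero_upd = 20;
  int savings = 0, update[2] = {0, 0};
  int t;

  for (t = 0; t < 4; t++) {
    int u = s[t] > 0;                        /* node worth updating? */
    if (u) savings += s[t] - cost_zero_upd;  /* pay for the update flag */
    else   savings -= cost_zero_upd;         /* pay for the "no update" flag */
    update[u]++;
  }
  /* mirrors: if (update[1] == 0 || savings < 0) vp8_write_bit(w, 0); */
  printf(update[1] == 0 || savings < 0 ? "skip updates\n" : "send updates\n");
  return 0;
}
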
#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
#endif
-static void put_delta_q(vp8_writer *bc, int delta_q)
-{
- if (delta_q != 0)
- {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, abs(delta_q), 4);
+static void put_delta_q(vp8_writer *bc, int delta_q) {
+ if (delta_q != 0) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, abs(delta_q), 4);
- if (delta_q < 0)
- vp8_write_bit(bc, 1);
- else
- vp8_write_bit(bc, 0);
- }
+ if (delta_q < 0)
+ vp8_write_bit(bc, 1);
else
- vp8_write_bit(bc, 0);
+ vp8_write_bit(bc, 0);
+ } else
+ vp8_write_bit(bc, 0);
}
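
put_delta_q() writes a non-zero delta as an update flag, a 4-bit magnitude, and a trailing sign bit (1 for negative); a zero delta costs a single 0 bit. A toy round-trip sketch of the matching read, using an invented array-backed bit source rather than the decoder's real boolean reader:

#include <stdio.h>

/* Stand-in bit source; the real decoder reads arithmetic-coded bits. */
typedef struct { const int *bits; int pos; } toy_reader;
static int read_bit(toy_reader *r) { return r->bits[r->pos++]; }
static int read_literal(toy_reader *r, int n) {
  int v = 0;
  while (n--) v = (v << 1) | read_bit(r);  /* MSB first, like vp8_encode_value() */
  return v;
}

/* Mirrors put_delta_q(): update flag, 4-bit magnitude, sign bit. */
static int get_delta_q(toy_reader *r) {
  int delta_q = 0;
  if (read_bit(r)) {
    delta_q = read_literal(r, 4);
    if (read_bit(r)) delta_q = -delta_q;
  }
  return delta_q;
}

int main(void) {
  /* put_delta_q(bc, -3) emits: 1 (update), 0011 (magnitude), 1 (sign) */
  const int bits[] = {1, 0, 0, 1, 1, 1};
  toy_reader r = {bits, 0};
  printf("delta_q = %d\n", get_delta_q(&r)); /* prints -3 */
  return 0;
}
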
extern const unsigned int kf_y_mode_cts[8][VP8_YMODES];
-static void decide_kf_ymode_entropy(VP8_COMP *cpi)
-{
-
- int mode_cost[MB_MODE_COUNT];
- int cost;
- int bestcost = INT_MAX;
- int bestindex = 0;
- int i, j;
-
- for(i=0; i<8; i++)
- {
- vp8_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
- cost = 0;
- for(j=0;j<VP8_YMODES;j++)
- {
- cost += mode_cost[j] * cpi->ymode_count[j];
- }
- if(cost < bestcost)
- {
- bestindex = i;
- bestcost = cost;
- }
+static void decide_kf_ymode_entropy(VP8_COMP *cpi) {
+
+ int mode_cost[MB_MODE_COUNT];
+ int cost;
+ int bestcost = INT_MAX;
+ int bestindex = 0;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ vp8_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
+ cost = 0;
+ for (j = 0; j < VP8_YMODES; j++) {
+ cost += mode_cost[j] * cpi->ymode_count[j];
}
- cpi->common.kf_ymode_probs_index = bestindex;
+ if (cost < bestcost) {
+ bestindex = i;
+ bestcost = cost;
+ }
+ }
+ cpi->common.kf_ymode_probs_index = bestindex;
}
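
decide_kf_ymode_entropy() is an argmin over the eight candidate probability tables: for each table it totals mode_cost[j] * ymode_count[j] and keeps the cheapest index. A self-contained sketch of the same selection rule over assumed (non-libvpx) cost tables and counts:

#include <limits.h>
#include <stdio.h>

int main(void) {
  enum { TABLES = 3, MODES = 4 };
  /* Assumed costs in 1/256-bit units and an assumed mode histogram. */
  const int mode_cost[TABLES][MODES] = {
    {100, 300, 300, 500},
    {200, 200, 300, 400},
    {400, 100, 200, 600},
  };
  const int count[MODES] = {50, 10, 5, 1};
  int bestcost = INT_MAX, bestindex = 0;
  int i, j;

  for (i = 0; i < TABLES; i++) {
    int cost = 0;
    for (j = 0; j < MODES; j++)
      cost += mode_cost[i][j] * count[j];
    if (cost < bestcost) {
      bestcost = cost;
      bestindex = i;
    }
  }
  printf("best table index = %d\n", bestindex); /* prints 0 */
  return 0;
}
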
-static void segment_reference_frames(VP8_COMP *cpi)
-{
- VP8_COMMON *oci = &cpi->common;
- MODE_INFO *mi = oci->mi;
- int ref[MAX_MB_SEGMENTS]={0};
- int i,j;
- int mb_index=0;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
-
- for (i = 0; i < oci->mb_rows; i++)
- {
- for (j = 0; j < oci->mb_cols; j++, mb_index++)
- {
- ref[mi[mb_index].mbmi.segment_id]|=(1<<mi[mb_index].mbmi.ref_frame);
- }
- mb_index++;
- }
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- {
- enable_segfeature(xd,i,SEG_LVL_REF_FRAME);
- set_segdata( xd,i, SEG_LVL_REF_FRAME, ref[i]);
+static void segment_reference_frames(VP8_COMP *cpi) {
+ VP8_COMMON *oci = &cpi->common;
+ MODE_INFO *mi = oci->mi;
+ int ref[MAX_MB_SEGMENTS] = {0};
+ int i, j;
+ int mb_index = 0;
+ MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+
+ for (i = 0; i < oci->mb_rows; i++) {
+ for (j = 0; j < oci->mb_cols; j++, mb_index++) {
+ ref[mi[mb_index].mbmi.segment_id] |= (1 << mi[mb_index].mbmi.ref_frame);
}
+ mb_index++;
+ }
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ enable_segfeature(xd, i, SEG_LVL_REF_FRAME);
+ set_segdata(xd, i, SEG_LVL_REF_FRAME, ref[i]);
+ }
}
-void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
-{
- int i, j;
- VP8_HEADER oh;
- VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const bc = & cpi->bc;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
- int extra_bytes_packed = 0;
+void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
+ int i, j;
+ VP8_HEADER oh;
+ VP8_COMMON *const pc = & cpi->common;
+ vp8_writer *const bc = & cpi->bc;
+ MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+ int extra_bytes_packed = 0;
- unsigned char *cx_data = dest;
+ unsigned char *cx_data = dest;
- oh.show_frame = (int) pc->show_frame;
- oh.type = (int)pc->frame_type;
- oh.version = pc->version;
- oh.first_partition_length_in_bytes = 0;
+ oh.show_frame = (int) pc->show_frame;
+ oh.type = (int)pc->frame_type;
+ oh.version = pc->version;
+ oh.first_partition_length_in_bytes = 0;
- cx_data += 3;
+ cx_data += 3;
#if defined(SECTIONBITS_OUTPUT)
- Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
+ Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif
#if CONFIG_NEWUPDATE
- compute_update_table();
+ compute_update_table();
#endif
- //vp8_kf_default_bmode_probs() is called in vp8_setup_key_frame() once for each
- //K frame before encode frame. pc->kf_bmode_prob doesn't get changed anywhere
- //else. No need to call it again here. --yw
- //vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
+ // vp8_kf_default_bmode_probs() is called in vp8_setup_key_frame() once for each
+ // key frame before the frame is encoded. pc->kf_bmode_prob doesn't get changed anywhere
+ // else. No need to call it again here. --yw
+ // vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
- // every keyframe send startcode, width, height, scale factor, clamp and color type
- if (oh.type == KEY_FRAME)
- {
- int v;
+ // every key frame sends the start code, width, height, scale factor, clamp and color type
+ if (oh.type == KEY_FRAME) {
+ int v;
- // Start / synch code
- cx_data[0] = 0x9D;
- cx_data[1] = 0x01;
- cx_data[2] = 0x2a;
+ // Start / synch code
+ cx_data[0] = 0x9D;
+ cx_data[1] = 0x01;
+ cx_data[2] = 0x2a;
- v = (pc->horiz_scale << 14) | pc->Width;
- cx_data[3] = v;
- cx_data[4] = v >> 8;
+ v = (pc->horiz_scale << 14) | pc->Width;
+ cx_data[3] = v;
+ cx_data[4] = v >> 8;
- v = (pc->vert_scale << 14) | pc->Height;
- cx_data[5] = v;
- cx_data[6] = v >> 8;
+ v = (pc->vert_scale << 14) | pc->Height;
+ cx_data[5] = v;
+ cx_data[6] = v >> 8;
- extra_bytes_packed = 7;
- cx_data += extra_bytes_packed ;
+ extra_bytes_packed = 7;
+ cx_data += extra_bytes_packed;
- vp8_start_encode(bc, cx_data);
+ vp8_start_encode(bc, cx_data);
- // signal clr type
- vp8_write_bit(bc, pc->clr_type);
- vp8_write_bit(bc, pc->clamp_type);
+ // signal clr type
+ vp8_write_bit(bc, pc->clr_type);
+ vp8_write_bit(bc, pc->clamp_type);
- }
- else
- vp8_start_encode(bc, cx_data);
+ } else
+ vp8_start_encode(bc, cx_data);
- // Signal whether or not Segmentation is enabled
- vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
+ // Signal whether or not Segmentation is enabled
+ vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
- // Indicate which features are enabled
- if ( xd->segmentation_enabled )
- {
- // Indicate whether or not the segmentation map is being updated.
- vp8_write_bit(bc, (xd->update_mb_segmentation_map) ? 1 : 0);
-
- // If it is, then indicate the method that will be used.
- if ( xd->update_mb_segmentation_map )
- {
- // Select the coding strategy (temporal or spatial)
- choose_segmap_coding_method( cpi );
-
- // Take a copy of the segment map if it changed for
- // future comparison
- vpx_memcpy( pc->last_frame_seg_map,
- cpi->segmentation_map, pc->MBs );
-
- // Write out the chosen coding method.
- vp8_write_bit(bc, (pc->temporal_update) ? 1:0);
- }
+ // Indicate which features are enabled
+ if (xd->segmentation_enabled) {
+ // Indicate whether or not the segmentation map is being updated.
+ vp8_write_bit(bc, (xd->update_mb_segmentation_map) ? 1 : 0);
- vp8_write_bit(bc, (xd->update_mb_segmentation_data) ? 1 : 0);
+ // If it is, then indicate the method that will be used.
+ if (xd->update_mb_segmentation_map) {
+ // Select the coding strategy (temporal or spatial)
+ choose_segmap_coding_method(cpi);
- //segment_reference_frames(cpi);
+ // Take a copy of the segment map if it changed for
+ // future comparison
+ vpx_memcpy(pc->last_frame_seg_map,
+ cpi->segmentation_map, pc->MBs);
- if (xd->update_mb_segmentation_data)
- {
- signed char Data;
+ // Write out the chosen coding method.
+ vp8_write_bit(bc, (pc->temporal_update) ? 1 : 0);
+ }
- vp8_write_bit(bc, (xd->mb_segment_abs_delta) ? 1 : 0);
+ vp8_write_bit(bc, (xd->update_mb_segmentation_data) ? 1 : 0);
- // For each segments id...
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- {
- // For each segmentation codable feature...
- for (j = 0; j < SEG_LVL_MAX; j++)
- {
- Data = get_segdata( xd, i, j );
+ // segment_reference_frames(cpi);
+ if (xd->update_mb_segmentation_data) {
+ signed char Data;
-#if CONFIG_FEATUREUPDATES
+ vp8_write_bit(bc, (xd->mb_segment_abs_delta) ? 1 : 0);
- // check if there's an update
- if(segfeature_changed( xd,i,j) )
- {
- vp8_write_bit(bc, 1);
-
- if ( segfeature_active( xd, i, j ) )
- {
- // this bit is to say we are still
- // active/ if we were inactive
- // this is unnecessary
- if ( old_segfeature_active( xd, i, j ))
- {
- vp8_write_bit(bc, 1);
- }
- // Is the segment data signed..
- if ( is_segfeature_signed(j) )
- {
- // Encode the relevant feature data
- if (Data < 0)
- {
- Data = - Data;
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- vp8_write_bit(bc, 1);
- }
- else
- {
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- vp8_write_bit(bc, 0);
- }
- }
- // Unsigned data element so no sign bit needed
- else
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- }
- // feature is inactive now
- else if ( old_segfeature_active( xd, i, j ))
- {
- vp8_write_bit(bc, 0);
- }
- }
- else
- {
- vp8_write_bit(bc,0);
- }
-#else
+ // For each segment id...
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ // For each segmentation codable feature...
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ Data = get_segdata(xd, i, j);
- // If the feature is enabled...
- if ( segfeature_active( xd, i, j ) )
- {
- vp8_write_bit(bc, 1);
-
- // Is the segment data signed..
- if ( is_segfeature_signed(j) )
- {
- // Encode the relevant feature data
- if (Data < 0)
- {
- Data = - Data;
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- vp8_write_bit(bc, 1);
- }
- else
- {
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- vp8_write_bit(bc, 0);
- }
- }
- // Unsigned data element so no sign bit needed
- else
- vp8_write_literal(bc, Data,
- seg_feature_data_bits(j));
- }
- else
- vp8_write_bit(bc, 0);
-#endif
- }
- }
- }
#if CONFIG_FEATUREUPDATES
- // save the segment info for updates next frame
- save_segment_info ( xd );
-#endif
- if (xd->update_mb_segmentation_map)
- {
- // Send the tree probabilities used to decode unpredicted
- // macro-block segments
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
- {
- int Data = xd->mb_segment_tree_probs[i];
+ // check if there's an update
+ if (segfeature_changed(xd, i, j)) {
+ vp8_write_bit(bc, 1);
- if (Data != 255)
- {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
+ if (segfeature_active(xd, i, j)) {
+ // This bit says we are still active;
+ // if we were inactive this bit is
+ // unnecessary.
+ if (old_segfeature_active(xd, i, j)) {
+ vp8_write_bit(bc, 1);
+ }
+ // Is the segment data signed?
+ if (is_segfeature_signed(j)) {
+ // Encode the relevant feature data
+ if (Data < 0) {
+ Data = - Data;
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
+ vp8_write_bit(bc, 1);
+ } else {
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
+ vp8_write_bit(bc, 0);
}
- else
- vp8_write_bit(bc, 0);
+ }
+ // Unsigned data element so no sign bit needed
+ else
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
}
-
- // If predictive coding of segment map is enabled send the
- // prediction probabilities.
- if ( pc->temporal_update )
- {
- for (i = 0; i < PREDICTION_PROBS; i++)
- {
- int Data = pc->segment_pred_probs[i];
-
- if (Data != 255)
- {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
- }
- else
- vp8_write_bit(bc, 0);
- }
+ // feature is inactive now
+ else if (old_segfeature_active(xd, i, j)) {
+ vp8_write_bit(bc, 0);
}
- }
- }
+ } else {
+ vp8_write_bit(bc, 0);
+ }
+#else
- // Encode the common prediction model status flag probability updates for
- // the reference frame
- update_refpred_stats( cpi );
- if ( pc->frame_type != KEY_FRAME )
- {
- for (i = 0; i < PREDICTION_PROBS; i++)
- {
- if ( cpi->ref_pred_probs_update[i] )
- {
+ // If the feature is enabled...
+ if (segfeature_active(xd, i, j)) {
+ vp8_write_bit(bc, 1);
+
+ // Is the segment data signed?
+ if (is_segfeature_signed(j)) {
+ // Encode the relevant feature data
+ if (Data < 0) {
+ Data = - Data;
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
vp8_write_bit(bc, 1);
- vp8_write_literal(bc, pc->ref_pred_probs[i], 8);
+ } else {
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
+ vp8_write_bit(bc, 0);
+ }
}
+ // Unsigned data element so no sign bit needed
else
- vp8_write_bit(bc, 0);
+ vp8_write_literal(bc, Data,
+ seg_feature_data_bits(j));
+ } else
+ vp8_write_bit(bc, 0);
+#endif
}
+ }
}
- vp8_write_bit(bc, pc->txfm_mode);
-
- // Encode the loop filter level and type
- vp8_write_bit(bc, pc->filter_type);
- vp8_write_literal(bc, pc->filter_level, 6);
- vp8_write_literal(bc, pc->sharpness_level, 3);
-
- // Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
- vp8_write_bit(bc, (xd->mode_ref_lf_delta_enabled) ? 1 : 0);
-
- if (xd->mode_ref_lf_delta_enabled)
- {
- // Do the deltas need to be updated
- int send_update = xd->mode_ref_lf_delta_update;
-
- vp8_write_bit(bc, send_update);
- if (send_update)
- {
- int Data;
-
- // Send update
- for (i = 0; i < MAX_REF_LF_DELTAS; i++)
- {
- Data = xd->ref_lf_deltas[i];
-
- // Frame level data
- if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i])
- {
- xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
- vp8_write_bit(bc, 1);
-
- if (Data > 0)
- {
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
- }
- else
- {
- Data = -Data;
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
- }
- }
- else
- vp8_write_bit(bc, 0);
- }
-
- // Send update
- for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
- {
- Data = xd->mode_lf_deltas[i];
-
- if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i])
- {
- xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
- vp8_write_bit(bc, 1);
-
- if (Data > 0)
- {
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
- }
- else
- {
- Data = -Data;
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
- }
- }
- else
- vp8_write_bit(bc, 0);
- }
+#if CONFIG_FEATUREUPDATES
+ // save the segment info for updates next frame
+ save_segment_info(xd);
+#endif
+
+ if (xd->update_mb_segmentation_map) {
+ // Send the tree probabilities used to decode unpredicted
+ // macro-block segments
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ int Data = xd->mb_segment_tree_probs[i];
+
+ if (Data != 255) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, Data, 8);
+ } else
+ vp8_write_bit(bc, 0);
+ }
+
+ // If predictive coding of segment map is enabled send the
+ // prediction probabilities.
+ if (pc->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ int Data = pc->segment_pred_probs[i];
+
+ if (Data != 255) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, Data, 8);
+ } else
+ vp8_write_bit(bc, 0);
}
+ }
+ }
+ }
+
+ // Encode the common prediction model status flag probability updates for
+ // the reference frame
+ update_refpred_stats(cpi);
+ if (pc->frame_type != KEY_FRAME) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ if (cpi->ref_pred_probs_update[i]) {
+ vp8_write_bit(bc, 1);
+ vp8_write_literal(bc, pc->ref_pred_probs[i], 8);
+ } else
+ vp8_write_bit(bc, 0);
}
+ }
+
+ vp8_write_bit(bc, pc->txfm_mode);
+
+ // Encode the loop filter level and type
+ vp8_write_bit(bc, pc->filter_type);
+ vp8_write_literal(bc, pc->filter_level, 6);
+ vp8_write_literal(bc, pc->sharpness_level, 3);
+
+ // Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
+ vp8_write_bit(bc, (xd->mode_ref_lf_delta_enabled) ? 1 : 0);
+
+ if (xd->mode_ref_lf_delta_enabled) {
+ // Do the deltas need to be updated
+ int send_update = xd->mode_ref_lf_delta_update;
+
+ vp8_write_bit(bc, send_update);
+ if (send_update) {
+ int Data;
+
+ // Send update
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
+ Data = xd->ref_lf_deltas[i];
+
+ // Frame level data
+ if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i]) {
+ xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
+ vp8_write_bit(bc, 1);
+
+ if (Data > 0) {
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 0); // sign
+ } else {
+ Data = -Data;
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 1); // sign
+ }
+ } else
+ vp8_write_bit(bc, 0);
+ }
+
+ // Send update
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
+ Data = xd->mode_lf_deltas[i];
+
+ if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i]) {
+ xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
+ vp8_write_bit(bc, 1);
+
+ if (Data > 0) {
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 0); // sign
+ } else {
+ Data = -Data;
+ vp8_write_literal(bc, (Data & 0x3F), 6);
+ vp8_write_bit(bc, 1); // sign
+ }
+ } else
+ vp8_write_bit(bc, 0);
+ }
+ }
+ }
- //signal here is multi token partition is enabled
- //vp8_write_literal(bc, pc->multi_token_partition, 2);
- vp8_write_literal(bc, 0, 2);
+ // signal here if multi token partition is enabled
+ // vp8_write_literal(bc, pc->multi_token_partition, 2);
+ vp8_write_literal(bc, 0, 2);
- // Frame Q baseline quantizer index
- vp8_write_literal(bc, pc->base_qindex, QINDEX_BITS);
+ // Frame Q baseline quantizer index
+ vp8_write_literal(bc, pc->base_qindex, QINDEX_BITS);
- // Transmit Dc, Second order and Uv quantizer delta information
- put_delta_q(bc, pc->y1dc_delta_q);
- put_delta_q(bc, pc->y2dc_delta_q);
- put_delta_q(bc, pc->y2ac_delta_q);
- put_delta_q(bc, pc->uvdc_delta_q);
- put_delta_q(bc, pc->uvac_delta_q);
+ // Transmit Dc, Second order and Uv quantizer delta information
+ put_delta_q(bc, pc->y1dc_delta_q);
+ put_delta_q(bc, pc->y2dc_delta_q);
+ put_delta_q(bc, pc->y2ac_delta_q);
+ put_delta_q(bc, pc->uvdc_delta_q);
+ put_delta_q(bc, pc->uvac_delta_q);
- // When there is a key frame all reference buffers are updated using the new key frame
- if (pc->frame_type != KEY_FRAME)
- {
- // Should the GF or ARF be updated using the transmitted frame or buffer
- vp8_write_bit(bc, pc->refresh_golden_frame);
- vp8_write_bit(bc, pc->refresh_alt_ref_frame);
+ // When there is a key frame all reference buffers are updated using the new key frame
+ if (pc->frame_type != KEY_FRAME) {
+ // Should the GF or ARF be updated using the transmitted frame or buffer
+ vp8_write_bit(bc, pc->refresh_golden_frame);
+ vp8_write_bit(bc, pc->refresh_alt_ref_frame);
- // For inter frames the current default behavior is that when
- // cm->refresh_golden_frame is set we copy the old GF over to
- // the ARF buffer. This is purely an encoder decision at present.
- if (pc->refresh_golden_frame)
- pc->copy_buffer_to_arf = 2;
+ // For inter frames the current default behavior is that when
+ // cm->refresh_golden_frame is set we copy the old GF over to
+ // the ARF buffer. This is purely an encoder decision at present.
+ if (pc->refresh_golden_frame)
+ pc->copy_buffer_to_arf = 2;
- // If not being updated from current frame should either GF or ARF be updated from another buffer
- if (!pc->refresh_golden_frame)
- vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
+ // If not being updated from the current frame, should either GF or ARF be updated from another buffer
+ if (!pc->refresh_golden_frame)
+ vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
- if (!pc->refresh_alt_ref_frame)
- vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
+ if (!pc->refresh_alt_ref_frame)
+ vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
- // Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
- vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
- vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
+ // Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
+ vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
+ vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
#if CONFIG_HIGH_PRECISION_MV
- // Signal whether to allow high MV precision
- vp8_write_bit(bc, (xd->allow_high_precision_mv) ? 1 : 0);
+ // Signal whether to allow high MV precision
+ vp8_write_bit(bc, (xd->allow_high_precision_mv) ? 1 : 0);
#endif
#if CONFIG_ENHANCED_INTERP
- // Signal the type of subpel filter to use
- vp8_write_literal(bc, (pc->mcomp_filter_type), 2);
+ // Signal the type of subpel filter to use
+ vp8_write_literal(bc, (pc->mcomp_filter_type), 2);
#endif
- }
+ }
- vp8_write_bit(bc, pc->refresh_entropy_probs);
+ vp8_write_bit(bc, pc->refresh_entropy_probs);
- if (pc->frame_type != KEY_FRAME)
- vp8_write_bit(bc, pc->refresh_last_frame);
+ if (pc->frame_type != KEY_FRAME)
+ vp8_write_bit(bc, pc->refresh_last_frame);
#ifdef ENTROPY_STATS
- if (pc->frame_type == INTER_FRAME)
- active_section = 0;
- else
- active_section = 7;
+ if (pc->frame_type == INTER_FRAME)
+ active_section = 0;
+ else
+ active_section = 7;
#endif
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state(); // __asm emms;
#if CONFIG_ADAPTIVE_ENTROPY
- vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
- vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
- vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
- vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
- vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
- vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
- vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
- vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
- vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
+ vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
+ vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
+ vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
+ vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
+ vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
+ vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
+ vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
+ vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
+ vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
#if CONFIG_HIGH_PRECISION_MV
- vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
+ vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
#endif
- vp8_zero(cpi->sub_mv_ref_count);
- vp8_zero(cpi->mbsplit_count);
- vp8_zero(cpi->common.fc.mv_ref_ct)
- vp8_zero(cpi->common.fc.mv_ref_ct_a)
+ vp8_zero(cpi->sub_mv_ref_count);
+ vp8_zero(cpi->mbsplit_count);
+ vp8_zero(cpi->common.fc.mv_ref_ct)
+ vp8_zero(cpi->common.fc.mv_ref_ct_a)
#endif
#if CONFIG_NEWUPDATE && COEFUPDATETYPE == 2
- update_coef_probs2(cpi);
+ update_coef_probs2(cpi);
#elif CONFIG_NEWUPDATE && COEFUPDATETYPE == 3
- update_coef_probs3(cpi);
+ update_coef_probs3(cpi);
#else
- update_coef_probs(cpi);
+ update_coef_probs(cpi);
#endif
#ifdef ENTROPY_STATS
- active_section = 2;
+ active_section = 2;
#endif
- // Write out the mb_no_coeff_skip flag
- vp8_write_bit(bc, pc->mb_no_coeff_skip);
+ // Write out the mb_no_coeff_skip flag
+ vp8_write_bit(bc, pc->mb_no_coeff_skip);
- if (pc->frame_type == KEY_FRAME)
- {
- decide_kf_ymode_entropy(cpi);
- write_kfmodes(cpi);
+ if (pc->frame_type == KEY_FRAME) {
+ decide_kf_ymode_entropy(cpi);
+ write_kfmodes(cpi);
#ifdef ENTROPY_STATS
- active_section = 8;
+ active_section = 8;
#endif
- }
- else
- {
- pack_inter_mode_mvs(cpi);
+ } else {
+ pack_inter_mode_mvs(cpi);
#if CONFIG_ADAPTIVE_ENTROPY == 0
- vp8_update_mode_context(&cpi->common);
+ vp8_update_mode_context(&cpi->common);
#endif
#ifdef ENTROPY_STATS
- active_section = 1;
+ active_section = 1;
#endif
- }
+ }
- vp8_stop_encode(bc);
+ vp8_stop_encode(bc);
- oh.first_partition_length_in_bytes = cpi->bc.pos;
+ oh.first_partition_length_in_bytes = cpi->bc.pos;
- /* update frame tag */
- {
- int v = (oh.first_partition_length_in_bytes << 5) |
- (oh.show_frame << 4) |
- (oh.version << 1) |
- oh.type;
-
- dest[0] = v;
- dest[1] = v >> 8;
- dest[2] = v >> 16;
- }
+ /* update frame tag */
+ {
+ int v = (oh.first_partition_length_in_bytes << 5) |
+ (oh.show_frame << 4) |
+ (oh.version << 1) |
+ oh.type;
+
+ dest[0] = v;
+ dest[1] = v >> 8;
+ dest[2] = v >> 16;
+ }
- *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc.pos;
+ *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc.pos;
- vp8_start_encode(&cpi->bc2, cx_data + bc->pos);
+ vp8_start_encode(&cpi->bc2, cx_data + bc->pos);
- pack_tokens(&cpi->bc2, cpi->tok, cpi->tok_count);
+ pack_tokens(&cpi->bc2, cpi->tok, cpi->tok_count);
- vp8_stop_encode(&cpi->bc2);
+ vp8_stop_encode(&cpi->bc2);
- *size += cpi->bc2.pos;
+ *size += cpi->bc2.pos;
}
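
For reference, the 3-byte frame tag assembled at the end of vp8_pack_bitstream() packs, from the low bits upward: type (1 bit), version (3 bits), show_frame (1 bit), and first_partition_length_in_bytes in the remaining 19 bits. A small sketch that unpacks a sample tag (the byte values are invented):

#include <stdio.h>

int main(void) {
  const unsigned char dest[3] = {0x10, 0x03, 0x00}; /* assumed sample tag */
  unsigned int v = dest[0] | (dest[1] << 8) | ((unsigned int)dest[2] << 16);

  unsigned int type       = v & 1;
  unsigned int version    = (v >> 1) & 7;
  unsigned int show_frame = (v >> 4) & 1;
  unsigned int part0_size = v >> 5;  /* first partition length in bytes */

  printf("type=%u version=%u show_frame=%u first_partition=%u bytes\n",
         type, version, show_frame, part0_size);
  return 0;
}
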
#ifdef ENTROPY_STATS
-void print_tree_update_probs()
-{
- int i, j, k, l;
- FILE *f = fopen("coefupdprob.h", "w");
- int Sum;
- fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
- fprintf(f, "const vp8_prob\n"
- "vp8_coef_update_probs[BLOCK_TYPES]\n"
- " [COEF_BANDS]\n"
- " [PREV_COEF_CONTEXTS]\n"
- " [ENTROPY_NODES] = {\n");
-
- for (i = 0; i < BLOCK_TYPES; i++)
- {
- fprintf(f, " { \n");
-
- for (j = 0; j < COEF_BANDS; j++)
- {
- fprintf(f, " {\n");
-
- for (k = 0; k < PREV_COEF_CONTEXTS; k++)
- {
- fprintf(f, " {");
-
- for (l = 0; l < ENTROPY_NODES; l++)
- {
- Sum = tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];
-
- if (Sum > 0)
- {
- if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ", (tree_update_hist[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- }
- else
- fprintf(f, "%3ld, ", 128);
- }
-
- fprintf(f, "},\n");
- }
-
- fprintf(f, " },\n");
+void print_tree_update_probs() {
+ int i, j, k, l;
+ FILE *f = fopen("coefupdprob.h", "w");
+ int Sum;
+ fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
+ fprintf(f, "const vp8_prob\n"
+ "vp8_coef_update_probs[BLOCK_TYPES]\n"
+ " [COEF_BANDS]\n"
+ " [PREV_COEF_CONTEXTS]\n"
+ " [ENTROPY_NODES] = {\n");
+
+ for (i = 0; i < BLOCK_TYPES; i++) {
+ fprintf(f, " { \n");
+
+ for (j = 0; j < COEF_BANDS; j++) {
+ fprintf(f, " {\n");
+
+ for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
+ fprintf(f, " {");
+
+ for (l = 0; l < ENTROPY_NODES; l++) {
+ Sum = tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];
+
+ if (Sum > 0) {
+ if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0)
+ fprintf(f, "%3ld, ", (tree_update_hist[i][j][k][l][0] * 255) / Sum);
+ else
+ fprintf(f, "%3ld, ", 1);
+ } else
+ fprintf(f, "%3ld, ", 128);
}
- fprintf(f, " },\n");
+ fprintf(f, "},\n");
+ }
+
+ fprintf(f, " },\n");
}
- fprintf(f, "};\n");
+ fprintf(f, " },\n");
+ }
- fprintf(f, "const vp8_prob\n"
- "vp8_coef_update_probs_8x8[BLOCK_TYPES_8X8]\n"
- " [COEF_BANDS]\n"
- " [PREV_COEF_CONTEXTS]\n"
- " [ENTROPY_NODES] = {\n");
+ fprintf(f, "};\n");
+ fprintf(f, "const vp8_prob\n"
+ "vp8_coef_update_probs_8x8[BLOCK_TYPES_8X8]\n"
+ " [COEF_BANDS]\n"
+ " [PREV_COEF_CONTEXTS]\n"
+ " [ENTROPY_NODES] = {\n");
- for (i = 0; i < BLOCK_TYPES_8X8; i++)
- {
- fprintf(f, " { \n");
- for (j = 0; j < COEF_BANDS; j++)
- {
- fprintf(f, " {\n");
+ for (i = 0; i < BLOCK_TYPES_8X8; i++) {
+ fprintf(f, " { \n");
- for (k = 0; k < PREV_COEF_CONTEXTS; k++)
- {
- fprintf(f, " {");
+ for (j = 0; j < COEF_BANDS; j++) {
+ fprintf(f, " {\n");
- for (l = 0; l < MAX_ENTROPY_TOKENS - 1; l++)
- {
- Sum = tree_update_hist_8x8[i][j][k][l][0] + tree_update_hist_8x8[i][j][k][l][1];
+ for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
+ fprintf(f, " {");
- if (Sum > 0)
- {
- if (((tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ", (tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- }
- else
- fprintf(f, "%3ld, ", 128);
- }
-
- fprintf(f, "},\n");
- }
+ for (l = 0; l < MAX_ENTROPY_TOKENS - 1; l++) {
+ Sum = tree_update_hist_8x8[i][j][k][l][0] + tree_update_hist_8x8[i][j][k][l][1];
- fprintf(f, " },\n");
+ if (Sum > 0) {
+ if (((tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum) > 0)
+ fprintf(f, "%3ld, ", (tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum);
+ else
+ fprintf(f, "%3ld, ", 1);
+ } else
+ fprintf(f, "%3ld, ", 128);
}
- fprintf(f, " },\n");
+ fprintf(f, "},\n");
+ }
+
+ fprintf(f, " },\n");
}
- fclose(f);
- f = fopen("treeupdate.bin", "wb");
- fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
- fwrite(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
- fclose(f);
+
+ fprintf(f, " },\n");
+ }
+ fclose(f);
+ f = fopen("treeupdate.bin", "wb");
+ fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fwrite(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
+ fclose(f);
}
#endif
diff --git a/vp8/encoder/bitstream.h b/vp8/encoder/bitstream.h
index 78fb26f66..b1c46dc95 100644
--- a/vp8/encoder/bitstream.h
+++ b/vp8/encoder/bitstream.h
@@ -18,7 +18,7 @@ void vp8cx_pack_tokens_armv5(vp8_writer *w, const TOKENEXTRA *p, int xcount,
vp8_extra_bit_struct *,
const vp8_tree_index *);
# define pack_tokens(a,b,c) \
- vp8cx_pack_tokens_armv5(a,b,c,vp8_coef_encodings,vp8_extra_bits,vp8_coef_tree)
+ vp8cx_pack_tokens_armv5(a,b,c,vp8_coef_encodings,vp8_extra_bits,vp8_coef_tree)
#else
# define pack_tokens(a,b,c) pack_tokens_c(a,b,c)
#endif
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 45f157703..766e80776 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -19,168 +19,162 @@
#include "vpx_ports/mem.h"
// motion search site
-typedef struct
-{
- MV mv;
- int offset;
+typedef struct {
+ MV mv;
+ int offset;
} search_site;
-typedef struct
-{
- // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
- short *src_diff;
- short *coeff;
-
- // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
- short *quant;
- short *quant_fast; // fast quant deprecated for now
- unsigned char *quant_shift;
- short *zbin;
- short *zbin_8x8;
- short *zrun_zbin_boost;
- short *zrun_zbin_boost_8x8;
- short *round;
-
- // Zbin Over Quant value
- short zbin_extra;
-
- unsigned char **base_src;
- unsigned char **base_second_src;
- int src;
- int src_stride;
-
- int eob_max_offset;
- int eob_max_offset_8x8;
+typedef struct {
+ // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
+ short *src_diff;
+ short *coeff;
+
+ // 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
+ short *quant;
+ short *quant_fast; // fast quant deprecated for now
+ unsigned char *quant_shift;
+ short *zbin;
+ short *zbin_8x8;
+ short *zrun_zbin_boost;
+ short *zrun_zbin_boost_8x8;
+ short *round;
+
+ // Zbin Over Quant value
+ short zbin_extra;
+
+ unsigned char **base_src;
+ unsigned char **base_second_src;
+ int src;
+ int src_stride;
+
+ int eob_max_offset;
+ int eob_max_offset_8x8;
} BLOCK;
-typedef struct
-{
- int count;
- struct
- {
- B_PREDICTION_MODE mode;
- int_mv mv;
- int_mv second_mv;
- } bmi[16];
+typedef struct {
+ int count;
+ struct {
+ B_PREDICTION_MODE mode;
+ int_mv mv;
+ int_mv second_mv;
+ } bmi[16];
} PARTITION_INFO;
// Structure to hold snapshot of coding context during the mode picking process
// TODO Do we need all of these?
-typedef struct
-{
- MODE_INFO mic;
- PARTITION_INFO partition_info;
- int_mv best_ref_mv;
- int_mv second_best_ref_mv;
- int rate;
- int distortion;
- int intra_error;
- int best_mode_index;
- int rddiv;
- int rdmult;
+typedef struct {
+ MODE_INFO mic;
+ PARTITION_INFO partition_info;
+ int_mv best_ref_mv;
+ int_mv second_best_ref_mv;
+ int rate;
+ int distortion;
+ int intra_error;
+ int best_mode_index;
+ int rddiv;
+ int rdmult;
} PICK_MODE_CONTEXT;
-typedef struct
-{
- DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
- DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
- DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y
-
- unsigned char *thismb_ptr;
- // 16 Y blocks, 4 U blocks, 4 V blocks,
- // 1 DC 2nd order block each with 16 entries
- BLOCK block[25];
-
- YV12_BUFFER_CONFIG src;
-
- MACROBLOCKD e_mbd;
- PARTITION_INFO *partition_info; /* work pointer */
- PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
- PARTITION_INFO *pip; /* Base of allocated array */
-
- search_site *ss;
- int ss_count;
- int searches_per_step;
-
- int errorperbit;
- int sadperbit16;
- int sadperbit4;
- int rddiv;
- int rdmult;
- unsigned int * mb_activity_ptr;
- int * mb_norm_activity_ptr;
- signed int act_zbin_adj;
-
- int mvcosts[2][MVvals+1];
- int *mvcost[2];
- int mvsadcosts[2][MVfpvals+1];
- int *mvsadcost[2];
+typedef struct {
+ DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
+ DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
+ DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y
+
+ unsigned char *thismb_ptr;
+ // 16 Y blocks, 4 U blocks, 4 V blocks,
+ // 1 DC 2nd order block each with 16 entries
+ BLOCK block[25];
+
+ YV12_BUFFER_CONFIG src;
+
+ MACROBLOCKD e_mbd;
+ PARTITION_INFO *partition_info; /* work pointer */
+ PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
+ PARTITION_INFO *pip; /* Base of allocated array */
+
+ search_site *ss;
+ int ss_count;
+ int searches_per_step;
+
+ int errorperbit;
+ int sadperbit16;
+ int sadperbit4;
+ int rddiv;
+ int rdmult;
+ unsigned int *mb_activity_ptr;
+ int *mb_norm_activity_ptr;
+ signed int act_zbin_adj;
+
+ int mvcosts[2][MVvals + 1];
+ int *mvcost[2];
+ int mvsadcosts[2][MVfpvals + 1];
+ int *mvsadcost[2];
#if CONFIG_HIGH_PRECISION_MV
- int mvcosts_hp[2][MVvals_hp+1];
- int *mvcost_hp[2];
- int mvsadcosts_hp[2][MVfpvals_hp+1];
- int *mvsadcost_hp[2];
+ int mvcosts_hp[2][MVvals_hp + 1];
+ int *mvcost_hp[2];
+ int mvsadcosts_hp[2][MVfpvals_hp + 1];
+ int *mvsadcost_hp[2];
#endif
- int mbmode_cost[2][MB_MODE_COUNT];
- int intra_uv_mode_cost[2][MB_MODE_COUNT];
- int bmode_costs[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
- int i8x8_mode_costs[MB_MODE_COUNT];
- int inter_bmode_costs[B_MODE_COUNT];
-
- // These define limits to motion vector components to prevent them from extending outside the UMV borders
- int mv_col_min;
- int mv_col_max;
- int mv_row_min;
- int mv_row_max;
+ int mbmode_cost[2][MB_MODE_COUNT];
+ int intra_uv_mode_cost[2][MB_MODE_COUNT];
+ int bmode_costs[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
+ int i8x8_mode_costs[MB_MODE_COUNT];
+ int inter_bmode_costs[B_MODE_COUNT];
+
+ // These define limits to motion vector components to prevent them from extending outside the UMV borders
+ int mv_col_min;
+ int mv_col_max;
+ int mv_row_min;
+ int mv_row_max;
#if CONFIG_SUPERBLOCKS
- int mv_col_min_sb;
- int mv_col_max_sb;
- int mv_row_min_sb;
- int mv_row_max_sb;
+ int mv_col_min_sb;
+ int mv_col_max_sb;
+ int mv_row_min_sb;
+ int mv_row_max_sb;
#endif
- int skip;
+ int skip;
- int encode_breakout;
+ int encode_breakout;
- //char * gf_active_ptr;
- signed char *gf_active_ptr;
+ // char * gf_active_ptr;
+ signed char *gf_active_ptr;
- unsigned char *active_ptr;
- MV_CONTEXT *mvc;
+ unsigned char *active_ptr;
+ MV_CONTEXT *mvc;
#if CONFIG_HIGH_PRECISION_MV
- MV_CONTEXT_HP *mvc_hp;
+ MV_CONTEXT_HP *mvc_hp;
#endif
- unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
- [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
- unsigned int token_costs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS]
- [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
+ unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
+ [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+ unsigned int token_costs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS]
+ [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
- int optimize;
- int q_index;
+ int optimize;
+ int q_index;
- int encode_as_sb;
+ int encode_as_sb;
- // Structure to hold context for each of the 4 MBs within a SB:
- // when encoded as 4 independent MBs:
- PICK_MODE_CONTEXT mb_context[4];
+ // Structure to hold context for each of the 4 MBs within a SB:
+ // when encoded as 4 independent MBs:
+ PICK_MODE_CONTEXT mb_context[4];
#if CONFIG_SUPERBLOCKS
- // when 4 MBs share coding parameters:
- PICK_MODE_CONTEXT sb_context[4];
+ // when 4 MBs share coding parameters:
+ PICK_MODE_CONTEXT sb_context[4];
#endif
- void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
- void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
- void (*short_walsh4x4)(short *input, short *output, int pitch);
- void (*quantize_b)(BLOCK *b, BLOCKD *d);
- void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
- void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
- void (*short_fhaar2x2)(short *input, short *output, int pitch);
- void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
- void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
+ void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
+ void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
+ void (*short_walsh4x4)(short *input, short *output, int pitch);
+ void (*quantize_b)(BLOCK *b, BLOCKD *d);
+ void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
+ void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
+ void (*short_fhaar2x2)(short *input, short *output, int pitch);
+ void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
+ void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
} MACROBLOCK;
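
The mv_col_min/mv_col_max/mv_row_min/mv_row_max fields above exist so the motion search can keep candidate vectors inside the UMV borders. A minimal sketch of clamping a candidate against those bounds, using stand-in types rather than the encoder's own MACROBLOCK and helpers:

#include <stdio.h>

/* Stand-ins for MV and the four MACROBLOCK bound fields. */
typedef struct { short row, col; } toy_mv;
typedef struct {
  int mv_col_min, mv_col_max;
  int mv_row_min, mv_row_max;
} toy_bounds;

static void clamp_mv(toy_mv *mv, const toy_bounds *x) {
  if (mv->col < x->mv_col_min)      mv->col = x->mv_col_min;
  else if (mv->col > x->mv_col_max) mv->col = x->mv_col_max;
  if (mv->row < x->mv_row_min)      mv->row = x->mv_row_min;
  else if (mv->row > x->mv_row_max) mv->row = x->mv_row_max;
}

int main(void) {
  toy_bounds b = {-64, 64, -64, 64};
  toy_mv mv = {100, -80};
  clamp_mv(&mv, &b);
  printf("clamped to (%d, %d)\n", mv.row, mv.col); /* (64, -64) */
  return 0;
}
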
diff --git a/vp8/encoder/boolhuff.c b/vp8/encoder/boolhuff.c
index d96ca7d40..e23d32e85 100644
--- a/vp8/encoder/boolhuff.c
+++ b/vp8/encoder/boolhuff.c
@@ -20,149 +20,137 @@ unsigned __int64 Sectionbits[500];
unsigned int active_section = 0;
#endif
-const unsigned int vp8_prob_cost[256] =
-{
- 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
- 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
- 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
- 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
- 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
- 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
- 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
- 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
- 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
- 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
- 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
- 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
- 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
- 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
- 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
- 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
+const unsigned int vp8_prob_cost[256] = {
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
+ 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
+ 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
+ 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
+ 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
+ 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
+ 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
+ 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
+ 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
+ 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
+ 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
+ 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
+ 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
+ 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
+ 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
};
-void vp8_start_encode(BOOL_CODER *br, unsigned char *source)
-{
+void vp8_start_encode(BOOL_CODER *br, unsigned char *source) {
- br->lowvalue = 0;
- br->range = 255;
- br->value = 0;
- br->count = -24;
- br->buffer = source;
- br->pos = 0;
+ br->lowvalue = 0;
+ br->range = 255;
+ br->value = 0;
+ br->count = -24;
+ br->buffer = source;
+ br->pos = 0;
}
-void vp8_stop_encode(BOOL_CODER *br)
-{
- int i;
+void vp8_stop_encode(BOOL_CODER *br) {
+ int i;
- for (i = 0; i < 32; i++)
- vp8_encode_bool(br, 0, 128);
+ for (i = 0; i < 32; i++)
+ vp8_encode_bool(br, 0, 128);
}
-void vp8_encode_value(BOOL_CODER *br, int data, int bits)
-{
- int bit;
+void vp8_encode_value(BOOL_CODER *br, int data, int bits) {
+ int bit;
- for (bit = bits - 1; bit >= 0; bit--)
- vp8_encode_bool(br, (1 & (data >> bit)), 0x80);
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp8_encode_bool(br, (1 & (data >> bit)), 0x80);
}
#if CONFIG_NEWUPDATE
-int recenter_nonneg(int v, int m)
-{
- if (v > (m<<1)) return v;
- else if (v >= m) return ((v-m)<<1);
- else return ((m-v)<<1)-1;
+int recenter_nonneg(int v, int m) {
+ if (v > (m << 1)) return v;
+ else if (v >= m) return ((v - m) << 1);
+ else return ((m - v) << 1) - 1;
}
-static int get_unsigned_bits(unsigned num_values)
-{
- int cat=0;
- if ((num_values--)<=1) return 0;
- while (num_values>0)
- {
- cat++;
- num_values>>=1;
- }
- return cat;
+static int get_unsigned_bits(unsigned num_values) {
+ int cat = 0;
+ if ((num_values--) <= 1) return 0;
+ while (num_values > 0) {
+ cat++;
+ num_values >>= 1;
+ }
+ return cat;
}
-void vp8_encode_uniform(BOOL_CODER *br, int v, int n)
-{
- int l = get_unsigned_bits(n);
- int m;
- if (l == 0) return;
- m = (1<<l)-n;
- if (v<m)
- vp8_encode_value(br, v, l-1);
- else
- {
- vp8_encode_value(br, m+((v-m)>>1), l-1);
- vp8_encode_value(br, (v-m)&1, 1);
- }
+void vp8_encode_uniform(BOOL_CODER *br, int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0) return;
+ m = (1 << l) - n;
+ if (v < m)
+ vp8_encode_value(br, v, l - 1);
+ else {
+ vp8_encode_value(br, m + ((v - m) >> 1), l - 1);
+ vp8_encode_value(br, (v - m) & 1, 1);
+ }
}
-int vp8_count_uniform(int v, int n)
-{
- int l = get_unsigned_bits(n);
- int m;
- if (l == 0) return 0;
- m = (1<<l)-n;
- if (v<m)
- return l-1;
- else
- return l;
+int vp8_count_uniform(int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0) return 0;
+ m = (1 << l) - n;
+ if (v < m)
+ return l - 1;
+ else
+ return l;
}
-void vp8_encode_term_subexp(BOOL_CODER *br, int word, int k, int num_syms)
-{
- int i = 0;
- int mk = 0;
- while (1) {
- int b = (i?k+i-1:k);
- int a = (1<<b);
- if (num_syms<=mk+3*a) {
- vp8_encode_uniform(br, word-mk, num_syms-mk);
- break;
- } else {
- int t = (word>=mk+a);
- vp8_encode_value(br, t, 1);
- if (t) {
- i=i+1;
- mk+=a;
- } else {
- vp8_encode_value(br, word-mk, b);
- break;
- }
- }
+void vp8_encode_term_subexp(BOOL_CODER *br, int word, int k, int num_syms) {
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ vp8_encode_uniform(br, word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ vp8_encode_value(br, t, 1);
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ vp8_encode_value(br, word - mk, b);
+ break;
+ }
}
+ }
}
-int vp8_count_term_subexp(int word, int k, int num_syms)
-{
- int count = 0;
- int i = 0;
- int mk = 0;
- while (1) {
- int b = (i?k+i-1:k);
- int a = (1<<b);
- if (num_syms<=mk+3*a) {
- count += vp8_count_uniform(word-mk, num_syms-mk);
- break;
- } else {
- int t = (word>=mk+a);
- count++;
- if (t) {
- i=i+1;
- mk+=a;
- } else {
- count += b;
- break;
- }
- }
+int vp8_count_term_subexp(int word, int k, int num_syms) {
+ int count = 0;
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ count += vp8_count_uniform(word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ count++;
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ count += b;
+ break;
+ }
}
- return count;
+ }
+ return count;
}
#endif
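
recenter_nonneg() above remaps a value v around a predictor m so that values near m land on small codes, which is what makes the terminated subexponential coder effective. The mapping is invertible; a sketch of a hypothetical inverse (not a libvpx function) with a round-trip check:

#include <assert.h>
#include <stdio.h>

/* Same mapping as recenter_nonneg() in the hunk above. */
static int recenter_nonneg(int v, int m) {
  if (v > (m << 1)) return v;
  else if (v >= m) return (v - m) << 1;
  else return ((m - v) << 1) - 1;
}

/* Hypothetical inverse: large values pass through, even codes sit at or
 * above m, odd codes below it. */
static int unrecenter_nonneg(int r, int m) {
  if (r > (m << 1)) return r;
  else if (r & 1) return m - ((r + 1) >> 1);
  else return m + (r >> 1);
}

int main(void) {
  int m = 10, v;
  for (v = 0; v <= 40; v++)
    assert(unrecenter_nonneg(recenter_nonneg(v, m), m) == v);
  printf("round-trip ok\n");
  return 0;
}
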
diff --git a/vp8/encoder/boolhuff.h b/vp8/encoder/boolhuff.h
index 1fb3cfadf..5b81b0670 100644
--- a/vp8/encoder/boolhuff.h
+++ b/vp8/encoder/boolhuff.h
@@ -21,18 +21,17 @@
#include "vpx_ports/mem.h"
-typedef struct
-{
- unsigned int lowvalue;
- unsigned int range;
- unsigned int value;
- int count;
- unsigned int pos;
- unsigned char *buffer;
-
- // Variables used to track bit costs without outputing to the bitstream
- unsigned int measure_cost;
- unsigned long bit_counter;
+typedef struct {
+ unsigned int lowvalue;
+ unsigned int range;
+ unsigned int value;
+ int count;
+ unsigned int pos;
+ unsigned char *buffer;
+
+ // Variables used to track bit costs without outputting to the bitstream
+ unsigned int measure_cost;
+ unsigned long bit_counter;
} BOOL_CODER;
extern void vp8_start_encode(BOOL_CODER *bc, unsigned char *buffer);
@@ -52,68 +51,63 @@ extern int recenter_nonneg(int v, int m);
DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
-static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability)
-{
- unsigned int split;
- int count = br->count;
- unsigned int range = br->range;
- unsigned int lowvalue = br->lowvalue;
- register unsigned int shift;
+static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
+ unsigned int split;
+ int count = br->count;
+ unsigned int range = br->range;
+ unsigned int lowvalue = br->lowvalue;
+ register unsigned int shift;
#ifdef ENTROPY_STATS
#if defined(SECTIONBITS_OUTPUT)
- if (bit)
- Sectionbits[active_section] += vp8_prob_cost[255-probability];
- else
- Sectionbits[active_section] += vp8_prob_cost[probability];
+ if (bit)
+ Sectionbits[active_section] += vp8_prob_cost[255 - probability];
+ else
+ Sectionbits[active_section] += vp8_prob_cost[probability];
#endif
#endif
- split = 1 + (((range - 1) * probability) >> 8);
+ split = 1 + (((range - 1) * probability) >> 8);
- range = split;
+ range = split;
- if (bit)
- {
- lowvalue += split;
- range = br->range - split;
- }
-
- shift = vp8_norm[range];
+ if (bit) {
+ lowvalue += split;
+ range = br->range - split;
+ }
- range <<= shift;
- count += shift;
+ shift = vp8_norm[range];
- if (count >= 0)
- {
- int offset = shift - count;
+ range <<= shift;
+ count += shift;
- if ((lowvalue << (offset - 1)) & 0x80000000)
- {
- int x = br->pos - 1;
+ if (count >= 0) {
+ int offset = shift - count;
- while (x >= 0 && br->buffer[x] == 0xff)
- {
- br->buffer[x] = (unsigned char)0;
- x--;
- }
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = br->pos - 1;
- br->buffer[x] += 1;
- }
+ while (x >= 0 && br->buffer[x] == 0xff) {
+ br->buffer[x] = (unsigned char)0;
+ x--;
+ }
- br->buffer[br->pos++] = (lowvalue >> (24 - offset));
- lowvalue <<= offset;
- shift = count;
- lowvalue &= 0xffffff;
- count -= 8 ;
+ br->buffer[x] += 1;
}
- lowvalue <<= shift;
- br->count = count;
- br->lowvalue = lowvalue;
- br->range = range;
+ br->buffer[br->pos++] = (lowvalue >> (24 - offset));
+ lowvalue <<= offset;
+ shift = count;
+ lowvalue &= 0xffffff;
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ br->count = count;
+ br->lowvalue = lowvalue;
+ br->range = range;
}
#endif
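
A minimal usage sketch of the boolean-coder interface shown above: start the coder on a buffer, write a few bools and a literal, and flush. The include path and probability values are assumed:

#include <stdio.h>
#include "vp8/encoder/boolhuff.h"  /* assumed include path */

int main(void) {
  unsigned char buf[64];
  BOOL_CODER bc;

  vp8_start_encode(&bc, buf);
  vp8_encode_bool(&bc, 1, 200);  /* bit 1 against P(zero) = 200/256 */
  vp8_encode_bool(&bc, 0, 30);   /* bit 0 against P(zero) = 30/256 */
  vp8_encode_value(&bc, 5, 4);   /* a raw 4-bit literal */
  vp8_stop_encode(&bc);          /* flushes by coding 32 trailing zeros */

  printf("%u bytes written\n", bc.pos);
  return 0;
}
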
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index 568d0e087..b98e3f25d 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -31,234 +31,230 @@ static const int xC7S1 = 3196;
#define IN_SHIFT (FINAL_SHIFT+1)
-void vp8_short_fdct8x8_c ( short * InputData, short * OutputData, int pitch)
-{
- int loop;
- int short_pitch = pitch>>1;
- int is07, is12, is34, is56;
- int is0734, is1256;
- int id07, id12, id34, id56;
- int irot_input_x, irot_input_y;
- int icommon_product1; // Re-used product (c4s4 * (s12 - s56))
- int icommon_product2; // Re-used product (c4s4 * (d12 + d56))
- int temp1, temp2; // intermediate variable for computation
-
- int InterData[64];
- int *ip = InterData;
- short *op = OutputData;
-
- for (loop = 0; loop < 8; loop++)
- {
- // Pre calculate some common sums and differences.
- is07 = (InputData[0] + InputData[7])<<IN_SHIFT;
- is12 = (InputData[1] + InputData[2])<<IN_SHIFT;
- is34 = (InputData[3] + InputData[4])<<IN_SHIFT;
- is56 = (InputData[5] + InputData[6])<<IN_SHIFT;
- id07 = (InputData[0] - InputData[7])<<IN_SHIFT;
- id12 = (InputData[1] - InputData[2])<<IN_SHIFT;
- id34 = (InputData[3] - InputData[4])<<IN_SHIFT;
- id56 = (InputData[5] - InputData[6])<<IN_SHIFT;
-
- is0734 = is07 + is34;
- is1256 = is12 + is56;
-
- // Pre-Calculate some common product terms.
- icommon_product1 = xC4S4*(is12 - is56);
- DOROUND(icommon_product1)
- icommon_product1>>=SHIFT_BITS;
-
- icommon_product2 = xC4S4*(id12 + id56);
- DOROUND(icommon_product2)
- icommon_product2>>=SHIFT_BITS;
-
-
- ip[0] = (xC4S4*(is0734 + is1256));
- DOROUND(ip[0]);
- ip[0] >>= SHIFT_BITS;
-
- ip[4] = (xC4S4*(is0734 - is1256));
- DOROUND(ip[4]);
- ip[4] >>= SHIFT_BITS;
-
- // Define inputs to rotation for outputs 2 and 6
- irot_input_x = id12 - id56;
- irot_input_y = is07 - is34;
-
- // Apply rotation for outputs 2 and 6.
- temp1=xC6S2*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC2S6*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[2] = temp1 + temp2;
-
- temp1=xC6S2*irot_input_y;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC2S6*irot_input_x ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[6] = temp1 -temp2 ;
-
- // Define inputs to rotation for outputs 1 and 7
- irot_input_x = icommon_product1 + id07;
- irot_input_y = -( id34 + icommon_product2 );
-
- // Apply rotation for outputs 1 and 7.
- temp1=xC1S7*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC7S1*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[1] = temp1 - temp2;
-
- temp1=xC7S1*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC1S7*irot_input_y ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[7] = temp1 + temp2 ;
-
- // Define inputs to rotation for outputs 3 and 5
- irot_input_x = id07 - icommon_product1;
- irot_input_y = id34 - icommon_product2;
-
- // Apply rotation for outputs 3 and 5.
- temp1=xC3S5*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC5S3*irot_input_y ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[3] = temp1 - temp2 ;
-
-
- temp1=xC5S3*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC3S5*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- ip[5] = temp1 + temp2;
-
- // Increment data pointer for next row
- InputData += short_pitch ;
- ip += 8;
- }
+void vp8_short_fdct8x8_c(short *InputData, short *OutputData, int pitch) {
+ int loop;
+ int short_pitch = pitch >> 1;
+ int is07, is12, is34, is56;
+ int is0734, is1256;
+ int id07, id12, id34, id56;
+ int irot_input_x, irot_input_y;
+ int icommon_product1; // Re-used product (c4s4 * (s12 - s56))
+ int icommon_product2; // Re-used product (c4s4 * (d12 + d56))
+ int temp1, temp2; // intermediate variables for computation
+
+ int InterData[64];
+ int *ip = InterData;
+ short *op = OutputData;
+
+ for (loop = 0; loop < 8; loop++) {
+ // Pre-calculate some common sums and differences.
+ is07 = (InputData[0] + InputData[7]) << IN_SHIFT;
+ is12 = (InputData[1] + InputData[2]) << IN_SHIFT;
+ is34 = (InputData[3] + InputData[4]) << IN_SHIFT;
+ is56 = (InputData[5] + InputData[6]) << IN_SHIFT;
+ id07 = (InputData[0] - InputData[7]) << IN_SHIFT;
+ id12 = (InputData[1] - InputData[2]) << IN_SHIFT;
+ id34 = (InputData[3] - InputData[4]) << IN_SHIFT;
+ id56 = (InputData[5] - InputData[6]) << IN_SHIFT;
+
+ is0734 = is07 + is34;
+ is1256 = is12 + is56;
+
+ // Pre-calculate some common product terms.
+ icommon_product1 = xC4S4 * (is12 - is56);
+ DOROUND(icommon_product1)
+ icommon_product1 >>= SHIFT_BITS;
+
+ icommon_product2 = xC4S4 * (id12 + id56);
+ DOROUND(icommon_product2)
+ icommon_product2 >>= SHIFT_BITS;
+
+
+ ip[0] = (xC4S4 * (is0734 + is1256));
+ DOROUND(ip[0]);
+ ip[0] >>= SHIFT_BITS;
+
+ ip[4] = (xC4S4 * (is0734 - is1256));
+ DOROUND(ip[4]);
+ ip[4] >>= SHIFT_BITS;
+
+ // Define inputs to rotation for outputs 2 and 6
+ irot_input_x = id12 - id56;
+ irot_input_y = is07 - is34;
+
+ // Apply rotation for outputs 2 and 6.
+ temp1 = xC6S2 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC2S6 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[2] = temp1 + temp2;
+
+ temp1 = xC6S2 * irot_input_y;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC2S6 * irot_input_x;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[6] = temp1 - temp2;
+
+ // Define inputs to rotation for outputs 1 and 7
+ irot_input_x = icommon_product1 + id07;
+ irot_input_y = -(id34 + icommon_product2);
+
+ // Apply rotation for outputs 1 and 7.
+ temp1 = xC1S7 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC7S1 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[1] = temp1 - temp2;
+
+ temp1 = xC7S1 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC1S7 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[7] = temp1 + temp2;
+
+ // Define inputs to rotation for outputs 3 and 5
+ irot_input_x = id07 - icommon_product1;
+ irot_input_y = id34 - icommon_product2;
+
+ // Apply rotation for outputs 3 and 5.
+ temp1 = xC3S5 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC5S3 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[3] = temp1 - temp2;
+
+
+ temp1 = xC5S3 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC3S5 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ ip[5] = temp1 + temp2;
+
+ // Increment data pointer for next row
+ InputData += short_pitch;
+ ip += 8;
+ }
- // Performed DCT on rows, now transform the columns
- ip = InterData;
- for (loop = 0; loop < 8; loop++)
- {
- // Pre calculate some common sums and differences.
- is07 = ip[0 * 8] + ip[7 * 8];
- is12 = ip[1 * 8] + ip[2 * 8];
- is34 = ip[3 * 8] + ip[4 * 8];
- is56 = ip[5 * 8] + ip[6 * 8];
-
- id07 = ip[0 * 8] - ip[7 * 8];
- id12 = ip[1 * 8] - ip[2 * 8];
- id34 = ip[3 * 8] - ip[4 * 8];
- id56 = ip[5 * 8] - ip[6 * 8];
-
- is0734 = is07 + is34;
- is1256 = is12 + is56;
-
- // Pre-Calculate some common product terms
- icommon_product1 = xC4S4*(is12 - is56) ;
- icommon_product2 = xC4S4*(id12 + id56) ;
- DOROUND(icommon_product1)
- DOROUND(icommon_product2)
- icommon_product1>>=SHIFT_BITS;
- icommon_product2>>=SHIFT_BITS;
-
-
- temp1 = xC4S4*(is0734 + is1256) ;
- temp2 = xC4S4*(is0734 - is1256) ;
- DOROUND(temp1);
- DOROUND(temp2);
- temp1>>=SHIFT_BITS;
-
- temp2>>=SHIFT_BITS;
- op[0*8] = (temp1 + FINAL_ROUNDING)>>FINAL_SHIFT;
- op[4*8] = (temp2 + FINAL_ROUNDING)>>FINAL_SHIFT;
-
- // Define inputs to rotation for outputs 2 and 6
- irot_input_x = id12 - id56;
- irot_input_y = is07 - is34;
-
- // Apply rotation for outputs 2 and 6.
- temp1=xC6S2*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC2S6*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[2*8] = (temp1 + temp2 + FINAL_ROUNDING)>>FINAL_SHIFT;
-
- temp1=xC6S2*irot_input_y;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC2S6*irot_input_x ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[6*8] = (temp1 -temp2 + FINAL_ROUNDING)>>FINAL_SHIFT ;
-
- // Define inputs to rotation for outputs 1 and 7
- irot_input_x = icommon_product1 + id07;
- irot_input_y = -( id34 + icommon_product2 );
-
- // Apply rotation for outputs 1 and 7.
- temp1=xC1S7*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC7S1*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[1*8] = (temp1 - temp2 + FINAL_ROUNDING)>>FINAL_SHIFT;
-
- temp1=xC7S1*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC1S7*irot_input_y ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[7*8] = (temp1 + temp2 + FINAL_ROUNDING)>>FINAL_SHIFT;
-
- // Define inputs to rotation for outputs 3 and 5
- irot_input_x = id07 - icommon_product1;
- irot_input_y = id34 - icommon_product2;
-
- // Apply rotation for outputs 3 and 5.
- temp1=xC3S5*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC5S3*irot_input_y ;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[3*8] = (temp1 - temp2 + FINAL_ROUNDING)>>FINAL_SHIFT ;
-
-
- temp1=xC5S3*irot_input_x;
- DOROUND(temp1);
- temp1>>=SHIFT_BITS;
- temp2=xC3S5*irot_input_y;
- DOROUND(temp2);
- temp2>>=SHIFT_BITS;
- op[5*8] = (temp1 + temp2 + FINAL_ROUNDING)>>FINAL_SHIFT;
-
- // Increment data pointer for next column.
- ip ++;
- op ++;
- }
+ // Performed DCT on rows, now transform the columns
+ ip = InterData;
+ for (loop = 0; loop < 8; loop++) {
+ // Pre calculate some common sums and differences.
+ is07 = ip[0 * 8] + ip[7 * 8];
+ is12 = ip[1 * 8] + ip[2 * 8];
+ is34 = ip[3 * 8] + ip[4 * 8];
+ is56 = ip[5 * 8] + ip[6 * 8];
+
+ id07 = ip[0 * 8] - ip[7 * 8];
+ id12 = ip[1 * 8] - ip[2 * 8];
+ id34 = ip[3 * 8] - ip[4 * 8];
+ id56 = ip[5 * 8] - ip[6 * 8];
+
+ is0734 = is07 + is34;
+ is1256 = is12 + is56;
+
+ // Pre-Calculate some common product terms
+ icommon_product1 = xC4S4 * (is12 - is56);
+ icommon_product2 = xC4S4 * (id12 + id56);
+ DOROUND(icommon_product1)
+ DOROUND(icommon_product2)
+ icommon_product1 >>= SHIFT_BITS;
+ icommon_product2 >>= SHIFT_BITS;
+
+
+ temp1 = xC4S4 * (is0734 + is1256);
+ temp2 = xC4S4 * (is0734 - is1256);
+ DOROUND(temp1);
+ DOROUND(temp2);
+ temp1 >>= SHIFT_BITS;
+
+ temp2 >>= SHIFT_BITS;
+ op[0 * 8] = (temp1 + FINAL_ROUNDING) >> FINAL_SHIFT;
+ op[4 * 8] = (temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ // Define inputs to rotation for outputs 2 and 6
+ irot_input_x = id12 - id56;
+ irot_input_y = is07 - is34;
+
+ // Apply rotation for outputs 2 and 6.
+ temp1 = xC6S2 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC2S6 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[2 * 8] = (temp1 + temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ temp1 = xC6S2 * irot_input_y;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC2S6 * irot_input_x;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[6 * 8] = (temp1 - temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ // Define inputs to rotation for outputs 1 and 7
+ irot_input_x = icommon_product1 + id07;
+ irot_input_y = -(id34 + icommon_product2);
+
+ // Apply rotation for outputs 1 and 7.
+ temp1 = xC1S7 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC7S1 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[1 * 8] = (temp1 - temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ temp1 = xC7S1 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC1S7 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[7 * 8] = (temp1 + temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ // Define inputs to rotation for outputs 3 and 5
+ irot_input_x = id07 - icommon_product1;
+ irot_input_y = id34 - icommon_product2;
+
+ // Apply rotation for outputs 3 and 5.
+ temp1 = xC3S5 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC5S3 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[3 * 8] = (temp1 - temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+
+ temp1 = xC5S3 * irot_input_x;
+ DOROUND(temp1);
+ temp1 >>= SHIFT_BITS;
+ temp2 = xC3S5 * irot_input_y;
+ DOROUND(temp2);
+ temp2 >>= SHIFT_BITS;
+ op[5 * 8] = (temp1 + temp2 + FINAL_ROUNDING) >> FINAL_SHIFT;
+
+ // Increment data pointer for next column.
+ ip++;
+ op++;
+ }
}
#else
-void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
-{
+void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch) {
int j1, i, j, k;
float b[8];
float b1[8];
@@ -272,15 +268,12 @@ void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
float f6 = (float) .1913417;
float f7 = (float) .0975452;
pitch = pitch / 2;
- for (i = 0, k = 0; i < 8; i++, k += pitch)
- {
- for (j = 0; j < 8; j++)
- {
- b[j] = (float)( block[k + j]<<3);
+ for (i = 0, k = 0; i < 8; i++, k += pitch) {
+ for (j = 0; j < 8; j++) {
+ b[j] = (float)(block[k + j] << 3);
}
/* Horizontal transform */
- for (j = 0; j < 4; j++)
- {
+ for (j = 0; j < 4; j++) {
j1 = 7 - j;
b1[j] = b[j] + b[j1];
b1[j1] = b[j] - b[j1];
@@ -307,10 +300,8 @@ void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
d[i][3] = b1[6] * f3 - b1[5] * f5;
}
/* Vertical transform */
- for (i = 0; i < 8; i++)
- {
- for (j = 0; j < 4; j++)
- {
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 4; j++) {
j1 = 7 - j;
b1[j] = d[j][i] + d[j1][i];
b1[j1] = d[j][i] - d[j1][i];
@@ -336,11 +327,9 @@ void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
d[7][i] = b1[7] * f7 - b1[4] * f1;
d[3][i] = b1[6] * f3 - b1[5] * f5;
}
- for (i = 0; i < 8; i++)
- {
- for (j = 0; j < 8; j++)
- {
- *(coefs + j + i * 8) = (short) floor(d[i][j] +0.5);
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++) {
+ *(coefs + j + i * 8) = (short) floor(d[i][j] + 0.5);
}
}
return;
@@ -348,207 +337,191 @@ void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
#endif
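
Aside on the integer path above: every off-diagonal output pair is produced by the same fixed-point pattern, a plane rotation in which each product is rounded and shifted individually before the final add or subtract. Writing \(r(p)\) for DOROUND followed by the >> SHIFT_BITS shift, outputs 2 and 6, for instance, compute

\[ X_2 = r(C_6 x) + r(C_2 y), \qquad X_6 = r(C_6 y) - r(C_2 x), \qquad x = d_{12} - d_{56}, \; y = s_{07} - s_{34}, \]

where, assuming the usual VP3-style constant tables (an assumption, not stated in this change), \(xC_kS_j \approx 2^{\mathrm{SHIFT\_BITS}} \cos(k\pi/16)\). The column pass repeats the identical rotations and folds FINAL_ROUNDING and FINAL_SHIFT into the final store.
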
-void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) //pitch = 8
-{
- /* [1 1 ; 1 -1] orthogonal transform */
- /* use position: 0,1, 4, 8 */
- int i;
- short *ip1 = input;
- short *op1 = output;
- for (i = 0; i < 16; i++)
- {
- op1[i] = 0;
- }
-
- op1[0]=(ip1[0] + ip1[1] + ip1[4] + ip1[8] + 1)>>1;
- op1[1]=(ip1[0] - ip1[1] + ip1[4] - ip1[8])>>1;
- op1[4]=(ip1[0] + ip1[1] - ip1[4] - ip1[8])>>1;
- op1[8]=(ip1[0] - ip1[1] - ip1[4] + ip1[8])>>1;
+void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
+ /* [1 1; 1 -1] orthogonal transform */
+ /* use positions 0, 1, 4, 8 */
+ int i;
+ short *ip1 = input;
+ short *op1 = output;
+ for (i = 0; i < 16; i++) {
+ op1[i] = 0;
+ }
+
+ op1[0] = (ip1[0] + ip1[1] + ip1[4] + ip1[8] + 1) >> 1;
+ op1[1] = (ip1[0] - ip1[1] + ip1[4] - ip1[8]) >> 1;
+ op1[4] = (ip1[0] + ip1[1] - ip1[4] - ip1[8]) >> 1;
+ op1[8] = (ip1[0] - ip1[1] - ip1[4] + ip1[8]) >> 1;
}
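
A minimal sanity check on the 2x2 Haar path above (a sketch with arbitrary input values; only positions 0, 1, 4 and 8 are read, and only the sum term gets the +1 rounding bias):

    short in[16] = {0}, out[16];
    in[0] = 10; in[1] = 2; in[4] = 4; in[8] = 6;
    vp8_short_fhaar2x2_c(in, out, 8);
    /* out[0] = (10 + 2 + 4 + 6 + 1) >> 1 = 11
       out[1] = (10 - 2 + 4 - 6)     >> 1 =  3
       out[4] = (10 + 2 - 4 - 6)     >> 1 =  1
       out[8] = (10 - 2 - 4 + 6)     >> 1 =  5 */
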
-void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
-{
- int i;
- int a1, b1, c1, d1;
- short *ip = input;
- short *op = output;
+void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ short *ip = input;
+ short *op = output;
- for (i = 0; i < 4; i++)
- {
- a1 = ((ip[0] + ip[3])<<5);
- b1 = ((ip[1] + ip[2])<<5);
- c1 = ((ip[1] - ip[2])<<5);
- d1 = ((ip[0] - ip[3])<<5);
+ for (i = 0; i < 4; i++) {
+ a1 = ((ip[0] + ip[3]) << 5);
+ b1 = ((ip[1] + ip[2]) << 5);
+ c1 = ((ip[1] - ip[2]) << 5);
+ d1 = ((ip[0] - ip[3]) << 5);
- op[0] = a1 + b1;
- op[2] = a1 - b1;
+ op[0] = a1 + b1;
+ op[2] = a1 - b1;
- op[1] = (c1 * 2217 + d1 * 5352 + 14500)>>12;
- op[3] = (d1 * 2217 - c1 * 5352 + 7500)>>12;
+ op[1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
+ op[3] = (d1 * 2217 - c1 * 5352 + 7500) >> 12;
- ip += pitch / 2;
- op += 4;
+ ip += pitch / 2;
+ op += 4;
- }
- ip = output;
- op = output;
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0] + ip[12];
- b1 = ip[4] + ip[8];
- c1 = ip[4] - ip[8];
- d1 = ip[0] - ip[12];
-
- op[0] = ( a1 + b1 + 7)>>4;
- op[8] = ( a1 - b1 + 7)>>4;
-
- op[4] =((c1 * 2217 + d1 * 5352 + 12000)>>16) + (d1!=0);
- op[12] = (d1 * 2217 - c1 * 5352 + 51000)>>16;
-
- ip++;
- op++;
- }
+ }
+ ip = output;
+ op = output;
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] + ip[12];
+ b1 = ip[4] + ip[8];
+ c1 = ip[4] - ip[8];
+ d1 = ip[0] - ip[12];
+
+ op[0] = (a1 + b1 + 7) >> 4;
+ op[8] = (a1 - b1 + 7) >> 4;
+
+ op[4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0);
+ op[12] = (d1 * 2217 - c1 * 5352 + 51000) >> 16;
+
+ ip++;
+ op++;
+ }
}
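
The 2217/5352 constants in the 4x4 path look like the classic Q12 DCT factors; as a hedged check (this derivation is an assumption, not part of the change), they round from sqrt(2)*sin(pi/8)*4096 and sqrt(2)*cos(pi/8)*4096:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double pi = 3.14159265358979323846;
      const double q12 = sqrt(2.0) * 4096.0;  /* Q12 scale with the sqrt(2) gain */
      /* prints ~2216.74 and ~5351.68, i.e. 2217 and 5352 after rounding */
      printf("%.2f %.2f\n", q12 * sin(pi / 8.0), q12 * cos(pi / 8.0));
      return 0;
    }

The differing bias constants (14500/7500 on the row pass, 12000/51000 on the column pass) implement the rounding behaviour inherited from VP8, and the (d1 != 0) term appears to nudge the first AC coefficient so a nonzero input column cannot collapse to zero there (inherited VP8 behaviour, as far as can be told from the code alone).
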
-void vp8_short_fdct8x4_c(short *input, short *output, int pitch)
-{
- vp8_short_fdct4x4_c(input, output, pitch);
- vp8_short_fdct4x4_c(input + 4, output + 16, pitch);
+void vp8_short_fdct8x4_c(short *input, short *output, int pitch) {
+ vp8_short_fdct4x4_c(input, output, pitch);
+ vp8_short_fdct4x4_c(input + 4, output + 16, pitch);
}
-void vp8_short_walsh4x4_c(short *input, short *output, int pitch)
-{
- int i;
- int a1, b1, c1, d1;
- short *ip = input;
- short *op = output;
- int pitch_short = pitch >>1;
-
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0 * pitch_short] + ip[3 * pitch_short];
- b1 = ip[1 * pitch_short] + ip[2 * pitch_short];
- c1 = ip[1 * pitch_short] - ip[2 * pitch_short];
- d1 = ip[0 * pitch_short] - ip[3 * pitch_short];
-
- op[0] = (a1 + b1 + 1)>>1;
- op[4] = (c1 + d1)>>1;
- op[8] = (a1 - b1)>>1;
- op[12]= (d1 - c1)>>1;
-
- ip++;
- op++;
- }
- ip = output;
- op = output;
-
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0] + ip[3];
- b1 = ip[1] + ip[2];
- c1 = ip[1] - ip[2];
- d1 = ip[0] - ip[3];
-
- op[0] = (a1 + b1 + 1)>>1;
- op[1] = (c1 + d1)>>1;
- op[2] = (a1 - b1)>>1;
- op[3] = (d1 - c1)>>1;
-
- ip += 4;
- op += 4;
- }
+void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ short *ip = input;
+ short *op = output;
+ int pitch_short = pitch >> 1;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0 * pitch_short] + ip[3 * pitch_short];
+ b1 = ip[1 * pitch_short] + ip[2 * pitch_short];
+ c1 = ip[1 * pitch_short] - ip[2 * pitch_short];
+ d1 = ip[0 * pitch_short] - ip[3 * pitch_short];
+
+ op[0] = (a1 + b1 + 1) >> 1;
+ op[4] = (c1 + d1) >> 1;
+ op[8] = (a1 - b1) >> 1;
+ op[12] = (d1 - c1) >> 1;
+
+ ip++;
+ op++;
+ }
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] + ip[3];
+ b1 = ip[1] + ip[2];
+ c1 = ip[1] - ip[2];
+ d1 = ip[0] - ip[3];
+
+ op[0] = (a1 + b1 + 1) >> 1;
+ op[1] = (c1 + d1) >> 1;
+ op[2] = (a1 - b1) >> 1;
+ op[3] = (d1 - c1) >> 1;
+
+ ip += 4;
+ op += 4;
+ }
}
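
Ignoring the +1 rounding on the first output of each pass, both passes of the Walsh-Hadamard above apply the same ±1 butterfly, so the 2-D result is (a sketch of the algebra; each pass contributes a factor of 1/2)

\[ Y = \tfrac{1}{4} H X H^{\mathsf T}, \qquad H = \begin{pmatrix} 1 & 1 & 1 & 1 \\ 1 & 1 & -1 & -1 \\ 1 & -1 & -1 & 1 \\ 1 & -1 & 1 & -1 \end{pmatrix}, \]

with the rows of \(H\) matching the op[0], op[4], op[8], op[12] order of the first pass.
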
#if CONFIG_LOSSLESS
-void vp8_short_walsh4x4_lossless_c(short *input, short *output, int pitch)
-{
- int i;
- int a1, b1, c1, d1;
- short *ip = input;
- short *op = output;
- int pitch_short = pitch >>1;
-
- for (i = 0; i < 4; i++)
- {
- a1 = (ip[0 * pitch_short] + ip[3 * pitch_short])>>Y2_WHT_UPSCALE_FACTOR;
- b1 = (ip[1 * pitch_short] + ip[2 * pitch_short])>>Y2_WHT_UPSCALE_FACTOR;
- c1 = (ip[1 * pitch_short] - ip[2 * pitch_short])>>Y2_WHT_UPSCALE_FACTOR;
- d1 = (ip[0 * pitch_short] - ip[3 * pitch_short])>>Y2_WHT_UPSCALE_FACTOR;
-
- op[0] = (a1 + b1 + 1)>>1;
- op[4] = (c1 + d1)>>1;
- op[8] = (a1 - b1)>>1;
- op[12]= (d1 - c1)>>1;
-
- ip++;
- op++;
- }
- ip = output;
- op = output;
-
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0] + ip[3];
- b1 = ip[1] + ip[2];
- c1 = ip[1] - ip[2];
- d1 = ip[0] - ip[3];
-
- op[0] = ((a1 + b1 + 1)>>1)<<Y2_WHT_UPSCALE_FACTOR;
- op[1] = ((c1 + d1)>>1)<<Y2_WHT_UPSCALE_FACTOR;
- op[2] = ((a1 - b1)>>1)<<Y2_WHT_UPSCALE_FACTOR;
- op[3] = ((d1 - c1)>>1)<<Y2_WHT_UPSCALE_FACTOR;
-
- ip += 4;
- op += 4;
- }
+void vp8_short_walsh4x4_lossless_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ short *ip = input;
+ short *op = output;
+ int pitch_short = pitch >> 1;
+
+ for (i = 0; i < 4; i++) {
+ a1 = (ip[0 * pitch_short] + ip[3 * pitch_short]) >> Y2_WHT_UPSCALE_FACTOR;
+ b1 = (ip[1 * pitch_short] + ip[2 * pitch_short]) >> Y2_WHT_UPSCALE_FACTOR;
+ c1 = (ip[1 * pitch_short] - ip[2 * pitch_short]) >> Y2_WHT_UPSCALE_FACTOR;
+ d1 = (ip[0 * pitch_short] - ip[3 * pitch_short]) >> Y2_WHT_UPSCALE_FACTOR;
+
+ op[0] = (a1 + b1 + 1) >> 1;
+ op[4] = (c1 + d1) >> 1;
+ op[8] = (a1 - b1) >> 1;
+ op[12] = (d1 - c1) >> 1;
+
+ ip++;
+ op++;
+ }
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] + ip[3];
+ b1 = ip[1] + ip[2];
+ c1 = ip[1] - ip[2];
+ d1 = ip[0] - ip[3];
+
+ op[0] = ((a1 + b1 + 1) >> 1) << Y2_WHT_UPSCALE_FACTOR;
+ op[1] = ((c1 + d1) >> 1) << Y2_WHT_UPSCALE_FACTOR;
+ op[2] = ((a1 - b1) >> 1) << Y2_WHT_UPSCALE_FACTOR;
+ op[3] = ((d1 - c1) >> 1) << Y2_WHT_UPSCALE_FACTOR;
+
+ ip += 4;
+ op += 4;
+ }
}
-void vp8_short_walsh4x4_x8_c(short *input, short *output, int pitch)
-{
- int i;
- int a1, b1, c1, d1;
- short *ip = input;
- short *op = output;
- int pitch_short = pitch >>1;
-
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0 * pitch_short] + ip[3 * pitch_short];
- b1 = ip[1 * pitch_short] + ip[2 * pitch_short];
- c1 = ip[1 * pitch_short] - ip[2 * pitch_short];
- d1 = ip[0 * pitch_short] - ip[3 * pitch_short];
-
- op[0] = (a1 + b1 +1)>>1;
- op[4] = (c1 + d1)>>1;
- op[8] = (a1 - b1)>>1;
- op[12]= (d1 - c1)>>1;
-
- ip++;
- op++;
- }
- ip = output;
- op = output;
-
- for (i = 0; i < 4; i++)
- {
- a1 = ip[0] + ip[3];
- b1 = ip[1] + ip[2];
- c1 = ip[1] - ip[2];
- d1 = ip[0] - ip[3];
-
- op[0] = ((a1 + b1 +1)>>1)<<WHT_UPSCALE_FACTOR;
- op[1] = ((c1 + d1)>>1)<<WHT_UPSCALE_FACTOR;
- op[2] = ((a1 - b1)>>1)<<WHT_UPSCALE_FACTOR;
- op[3] = ((d1 - c1)>>1)<<WHT_UPSCALE_FACTOR;
-
- ip += 4;
- op += 4;
- }
+void vp8_short_walsh4x4_x8_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1;
+ short *ip = input;
+ short *op = output;
+ int pitch_short = pitch >> 1;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0 * pitch_short] + ip[3 * pitch_short];
+ b1 = ip[1 * pitch_short] + ip[2 * pitch_short];
+ c1 = ip[1 * pitch_short] - ip[2 * pitch_short];
+ d1 = ip[0 * pitch_short] - ip[3 * pitch_short];
+
+ op[0] = (a1 + b1 + 1) >> 1;
+ op[4] = (c1 + d1) >> 1;
+ op[8] = (a1 - b1) >> 1;
+ op[12] = (d1 - c1) >> 1;
+
+ ip++;
+ op++;
+ }
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] + ip[3];
+ b1 = ip[1] + ip[2];
+ c1 = ip[1] - ip[2];
+ d1 = ip[0] - ip[3];
+
+ op[0] = ((a1 + b1 + 1) >> 1) << WHT_UPSCALE_FACTOR;
+ op[1] = ((c1 + d1) >> 1) << WHT_UPSCALE_FACTOR;
+ op[2] = ((a1 - b1) >> 1) << WHT_UPSCALE_FACTOR;
+ op[3] = ((d1 - c1) >> 1) << WHT_UPSCALE_FACTOR;
+
+ ip += 4;
+ op += 4;
+ }
}
-void vp8_short_walsh8x4_x8_c(short *input, short *output, int pitch)
-{
+void vp8_short_walsh8x4_x8_c(short *input, short *output, int pitch) {
vp8_short_walsh4x4_x8_c(input, output, pitch);
vp8_short_walsh4x4_x8_c(input + 4, output + 16, pitch);
}
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
index 2a059a0b1..6d2b736b3 100644
--- a/vp8/encoder/dct.h
+++ b/vp8/encoder/dct.h
@@ -66,15 +66,14 @@ extern prototype_fdct(vp8_short_walsh4x4_lossless_c);
#endif
typedef prototype_fdct(*vp8_fdct_fn_t);
-typedef struct
-{
- vp8_fdct_fn_t short8x8;
- vp8_fdct_fn_t haar_short2x2;
- vp8_fdct_fn_t short4x4;
- vp8_fdct_fn_t short8x4;
- vp8_fdct_fn_t fast4x4;
- vp8_fdct_fn_t fast8x4;
- vp8_fdct_fn_t walsh_short4x4;
+typedef struct {
+ vp8_fdct_fn_t short8x8;
+ vp8_fdct_fn_t haar_short2x2;
+ vp8_fdct_fn_t short4x4;
+ vp8_fdct_fn_t short8x4;
+ vp8_fdct_fn_t fast4x4;
+ vp8_fdct_fn_t fast8x4;
+ vp8_fdct_fn_t walsh_short4x4;
} vp8_fdct_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
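
For context on how a vtable of this shape is used (a hedged sketch, not code from this change): a generic init fills each slot with the plain C functions above, and platform init code may later overwrite individual entries with SIMD versions when the detected CPU flags allow it. Callers then dispatch through the table, presumably via an FDCT_INVOKE-style macro mirroring the VARIANCE_INVOKE/RECON_INVOKE calls visible in encodeframe.c.

    /* Sketch only; the fast4x4/fast8x4 aliases below are an assumption
       (they may point at the same C path as the non-fast entries). */
    static void init_fdct_vtable(vp8_fdct_rtcd_vtable_t *fdct) {
      fdct->short8x8       = vp8_short_fdct8x8_c;
      fdct->haar_short2x2  = vp8_short_fhaar2x2_c;
      fdct->short4x4       = vp8_short_fdct4x4_c;
      fdct->short8x4       = vp8_short_fdct8x4_c;
      fdct->fast4x4        = vp8_short_fdct4x4_c;
      fdct->fast8x4        = vp8_short_fdct8x4_c;
      fdct->walsh_short4x4 = vp8_short_walsh4x4_c;
    }
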
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index be2d1f4e3..ee0ed2921 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -32,7 +32,7 @@
#include "vpx_ports/vpx_timer.h"
#include "vp8/common/pred_common.h"
-//#define DBG_PRNT_SEGMAP 1
+// #define DBG_PRNT_SEGMAP 1
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
@@ -43,11 +43,11 @@
#endif
#ifdef ENC_DEBUG
-int enc_debug=0;
+int enc_debug = 0;
int mb_row_debug, mb_col_debug;
#endif
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
+extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
@@ -66,7 +66,7 @@ void vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int output_enabled);
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int output_enabled);
-static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
+static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
@@ -94,128 +94,116 @@ unsigned int b_modes[B_MODE_COUNT];
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
-static const unsigned char VP8_VAR_OFFS[16]=
-{
- 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+static const unsigned char VP8_VAR_OFFS[16] = {
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
-static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
-{
- unsigned int act;
- unsigned int sse;
- /* TODO: This could also be done over smaller areas (8x8), but that would
- * require extensive changes elsewhere, as lambda is assumed to be fixed
- * over an entire MB in most of the code.
- * Another option is to compute four 8x8 variances, and pick a single
- * lambda using a non-linear combination (e.g., the smallest, or second
- * smallest, etc.).
- */
- act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
- x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
- act = act<<4;
-
- /* If the region is flat, lower the activity some more. */
- if (act < 8<<12)
- act = act < 5<<12 ? act : 5<<12;
-
- return act;
+static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
+ unsigned int act;
+ unsigned int sse;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+ x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
+ act = act << 4;
+
+ /* If the region is flat, lower the activity some more. */
+ if (act < 8 << 12)
+ act = act < 5 << 12 ? act : 5 << 12;
+
+ return act;
}
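
Worked numbers for the flat-region clamp at the end of tt_activity_measure (a sketch; act is 16x the 16x16 variance against the flat 128 reference, with 8<<12 = 32768 and 5<<12 = 20480):

    /* var16x16 = 3000 -> act = 48000 (>= 32768): kept as-is
       var16x16 = 1800 -> act = 28800 (<  32768): clamped down to 20480
       var16x16 =  900 -> act = 14400 (<  32768): already below 20480, kept */
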
// Stub for alternative experimental activity measures.
-static unsigned int alt_activity_measure( VP8_COMP *cpi,
- MACROBLOCK *x, int use_dc_pred )
-{
- return vp8_encode_intra(cpi,x, use_dc_pred);
+static unsigned int alt_activity_measure(VP8_COMP *cpi,
+ MACROBLOCK *x, int use_dc_pred) {
+ return vp8_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock
// What we measure here is TBD, so it is abstracted into this function
#define ALT_ACT_MEASURE 1
-static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
- int mb_row, int mb_col)
-{
- unsigned int mb_activity;
+static unsigned int mb_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
+ int mb_row, int mb_col) {
+ unsigned int mb_activity;
- if ( ALT_ACT_MEASURE )
- {
- int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+ if (ALT_ACT_MEASURE) {
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
- // Or use and alternative.
- mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
- }
- else
- {
- // Original activity measure from Tim T's code.
- mb_activity = tt_activity_measure( cpi, x );
- }
+ // Or use an alternative.
+ mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
+ } else {
+ // Original activity measure from Tim T's code.
+ mb_activity = tt_activity_measure(cpi, x);
+ }
- if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
- mb_activity = VP8_ACTIVITY_AVG_MIN;
+ if (mb_activity < VP8_ACTIVITY_AVG_MIN)
+ mb_activity = VP8_ACTIVITY_AVG_MIN;
- return mb_activity;
+ return mb_activity;
}
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
-static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
-{
+static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
- // Find median: Simple n^2 algorithm for experimentation
- {
- unsigned int median;
- unsigned int i,j;
- unsigned int * sortlist;
- unsigned int tmp;
-
- // Create a list to sort to
- CHECK_MEM_ERROR(sortlist,
- vpx_calloc(sizeof(unsigned int),
- cpi->common.MBs));
-
- // Copy map to sort list
- vpx_memcpy( sortlist, cpi->mb_activity_map,
- sizeof(unsigned int) * cpi->common.MBs );
-
-
- // Ripple each value down to its correct position
- for ( i = 1; i < cpi->common.MBs; i ++ )
- {
- for ( j = i; j > 0; j -- )
- {
- if ( sortlist[j] < sortlist[j-1] )
- {
- // Swap values
- tmp = sortlist[j-1];
- sortlist[j-1] = sortlist[j];
- sortlist[j] = tmp;
- }
- else
- break;
- }
- }
+ // Find median: Simple n^2 algorithm for experimentation
+ {
+ unsigned int median;
+ unsigned int i, j;
+ unsigned int *sortlist;
+ unsigned int tmp;
+
+ // Create a list to sort to
+ CHECK_MEM_ERROR(sortlist,
+ vpx_calloc(sizeof(unsigned int),
+ cpi->common.MBs));
+
+ // Copy map to sort list
+ vpx_memcpy(sortlist, cpi->mb_activity_map,
+ sizeof(unsigned int) * cpi->common.MBs);
+
+
+ // Ripple each value down to its correct position
+ for (i = 1; i < cpi->common.MBs; i++) {
+ for (j = i; j > 0; j--) {
+ if (sortlist[j] < sortlist[j - 1]) {
+ // Swap values
+ tmp = sortlist[j - 1];
+ sortlist[j - 1] = sortlist[j];
+ sortlist[j] = tmp;
+ } else
+ break;
+ }
+ }
- // Even number MBs so estimate median as mean of two either side.
- median = ( 1 + sortlist[cpi->common.MBs >> 1] +
- sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
+ // Even number MBs so estimate median as mean of two either side.
+ median = (1 + sortlist[cpi->common.MBs >> 1] +
+ sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
- cpi->activity_avg = median;
+ cpi->activity_avg = median;
- vpx_free(sortlist);
- }
+ vpx_free(sortlist);
+ }
#else
- // Simple mean for now
- cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
+ // Simple mean for now
+ cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif
- if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
- cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
+ if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
+ cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
- // Experimental code: return fixed value normalized for several clips
- if ( ALT_ACT_MEASURE )
- cpi->activity_avg = 100000;
+ // Experimental code: return fixed value normalized for several clips
+ if (ALT_ACT_MEASURE)
+ cpi->activity_avg = 100000;
}
#define USE_ACT_INDEX 0
@@ -223,56 +211,53 @@ static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
#if USE_ACT_INDEX
// Calculate an activity index for each mb
-static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
-{
- VP8_COMMON *const cm = & cpi->common;
- int mb_row, mb_col;
+static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
+ VP8_COMMON *const cm = & cpi->common;
+ int mb_row, mb_col;
- int64_t act;
- int64_t a;
- int64_t b;
+ int64_t act;
+ int64_t a;
+ int64_t b;
#if OUTPUT_NORM_ACT_STATS
- FILE *f = fopen("norm_act.stt", "a");
- fprintf(f, "\n%12d\n", cpi->activity_avg );
+ FILE *f = fopen("norm_act.stt", "a");
+ fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif
- // Reset pointers to start of activity map
- x->mb_activity_ptr = cpi->mb_activity_map;
+ // Reset pointers to start of activity map
+ x->mb_activity_ptr = cpi->mb_activity_map;
- // Calculate normalized mb activity number.
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- // Read activity from the map
- act = *(x->mb_activity_ptr);
+ // Calculate normalized mb activity number.
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // Read activity from the map
+ act = *(x->mb_activity_ptr);
- // Calculate a normalized activity number
- a = act + 4*cpi->activity_avg;
- b = 4*act + cpi->activity_avg;
+ // Calculate a normalized activity number
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
- if ( b >= a )
- *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1;
- else
- *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b);
+ if (b >= a)
+ *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
+ else
+ *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
#if OUTPUT_NORM_ACT_STATS
- fprintf(f, " %6d", *(x->mb_activity_ptr));
+ fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
- // Increment activity map pointers
- x->mb_activity_ptr++;
- }
+ // Increment activity map pointers
+ x->mb_activity_ptr++;
+ }
#if OUTPUT_NORM_ACT_STATS
- fprintf(f, "\n");
+ fprintf(f, "\n");
#endif
- }
+ }
#if OUTPUT_NORM_ACT_STATS
- fclose(f);
+ fclose(f);
#endif
}
@@ -280,1384 +265,1285 @@ static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
// Loop through all MBs. Note the activity of each, compute the average
// activity, and calculate a normalized activity for each
-static void build_activity_map( VP8_COMP *cpi )
-{
- MACROBLOCK *const x = & cpi->mb;
- MACROBLOCKD *xd = &x->e_mbd;
- VP8_COMMON *const cm = & cpi->common;
+static void build_activity_map(VP8_COMP *cpi) {
+ MACROBLOCK *const x = & cpi->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ VP8_COMMON *const cm = & cpi->common;
#if ALT_ACT_MEASURE
- YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
- int recon_yoffset;
- int recon_y_stride = new_yv12->y_stride;
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ int recon_yoffset;
+ int recon_y_stride = new_yv12->y_stride;
#endif
- int mb_row, mb_col;
- unsigned int mb_activity;
- int64_t activity_sum = 0;
+ int mb_row, mb_col;
+ unsigned int mb_activity;
+ int64_t activity_sum = 0;
- // for each macroblock row in image
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
#if ALT_ACT_MEASURE
- // reset above block coeffs
- xd->up_available = (mb_row != 0);
- recon_yoffset = (mb_row * recon_y_stride * 16);
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
- xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
- xd->left_available = (mb_col != 0);
- recon_yoffset += 16;
+ xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->left_available = (mb_col != 0);
+ recon_yoffset += 16;
#endif
- //Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ // Copy current mb to a buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
- // measure activity
- mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
+ // measure activity
+ mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
- // Keep frame sum
- activity_sum += mb_activity;
+ // Keep frame sum
+ activity_sum += mb_activity;
- // Store MB level activity details.
- *x->mb_activity_ptr = mb_activity;
+ // Store MB level activity details.
+ *x->mb_activity_ptr = mb_activity;
- // Increment activity map pointer
- x->mb_activity_ptr++;
+ // Increment activity map pointer
+ x->mb_activity_ptr++;
- // adjust to the next column of source macroblocks
- x->src.y_buffer += 16;
- }
+ // adjust to the next column of source macroblocks
+ x->src.y_buffer += 16;
+ }
- // adjust to the next row of mbs
- x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+ // adjust to the next row of mbs
+ x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
#if ALT_ACT_MEASURE
- //extend the recon for intra prediction
- vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
- xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ // extend the recon for intra prediction
+ vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
+ xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif
- }
+ }
- // Calculate an "average" MB activity
- calc_av_activity(cpi, activity_sum);
+ // Calculate an "average" MB activity
+ calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
- // Calculate an activity index number of each mb
- calc_activity_index( cpi, x );
+ // Calculate an activity index number of each mb
+ calc_activity_index(cpi, x);
#endif
}
// Macroblock activity masking
-void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
-{
+void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
- x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
- x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
- x->errorperbit += (x->errorperbit==0);
+ x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
#else
- int64_t a;
- int64_t b;
- int64_t act = *(x->mb_activity_ptr);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
- // Apply the masking to the RD multiplier.
- a = act + (2*cpi->activity_avg);
- b = (2*act) + cpi->activity_avg;
+ // Apply the masking to the RD multiplier.
+ a = act + (2 * cpi->activity_avg);
+ b = (2 * act) + cpi->activity_avg;
- x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
- x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
- x->errorperbit += (x->errorperbit==0);
+ x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
#endif
- // Activity based Zbin adjustment
- adjust_act_zbin(cpi, x);
+ // Activity based Zbin adjustment
+ adjust_act_zbin(cpi, x);
}
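
The non-index branch above scales the RD multiplier by a deliberately bounded ratio (a worked restatement of the code, writing avg for cpi->activity_avg):

\[ \text{rdmult} \leftarrow \text{rdmult} \cdot \frac{2\,\text{act} + \text{avg}}{\text{act} + 2\,\text{avg}} \]

The ratio stays in (1/2, 2): act = avg leaves rdmult unchanged, act = 4*avg scales it by 9/6 = 1.5, and act = avg/4 scales it by 1.5/2.25 = 2/3, so busy areas tolerate more distortion per bit and flat areas less.
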
-static void update_state (VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx)
-{
- int i;
- MACROBLOCKD *xd = &x->e_mbd;
- MODE_INFO *mi = &ctx->mic;
- int mb_mode = mi->mbmi.mode;
- int mb_mode_index = ctx->best_mode_index;
+static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
+ int i;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MODE_INFO *mi = &ctx->mic;
+ int mb_mode = mi->mbmi.mode;
+ int mb_mode_index = ctx->best_mode_index;
#if CONFIG_DEBUG
- assert (mb_mode < MB_MODE_COUNT);
- assert (mb_mode_index < MAX_MODES);
- assert (mi->mbmi.ref_frame < MAX_REF_FRAMES);
+ assert(mb_mode < MB_MODE_COUNT);
+ assert(mb_mode_index < MAX_MODES);
+ assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
#endif
- // Restore the coding context of the MB to that that was in place
- // when the mode was picked for it
- vpx_memcpy(xd->mode_info_context, mi, sizeof(MODE_INFO));
+ // Restore the coding context of the MB to that which was in place
+ // when the mode was picked for it
+ vpx_memcpy(xd->mode_info_context, mi, sizeof(MODE_INFO));
- if (mb_mode == B_PRED)
- {
- for (i = 0; i < 16; i++)
- {
- xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
- assert (xd->block[i].bmi.as_mode.first < MB_MODE_COUNT);
- }
+ if (mb_mode == B_PRED) {
+ for (i = 0; i < 16; i++) {
+ xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
+ assert(xd->block[i].bmi.as_mode.first < MB_MODE_COUNT);
}
- else if (mb_mode == I8X8_PRED)
- {
- for (i = 0; i < 16; i++)
- {
- xd->block[i].bmi = xd->mode_info_context->bmi[i];
- }
- }
- else if (mb_mode == SPLITMV)
- {
- vpx_memcpy(x->partition_info, &ctx->partition_info,
- sizeof(PARTITION_INFO));
-
- xd->mode_info_context->mbmi.mv.as_int =
- x->partition_info->bmi[15].mv.as_int;
- xd->mode_info_context->mbmi.second_mv.as_int =
- x->partition_info->bmi[15].second_mv.as_int;
+ } else if (mb_mode == I8X8_PRED) {
+ for (i = 0; i < 16; i++) {
+ xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
-
- if (cpi->common.frame_type == KEY_FRAME)
- {
- // Restore the coding modes to that held in the coding context
- //if (mb_mode == B_PRED)
- // for (i = 0; i < 16; i++)
- // {
- // xd->block[i].bmi.as_mode =
- // xd->mode_info_context->bmi[i].as_mode;
- // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
- // }
+ } else if (mb_mode == SPLITMV) {
+ vpx_memcpy(x->partition_info, &ctx->partition_info,
+ sizeof(PARTITION_INFO));
+
+ xd->mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ xd->mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ // Restore the coding modes to that held in the coding context
+ // if (mb_mode == B_PRED)
+ // for (i = 0; i < 16; i++)
+ // {
+ // xd->block[i].bmi.as_mode =
+ // xd->mode_info_context->bmi[i].as_mode;
+ // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
+ // }
#if CONFIG_INTERNAL_STATS
- static const int kf_mode_index[] = {
- THR_DC /*DC_PRED*/,
- THR_V_PRED /*V_PRED*/,
- THR_H_PRED /*H_PRED*/,
+ static const int kf_mode_index[] = {
+ THR_DC /*DC_PRED*/,
+ THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/,
#if CONFIG_NEWINTRAMODES
- THR_D45_PRED /*D45_PRED*/,
- THR_D135_PRED /*D135_PRED*/,
- THR_D117_PRED /*D117_PRED*/,
- THR_D153_PRED /*D153_PRED*/,
- THR_D27_PRED /*D27_PRED*/,
- THR_D63_PRED /*D63_PRED*/,
+ THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/,
+ THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/,
+ THR_D27_PRED /*D27_PRED*/,
+ THR_D63_PRED /*D63_PRED*/,
#endif
- THR_TM /*TM_PRED*/,
- THR_I8X8_PRED /*I8X8_PRED*/,
- THR_B_PRED /*B_PRED*/,
- };
- cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
+ THR_TM /*TM_PRED*/,
+ THR_I8X8_PRED /*I8X8_PRED*/,
+ THR_B_PRED /*B_PRED*/,
+ };
+ cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
#endif
- }
- else
- {
-/*
- // Reduce the activation RD thresholds for the best choice mode
- if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
- (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
- {
- int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
+ } else {
+ /*
+ // Reduce the activation RD thresholds for the best choice mode
+ if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
+ {
+ int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
- cpi->rd_thresh_mult[mb_mode_index] =
- (cpi->rd_thresh_mult[mb_mode_index]
- >= (MIN_THRESHMULT + best_adjustment)) ?
- cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
- MIN_THRESHMULT;
- cpi->rd_threshes[mb_mode_index] =
- (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
- * cpi->rd_thresh_mult[mb_mode_index];
+ cpi->rd_thresh_mult[mb_mode_index] =
+ (cpi->rd_thresh_mult[mb_mode_index]
+ >= (MIN_THRESHMULT + best_adjustment)) ?
+ cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
+ MIN_THRESHMULT;
+ cpi->rd_threshes[mb_mode_index] =
+ (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
+ * cpi->rd_thresh_mult[mb_mode_index];
- }
-*/
- // Note how often each mode chosen as best
- cpi->mode_chosen_counts[mb_mode_index]++;
+ }
+ */
+ // Note how often each mode chosen as best
+ cpi->mode_chosen_counts[mb_mode_index]++;
- rd_update_mvcount(cpi, x, &ctx->best_ref_mv, &ctx->second_best_ref_mv);
+ rd_update_mvcount(cpi, x, &ctx->best_ref_mv, &ctx->second_best_ref_mv);
- cpi->prediction_error += ctx->distortion;
- cpi->intra_error += ctx->intra_error;
- }
+ cpi->prediction_error += ctx->distortion;
+ cpi->intra_error += ctx->intra_error;
+ }
}
-static void pick_mb_modes (VP8_COMP *cpi,
- VP8_COMMON *cm,
- int mb_row,
- int mb_col,
- MACROBLOCK *x,
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int *totalrate)
-{
- int i;
- int map_index;
- int recon_yoffset, recon_uvoffset;
- int ref_fb_idx = cm->lst_fb_idx;
- int dst_fb_idx = cm->new_fb_idx;
- int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
- int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
- ENTROPY_CONTEXT_PLANES left_context[2];
- ENTROPY_CONTEXT_PLANES above_context[2];
- ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
- + mb_col;
-
- // Offsets to move pointers from MB to MB within a SB in raster order
- int row_delta[4] = { 0, +1, 0, -1};
- int col_delta[4] = {+1, -1, +1, +1};
-
- /* Function should not modify L & A contexts; save and restore on exit */
- vpx_memcpy (left_context,
- cpi->left_context,
- sizeof(left_context));
- vpx_memcpy (above_context,
- initial_above_context_ptr,
- sizeof(above_context));
-
- /* Encode MBs in raster order within the SB */
- for ( i=0; i<4; i++ )
- {
- int dy = row_delta[i];
- int dx = col_delta[i];
- int offset_unextended = dy * cm->mb_cols + dx;
- int offset_extended = dy * xd->mode_info_stride + dx;
-
- // TODO Many of the index items here can be computed more efficiently!
-
- if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols))
- {
- // MB lies outside frame, move on
- mb_row += dy;
- mb_col += dx;
-
- // Update pointers
- x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
- x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
- x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
-
- x->gf_active_ptr += offset_unextended;
- x->partition_info += offset_extended;
- xd->mode_info_context += offset_extended;
- xd->prev_mode_info_context += offset_extended;
+static void pick_mb_modes(VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mb_row,
+ int mb_col,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ int *totalrate) {
+ int i;
+ int map_index;
+ int recon_yoffset, recon_uvoffset;
+ int ref_fb_idx = cm->lst_fb_idx;
+ int dst_fb_idx = cm->new_fb_idx;
+ int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+ int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
+ ENTROPY_CONTEXT_PLANES left_context[2];
+ ENTROPY_CONTEXT_PLANES above_context[2];
+ ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
+ + mb_col;
+
+ // Offsets to move pointers from MB to MB within a SB in raster order
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = { +1, -1, +1, +1};
+
+ /* Function should not modify L & A contexts; save and restore on exit */
+ vpx_memcpy(left_context,
+ cpi->left_context,
+ sizeof(left_context));
+ vpx_memcpy(above_context,
+ initial_above_context_ptr,
+ sizeof(above_context));
+
+ /* Encode MBs in raster order within the SB */
+ for (i = 0; i < 4; i++) {
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_unextended = dy * cm->mb_cols + dx;
+ int offset_extended = dy * xd->mode_info_stride + dx;
+
+ // TODO Many of the index items here can be computed more efficiently!
+
+ if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+
+ // Update pointers
+ x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
#if CONFIG_DEBUG
- assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
- (xd->mode_info_context - cpi->common.mip));
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
- continue;
- }
-
- // Index of the MB in the SB 0..3
- xd->mb_index = i;
-
- map_index = (mb_row * cpi->common.mb_cols) + mb_col;
- x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
-
- // set above context pointer
- xd->above_context = cm->above_context + mb_col;
-
- // Restore the appropriate left context depending on which
- // row in the SB the MB is situated
- vpx_memcpy (&cm->left_context,
- &cpi->left_context[i>>1],
- sizeof(ENTROPY_CONTEXT_PLANES));
-
- // Set up distance of MB to edge of frame in 1/8th pel units
- xd->mb_to_top_edge = -((mb_row * 16) << 3);
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
- xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
-
- // Set up limit values for MV components to prevent them from
- // extending beyond the UMV borders assuming 16x16 block size
- x->mv_row_min = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_col_min = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
- (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
- x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
- (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
-
- xd->up_available = (mb_row != 0);
- xd->left_available = (mb_col != 0);
-
- recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
- recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
-
- xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
- xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
- xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
-
- // Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
-
- x->rddiv = cpi->RDDIV;
- x->rdmult = cpi->RDMULT;
-
- if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
- vp8_activity_masking(cpi, x);
-
- // Is segmentation enabled
- if (xd->segmentation_enabled)
- {
- // Code to set segment id in xd->mbmi.segment_id
- if (cpi->segmentation_map[map_index] <= 3)
- xd->mode_info_context->mbmi.segment_id =
- cpi->segmentation_map[map_index];
- else
- xd->mode_info_context->mbmi.segment_id = 0;
-
- vp8cx_mb_init_quantizer(cpi, x);
- }
- else
- // Set to Segment 0 by default
- xd->mode_info_context->mbmi.segment_id = 0;
-
- x->active_ptr = cpi->active_map + map_index;
-
- /* force 4x4 transform for mode selection */
- xd->mode_info_context->mbmi.txfm_size = TX_4X4; // TODO IS this right??
-
- cpi->update_context = 0; // TODO Do we need this now??
-
- // Find best coding mode & reconstruct the MB so it is available
- // as a predictor for MBs that follow in the SB
- if (cm->frame_type == KEY_FRAME)
- {
- *totalrate += vp8_rd_pick_intra_mode(cpi, x);
-
- // Save the coding context
- vpx_memcpy (&x->mb_context[i].mic, xd->mode_info_context,
- sizeof(MODE_INFO));
-
- // Dummy encode, do not do the tokenization
- vp8cx_encode_intra_macro_block(cpi, x, tp, 0);
- //Note the encoder may have changed the segment_id
- }
- else
- {
- int seg_id;
-
- if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
- !segfeature_active( xd, 0, SEG_LVL_REF_FRAME ) &&
- segfeature_active( xd, 1, SEG_LVL_REF_FRAME ) &&
- check_segref(xd, 1, INTRA_FRAME) +
- check_segref(xd, 1, LAST_FRAME) +
- check_segref(xd, 1, GOLDEN_FRAME) +
- check_segref(xd, 1, ALTREF_FRAME) == 1)
- {
- cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
- }
- else
- {
- cpi->seg0_progress = (((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols + i) << 16) / cm->MBs;
- }
-
- *totalrate += vp8cx_pick_mode_inter_macroblock(cpi, x,
- recon_yoffset,
- recon_uvoffset);
-
- // Dummy encode, do not do the tokenization
- vp8cx_encode_inter_macroblock(cpi, x, tp,
- recon_yoffset, recon_uvoffset, 0);
-
- seg_id = xd->mode_info_context->mbmi.segment_id;
- if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0)
- {
- cpi->seg0_idx++;
- }
- if (!xd->segmentation_enabled ||
- !segfeature_active( xd, seg_id, SEG_LVL_REF_FRAME ) ||
- check_segref(xd, seg_id, INTRA_FRAME) +
- check_segref(xd, seg_id, LAST_FRAME) +
- check_segref(xd, seg_id, GOLDEN_FRAME) +
- check_segref(xd, seg_id, ALTREF_FRAME) > 1)
- {
- // Get the prediction context and status
- int pred_flag = get_pred_flag( xd, PRED_REF );
- int pred_context = get_pred_context( cm, xd, PRED_REF );
+ continue;
+ }
- // Count prediction success
- cpi->ref_pred_count[pred_context][pred_flag]++;
- }
- }
+ // Index of the MB in the SB 0..3
+ xd->mb_index = i;
+
+ map_index = (mb_row * cpi->common.mb_cols) + mb_col;
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ // set above context pointer
+ xd->above_context = cm->above_context + mb_col;
+
+ // Restore the appropriate left context depending on which
+ // row in the SB the MB is situated
+ vpx_memcpy(&cm->left_context,
+ &cpi->left_context[i >> 1],
+ sizeof(ENTROPY_CONTEXT_PLANES));
+
+ // Set up distance of MB to edge of frame in 1/8th pel units
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 16x16 block size
+ x->mv_row_min = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_col_min = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
+ (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
+ x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
+ (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
+
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+
+ recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
+
+ xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+ xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+
+ // Copy current MB to a work buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
+
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp8_activity_masking(cpi, x);
+
+ // Is segmentation enabled
+ if (xd->segmentation_enabled) {
+ // Code to set segment id in xd->mbmi.segment_id
+ if (cpi->segmentation_map[map_index] <= 3)
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index];
+ else
+ xd->mode_info_context->mbmi.segment_id = 0;
+
+ vp8cx_mb_init_quantizer(cpi, x);
+ } else
+ // Set to Segment 0 by default
+ xd->mode_info_context->mbmi.segment_id = 0;
+
+ x->active_ptr = cpi->active_map + map_index;
+
+ /* force 4x4 transform for mode selection */
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4; // TODO Is this right?
+
+ cpi->update_context = 0; // TODO Do we need this now??
+
+ // Find best coding mode & reconstruct the MB so it is available
+ // as a predictor for MBs that follow in the SB
+ if (cm->frame_type == KEY_FRAME) {
+ *totalrate += vp8_rd_pick_intra_mode(cpi, x);
+
+ // Save the coding context
+ vpx_memcpy(&x->mb_context[i].mic, xd->mode_info_context,
+ sizeof(MODE_INFO));
+
+ // Dummy encode, do not do the tokenization
+ vp8cx_encode_intra_macro_block(cpi, x, tp, 0);
+ // Note the encoder may have changed the segment_id
+ } else {
+ int seg_id;
+
+ if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
+ !segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
+ segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
+ check_segref(xd, 1, INTRA_FRAME) +
+ check_segref(xd, 1, LAST_FRAME) +
+ check_segref(xd, 1, GOLDEN_FRAME) +
+ check_segref(xd, 1, ALTREF_FRAME) == 1) {
+ cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
+ } else {
+ cpi->seg0_progress = (((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols + i) << 16) / cm->MBs;
+ }
+
+ *totalrate += vp8cx_pick_mode_inter_macroblock(cpi, x,
+ recon_yoffset,
+ recon_uvoffset);
+
+ // Dummy encode, do not do the tokenization
+ vp8cx_encode_inter_macroblock(cpi, x, tp,
+ recon_yoffset, recon_uvoffset, 0);
+
+ seg_id = xd->mode_info_context->mbmi.segment_id;
+ if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
+ cpi->seg0_idx++;
+ }
+ if (!xd->segmentation_enabled ||
+ !segfeature_active(xd, seg_id, SEG_LVL_REF_FRAME) ||
+ check_segref(xd, seg_id, INTRA_FRAME) +
+ check_segref(xd, seg_id, LAST_FRAME) +
+ check_segref(xd, seg_id, GOLDEN_FRAME) +
+ check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
+ // Get the prediction context and status
+ int pred_flag = get_pred_flag(xd, PRED_REF);
+ int pred_context = get_pred_context(cm, xd, PRED_REF);
+
+ // Count prediction success
+ cpi->ref_pred_count[pred_context][pred_flag]++;
+ }
+ }
- // Keep a copy of the updated left context
- vpx_memcpy (&cpi->left_context[i>>1],
- &cm->left_context,
- sizeof(ENTROPY_CONTEXT_PLANES));
+ // Keep a copy of the updated left context
+ vpx_memcpy(&cpi->left_context[i >> 1],
+ &cm->left_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
- // Next MB
- mb_row += dy;
- mb_col += dx;
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
- x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
- x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
- x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
+ x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
- x->gf_active_ptr += offset_unextended;
- x->partition_info += offset_extended;
- xd->mode_info_context += offset_extended;
- xd->prev_mode_info_context += offset_extended;
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
#if CONFIG_DEBUG
- assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
- (xd->mode_info_context - cpi->common.mip));
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
- }
-
- /* Restore L & A coding context to those in place on entry */
- vpx_memcpy (cpi->left_context,
- left_context,
- sizeof(left_context));
- vpx_memcpy (initial_above_context_ptr,
- above_context,
- sizeof(above_context));
+ }
+
+ /* Restore L & A coding context to those in place on entry */
+ vpx_memcpy(cpi->left_context,
+ left_context,
+ sizeof(left_context));
+ vpx_memcpy(initial_above_context_ptr,
+ above_context,
+ sizeof(above_context));
}
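
A traversal trace for the row_delta/col_delta tables used above (a sketch, starting from a superblock origin (r, c); each iteration processes the current MB and then steps by (dy, dx)):

    /* i = 0: process (r,   c  ), step ( 0, +1) -> (r,   c+1)
       i = 1: process (r,   c+1), step (+1, -1) -> (r+1, c  )
       i = 2: process (r+1, c  ), step ( 0, +1) -> (r+1, c+1)
       i = 3: process (r+1, c+1), step (-1, +1) -> (r,   c+2)  (next SB origin) */

So the four MBs of a 2x2 superblock are visited in raster order, and the pointers land on the next superblock's origin when the loop exits.
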
-static void encode_sb ( VP8_COMP *cpi,
- VP8_COMMON *cm,
- int mbrow,
- int mbcol,
- MACROBLOCK *x,
- MACROBLOCKD *xd,
- TOKENEXTRA **tp )
-{
- int i;
- int map_index;
- int mb_row, mb_col;
- int recon_yoffset, recon_uvoffset;
- int ref_fb_idx = cm->lst_fb_idx;
- int dst_fb_idx = cm->new_fb_idx;
- int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
- int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
- int row_delta[4] = { 0, +1, 0, -1};
- int col_delta[4] = {+1, -1, +1, +1};
-
- mb_row = mbrow;
- mb_col = mbcol;
-
- /* Encode MBs in raster order within the SB */
- for ( i=0; i<4; i++ )
- {
- int dy = row_delta[i];
- int dx = col_delta[i];
- int offset_extended = dy * xd->mode_info_stride + dx;
- int offset_unextended = dy * cm->mb_cols + dx;
-
- if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols))
- {
- // MB lies outside frame, move on
- mb_row += dy;
- mb_col += dx;
-
- x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
- x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
- x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
-
- x->gf_active_ptr += offset_unextended;
- x->partition_info += offset_extended;
- xd->mode_info_context += offset_extended;
- xd->prev_mode_info_context += offset_extended;
+static void encode_sb(VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mbrow,
+ int mbcol,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp) {
+ int i;
+ int map_index;
+ int mb_row, mb_col;
+ int recon_yoffset, recon_uvoffset;
+ int ref_fb_idx = cm->lst_fb_idx;
+ int dst_fb_idx = cm->new_fb_idx;
+ int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+ int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = { +1, -1, +1, +1};
+
+ mb_row = mbrow;
+ mb_col = mbcol;
+
+ /* Encode MBs in raster order within the SB */
+ for (i = 0; i < 4; i++) {
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_extended = dy * xd->mode_info_stride + dx;
+ int offset_unextended = dy * cm->mb_cols + dx;
+
+ if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+
+ x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
#if CONFIG_DEBUG
- assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
- (xd->mode_info_context - cpi->common.mip));
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
- continue;
- }
+ continue;
+ }
- xd->mb_index = i;
+ xd->mb_index = i;
#ifdef ENC_DEBUG
- enc_debug = (cpi->common.current_video_frame == 0 &&
- mb_row==0 && mb_col==0);
- mb_col_debug=mb_col;
- mb_row_debug=mb_row;
+ enc_debug = (cpi->common.current_video_frame == 0 &&
+ mb_row == 0 && mb_col == 0);
+ mb_col_debug = mb_col;
+ mb_row_debug = mb_row;
#endif
- // Restore MB state to that when it was picked
+ // Restore MB state to that when it was picked
#if CONFIG_SUPERBLOCKS
- if (x->encode_as_sb)
- update_state (cpi, x, &x->sb_context[i]);
- else
+ if (x->encode_as_sb)
+ update_state(cpi, x, &x->sb_context[i]);
+ else
#endif
- update_state (cpi, x, &x->mb_context[i]);
-
- // Copy in the appropriate left context
- vpx_memcpy (&cm->left_context,
- &cpi->left_context[i>>1],
- sizeof(ENTROPY_CONTEXT_PLANES));
-
- map_index = (mb_row * cpi->common.mb_cols) + mb_col;
- x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
-
- // reset above block coeffs
- xd->above_context = cm->above_context + mb_col;
-
- // Set up distance of MB to edge of the frame in 1/8th pel units
- xd->mb_to_top_edge = -((mb_row * 16) << 3);
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
- xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
-
- // Set up limit values for MV components to prevent them from
- // extending beyond the UMV borders assuming 16x16 block size
- x->mv_row_min = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_col_min = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
- (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
- x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
- (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
+ update_state(cpi, x, &x->mb_context[i]);
+
+ // Copy in the appropriate left context
+ vpx_memcpy(&cm->left_context,
+ &cpi->left_context[i >> 1],
+ sizeof(ENTROPY_CONTEXT_PLANES));
+
+ map_index = (mb_row * cpi->common.mb_cols) + mb_col;
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ // reset above block coeffs
+ xd->above_context = cm->above_context + mb_col;
+
+ // Set up distance of MB to edge of the frame in 1/8th pel units
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 16x16 block size
+ x->mv_row_min = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_col_min = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
+ (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
+ x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
+ (VP8BORDERINPIXELS - 16 - INTERP_EXTEND));
#if CONFIG_SUPERBLOCKS
- // Set up limit values for MV components to prevent them from
- // extending beyond the UMV borders assuming 32x32 block size
- x->mv_row_min_sb = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_col_min_sb = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
- x->mv_row_max_sb = ((cm->mb_rows - mb_row) * 16 +
- (VP8BORDERINPIXELS - 32 - INTERP_EXTEND));
- x->mv_col_max_sb = ((cm->mb_cols - mb_col) * 16 +
- (VP8BORDERINPIXELS - 32 - INTERP_EXTEND));
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 32x32 block size
+ x->mv_row_min_sb = -((mb_row * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_col_min_sb = -((mb_col * 16) + VP8BORDERINPIXELS - INTERP_EXTEND);
+ x->mv_row_max_sb = ((cm->mb_rows - mb_row) * 16 +
+ (VP8BORDERINPIXELS - 32 - INTERP_EXTEND));
+ x->mv_col_max_sb = ((cm->mb_cols - mb_col) * 16 +
+ (VP8BORDERINPIXELS - 32 - INTERP_EXTEND));
#endif
- xd->up_available = (mb_row != 0);
- xd->left_available = (mb_col != 0);
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
- recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
- recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
+ recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
- xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
- xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
- xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+ xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+ xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
- // Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ // Copy current MB to a work buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
- if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
- vp8_activity_masking(cpi, x);
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp8_activity_masking(cpi, x);
- // Is segmentation enabled
- if (xd->segmentation_enabled)
- {
- // Code to set segment id in xd->mbmi.segment_id
- if (cpi->segmentation_map[map_index] <= 3)
- xd->mode_info_context->mbmi.segment_id =
- cpi->segmentation_map[map_index];
- else
- xd->mode_info_context->mbmi.segment_id = 0;
-
- vp8cx_mb_init_quantizer(cpi, x);
- }
- else
- // Set to Segment 0 by default
- xd->mode_info_context->mbmi.segment_id = 0;
+    // Is segmentation enabled?
+ if (xd->segmentation_enabled) {
+ // Code to set segment id in xd->mbmi.segment_id
+ if (cpi->segmentation_map[map_index] <= 3)
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index];
+ else
+ xd->mode_info_context->mbmi.segment_id = 0;
- x->active_ptr = cpi->active_map + map_index;
+ vp8cx_mb_init_quantizer(cpi, x);
+ } else
+ // Set to Segment 0 by default
+ xd->mode_info_context->mbmi.segment_id = 0;
- cpi->update_context = 0;
+ x->active_ptr = cpi->active_map + map_index;
- if (cm->frame_type == KEY_FRAME)
- {
- vp8cx_encode_intra_macro_block(cpi, x, tp, 1);
- //Note the encoder may have changed the segment_id
+ cpi->update_context = 0;
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp8cx_encode_intra_macro_block(cpi, x, tp, 1);
+ // Note the encoder may have changed the segment_id
#ifdef MODE_STATS
- y_modes[xd->mode_info_context->mbmi.mode] ++;
+ y_modes[xd->mode_info_context->mbmi.mode]++;
#endif
- }
- else
- {
- unsigned char *segment_id;
- int seg_ref_active;
+ } else {
+ unsigned char *segment_id;
+ int seg_ref_active;
- vp8cx_encode_inter_macroblock(cpi, x, tp,
- recon_yoffset, recon_uvoffset, 1);
- //Note the encoder may have changed the segment_id
+ vp8cx_encode_inter_macroblock(cpi, x, tp,
+ recon_yoffset, recon_uvoffset, 1);
+ // Note the encoder may have changed the segment_id
#ifdef MODE_STATS
- inter_y_modes[xd->mode_info_context->mbmi.mode] ++;
+ inter_y_modes[xd->mode_info_context->mbmi.mode]++;
- if (xd->mode_info_context->mbmi.mode == SPLITMV)
- {
- int b;
+ if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ int b;
- for (b = 0; b < x->partition_info->count; b++)
- {
- inter_b_modes[x->partition_info->bmi[b].mode] ++;
- }
- }
+ for (b = 0; b < x->partition_info->count; b++) {
+ inter_b_modes[x->partition_info->bmi[b].mode]++;
+ }
+ }
#endif
- // If we have just a single reference frame coded for a segment then
- // exclude from the reference frame counts used to work out
- // probabilities. NOTE: At the moment we dont support custom trees
- // for the reference frame coding for each segment but this is a
- // possible future action.
- segment_id = &xd->mode_info_context->mbmi.segment_id;
- seg_ref_active = segfeature_active( xd, *segment_id, SEG_LVL_REF_FRAME );
- if ( !seg_ref_active ||
- ( ( check_segref( xd, *segment_id, INTRA_FRAME ) +
- check_segref( xd, *segment_id, LAST_FRAME ) +
- check_segref( xd, *segment_id, GOLDEN_FRAME ) +
- check_segref( xd, *segment_id, ALTREF_FRAME ) ) > 1 ) )
- {
+      // If we have just a single reference frame coded for a segment then
+      // exclude it from the reference frame counts used to work out
+      // probabilities. NOTE: At the moment we don't support custom trees
+      // for the reference frame coding for each segment but this is a
+      // possible future action.
+ segment_id = &xd->mode_info_context->mbmi.segment_id;
+ seg_ref_active = segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
+ if (!seg_ref_active ||
+ ((check_segref(xd, *segment_id, INTRA_FRAME) +
+ check_segref(xd, *segment_id, LAST_FRAME) +
+ check_segref(xd, *segment_id, GOLDEN_FRAME) +
+ check_segref(xd, *segment_id, ALTREF_FRAME)) > 1)) {
      // TODO: this may not be a good idea as it makes the sample size small
      // and means the predictor functions cannot use data about the most
      // likely value, only the most likely unpredicted value.
-//#if CONFIG_COMPRED
+// #if CONFIG_COMPRED
// // Only update count for incorrectly predicted cases
// if ( !ref_pred_flag )
-//#endif
- {
- cpi->count_mb_ref_frame_usage
- [xd->mode_info_context->mbmi.ref_frame]++;
- }
- }
-
- // Count of last ref frame 0,0 usage
- if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
- (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
- cpi->inter_zz_count ++;
+// #endif
+ {
+ cpi->count_mb_ref_frame_usage
+ [xd->mode_info_context->mbmi.ref_frame]++;
}
+ }
- // TODO Partitioning is broken!
- cpi->tplist[mb_row].stop = *tp;
+ // Count of last ref frame 0,0 usage
+ if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
+ (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
+ cpi->inter_zz_count++;
+ }
- // Copy back updated left context
- vpx_memcpy (&cpi->left_context[i>>1],
- &cm->left_context,
- sizeof(ENTROPY_CONTEXT_PLANES));
+ // TODO Partitioning is broken!
+ cpi->tplist[mb_row].stop = *tp;
- // Next MB
- mb_row += dy;
- mb_col += dx;
+ // Copy back updated left context
+ vpx_memcpy(&cpi->left_context[i >> 1],
+ &cm->left_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
- x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
- x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
- x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
- x->gf_active_ptr += offset_unextended;
- x->partition_info += offset_extended;
- xd->mode_info_context += offset_extended;
- xd->prev_mode_info_context += offset_extended;
+ x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
#if CONFIG_DEBUG
- assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
- (xd->mode_info_context - cpi->common.mip));
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
- }
+ }
- // debug output
+ // debug output
#if DBG_PRNT_SEGMAP
- {
- FILE *statsfile;
- statsfile = fopen("segmap2.stt", "a");
- fprintf(statsfile, "\n" );
- fclose(statsfile);
- }
- #endif
+ {
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n");
+ fclose(statsfile);
+ }
+#endif
}
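
For reference, the { row_delta, col_delta } tables at the top of encode_sb()
walk the four MBs of a 2x2 superblock in raster order and finish at the
top-left MB of the next SB. A minimal standalone sketch (hypothetical, not
encoder code) of the traversal:

#include <stdio.h>

int main(void) {
  const int row_delta[4] = { 0, +1, 0, -1 };
  const int col_delta[4] = { +1, -1, +1, +1 };
  int r = 0, c = 0, i;

  for (i = 0; i < 4; i++) {
    printf("visit MB (%d,%d)\n", r, c);  /* (0,0) (0,1) (1,0) (1,1) */
    r += row_delta[i];  /* deltas are applied after each MB, as above */
    c += col_delta[i];
  }
  printf("next SB at (%d,%d)\n", r, c);  /* (0,2) */
  return 0;
}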
static
-void encode_sb_row ( VP8_COMP *cpi,
- VP8_COMMON *cm,
- int mb_row,
- MACROBLOCK *x,
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int *totalrate )
-{
- int mb_col;
- int mb_cols = cm->mb_cols;
-
- // Initialize the left context for the new SB row
- vpx_memset (cpi->left_context, 0, sizeof(cpi->left_context));
- vpx_memset (&cm->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
-
- // Code each SB in the row
- for (mb_col=0; mb_col<mb_cols; mb_col+=2)
- {
- int mb_rate = 0;
+void encode_sb_row(VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mb_row,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ int *totalrate) {
+ int mb_col;
+ int mb_cols = cm->mb_cols;
+
+ // Initialize the left context for the new SB row
+ vpx_memset(cpi->left_context, 0, sizeof(cpi->left_context));
+ vpx_memset(&cm->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ // Code each SB in the row
+ for (mb_col = 0; mb_col < mb_cols; mb_col += 2) {
+ int mb_rate = 0;
#if CONFIG_SUPERBLOCKS
- int sb_rate = INT_MAX;
+ int sb_rate = INT_MAX;
#endif
#if CONFIG_DEBUG
- MODE_INFO *mic = xd->mode_info_context;
- PARTITION_INFO *pi = x->partition_info;
- signed char *gfa = x->gf_active_ptr;
- unsigned char *yb = x->src.y_buffer;
- unsigned char *ub = x->src.u_buffer;
- unsigned char *vb = x->src.v_buffer;
+ MODE_INFO *mic = xd->mode_info_context;
+ PARTITION_INFO *pi = x->partition_info;
+ signed char *gfa = x->gf_active_ptr;
+ unsigned char *yb = x->src.y_buffer;
+ unsigned char *ub = x->src.u_buffer;
+ unsigned char *vb = x->src.v_buffer;
#endif
- // Pick modes assuming the SB is coded as 4 independent MBs
- pick_mb_modes (cpi, cm, mb_row, mb_col, x, xd, tp, &mb_rate);
+ // Pick modes assuming the SB is coded as 4 independent MBs
+ pick_mb_modes(cpi, cm, mb_row, mb_col, x, xd, tp, &mb_rate);
- x->src.y_buffer -= 32;
- x->src.u_buffer -= 16;
- x->src.v_buffer -= 16;
+ x->src.y_buffer -= 32;
+ x->src.u_buffer -= 16;
+ x->src.v_buffer -= 16;
- x->gf_active_ptr -= 2;
- x->partition_info -= 2;
- xd->mode_info_context -= 2;
- xd->prev_mode_info_context -= 2;
+ x->gf_active_ptr -= 2;
+ x->partition_info -= 2;
+ xd->mode_info_context -= 2;
+ xd->prev_mode_info_context -= 2;
#if CONFIG_DEBUG
- assert (x->gf_active_ptr == gfa);
- assert (x->partition_info == pi);
- assert (xd->mode_info_context == mic);
- assert (x->src.y_buffer == yb);
- assert (x->src.u_buffer == ub);
- assert (x->src.v_buffer == vb);
+ assert(x->gf_active_ptr == gfa);
+ assert(x->partition_info == pi);
+ assert(xd->mode_info_context == mic);
+ assert(x->src.y_buffer == yb);
+ assert(x->src.u_buffer == ub);
+ assert(x->src.v_buffer == vb);
#endif
#if CONFIG_SUPERBLOCKS
- // Pick a mode assuming that it applies all 4 of the MBs in the SB
- pick_sb_modes(cpi, cm, mb_row, mb_col, x, xd, &sb_rate);
-
- // Decide whether to encode as a SB or 4xMBs
- if(sb_rate < mb_rate)
- {
- x->encode_as_sb = 1;
- *totalrate += sb_rate;
- }
- else
+    // Pick a mode assuming that it applies to all 4 of the MBs in the SB
+ pick_sb_modes(cpi, cm, mb_row, mb_col, x, xd, &sb_rate);
+
+ // Decide whether to encode as a SB or 4xMBs
+ if (sb_rate < mb_rate) {
+ x->encode_as_sb = 1;
+ *totalrate += sb_rate;
+ } else
#endif
- {
- x->encode_as_sb = 0;
- *totalrate += mb_rate;
- }
+ {
+ x->encode_as_sb = 0;
+ *totalrate += mb_rate;
+ }
- // Encode SB using best computed mode(s)
- encode_sb (cpi, cm, mb_row, mb_col, x, xd, tp);
+ // Encode SB using best computed mode(s)
+ encode_sb(cpi, cm, mb_row, mb_col, x, xd, tp);
#if CONFIG_DEBUG
- assert (x->gf_active_ptr == gfa+2);
- assert (x->partition_info == pi+2);
- assert (xd->mode_info_context == mic+2);
- assert (x->src.y_buffer == yb+32);
- assert (x->src.u_buffer == ub+16);
- assert (x->src.v_buffer == vb+16);
+ assert(x->gf_active_ptr == gfa + 2);
+ assert(x->partition_info == pi + 2);
+ assert(xd->mode_info_context == mic + 2);
+ assert(x->src.y_buffer == yb + 32);
+ assert(x->src.u_buffer == ub + 16);
+ assert(x->src.v_buffer == vb + 16);
#endif
- }
+ }
- // this is to account for the border
- x->gf_active_ptr += mb_cols - (mb_cols & 0x1);
- x->partition_info += xd->mode_info_stride + 1 - (mb_cols & 0x1);
- xd->mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
- xd->prev_mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+ // this is to account for the border
+ x->gf_active_ptr += mb_cols - (mb_cols & 0x1);
+ x->partition_info += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+ xd->mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+ xd->prev_mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
#if CONFIG_DEBUG
- assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
- (xd->mode_info_context - cpi->common.mip));
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
}
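
For reference, the end-of-row pointer adjustments above can be checked by
hand, assuming the usual extended layout where xd->mode_info_stride ==
cm->mb_cols + 1. Each SB nets an advance of +2 entries, so a full row of SBs
moves the pointer by mb_cols entries when mb_cols is even, or by mb_cols + 1
when it is odd (the last SB's second column lies outside the frame). Two MB
rows require a total advance of 2 * (mb_cols + 1), leaving mb_cols + 2,
respectively mb_cols + 1, still to add; both cases reduce to
mode_info_stride + 1 - (mb_cols & 0x1). The same argument with stride mb_cols
gives mb_cols - (mb_cols & 0x1) for gf_active_ptr.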
-void init_encode_frame_mb_context(VP8_COMP *cpi)
-{
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
+void init_encode_frame_mb_context(VP8_COMP *cpi) {
+ MACROBLOCK *const x = & cpi->mb;
+ VP8_COMMON *const cm = & cpi->common;
+ MACROBLOCKD *const xd = & x->e_mbd;
- // GF active flags data structure
- x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
+ // GF active flags data structure
+ x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
- // Activity map pointer
- x->mb_activity_ptr = cpi->mb_activity_map;
+ // Activity map pointer
+ x->mb_activity_ptr = cpi->mb_activity_map;
- x->act_zbin_adj = 0;
- cpi->seg0_idx = 0;
- vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
+ x->act_zbin_adj = 0;
+ cpi->seg0_idx = 0;
+ vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
- x->partition_info = x->pi;
+ x->partition_info = x->pi;
- xd->mode_info_context = cm->mi;
- xd->mode_info_stride = cm->mode_info_stride;
- xd->prev_mode_info_context = cm->prev_mi;
+ xd->mode_info_context = cm->mi;
+ xd->mode_info_stride = cm->mode_info_stride;
+ xd->prev_mode_info_context = cm->prev_mi;
- xd->frame_type = cm->frame_type;
+ xd->frame_type = cm->frame_type;
- xd->frames_since_golden = cm->frames_since_golden;
- xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
+ xd->frames_since_golden = cm->frames_since_golden;
+ xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
- // reset intra mode contexts
- if (cm->frame_type == KEY_FRAME)
- vp8_init_mbmode_probs(cm);
+ // reset intra mode contexts
+ if (cm->frame_type == KEY_FRAME)
+ vp8_init_mbmode_probs(cm);
- // Copy data over into macro block data structures.
- x->src = * cpi->Source;
- xd->pre = cm->yv12_fb[cm->lst_fb_idx];
- xd->dst = cm->yv12_fb[cm->new_fb_idx];
+ // Copy data over into macro block data structures.
+ x->src = * cpi->Source;
+ xd->pre = cm->yv12_fb[cm->lst_fb_idx];
+ xd->dst = cm->yv12_fb[cm->new_fb_idx];
- // set up frame for intra coded blocks
- vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
+ // set up frame for intra coded blocks
+ vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
- vp8_build_block_offsets(x);
+ vp8_build_block_offsets(x);
- vp8_setup_block_dptrs(&x->e_mbd);
+ vp8_setup_block_dptrs(&x->e_mbd);
- vp8_setup_block_ptrs(x);
+ vp8_setup_block_ptrs(x);
- xd->mode_info_context->mbmi.mode = DC_PRED;
- xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
- xd->left_context = &cm->left_context;
+ xd->left_context = &cm->left_context;
- vp8_zero(cpi->count_mb_ref_frame_usage)
- vp8_zero(cpi->bmode_count)
- vp8_zero(cpi->ymode_count)
- vp8_zero(cpi->i8x8_mode_count)
- vp8_zero(cpi->y_uv_mode_count)
- vp8_zero(cpi->sub_mv_ref_count)
- vp8_zero(cpi->mbsplit_count)
+ vp8_zero(cpi->count_mb_ref_frame_usage)
+ vp8_zero(cpi->bmode_count)
+ vp8_zero(cpi->ymode_count)
+ vp8_zero(cpi->i8x8_mode_count)
+ vp8_zero(cpi->y_uv_mode_count)
+ vp8_zero(cpi->sub_mv_ref_count)
+ vp8_zero(cpi->mbsplit_count)
#if CONFIG_ADAPTIVE_ENTROPY
- vp8_zero(cpi->common.fc.mv_ref_ct)
- vp8_zero(cpi->common.fc.mv_ref_ct_a)
+ vp8_zero(cpi->common.fc.mv_ref_ct)
+ vp8_zero(cpi->common.fc.mv_ref_ct_a)
#endif
- //vp8_zero(cpi->uv_mode_count)
+ // vp8_zero(cpi->uv_mode_count)
- x->mvc = cm->fc.mvc;
+ x->mvc = cm->fc.mvc;
#if CONFIG_HIGH_PRECISION_MV
- x->mvc_hp = cm->fc.mvc_hp;
+ x->mvc_hp = cm->fc.mvc_hp;
#endif
- vpx_memset(cm->above_context, 0,
- sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
+ vpx_memset(cm->above_context, 0,
+ sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
- xd->fullpixel_mask = 0xffffffff;
- if(cm->full_pixel)
- xd->fullpixel_mask = 0xfffffff8;
+ xd->fullpixel_mask = 0xffffffff;
+ if (cm->full_pixel)
+ xd->fullpixel_mask = 0xfffffff8;
}
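
The fullpixel_mask set at the end of init_encode_frame_mb_context() relies on
MV components being stored in 1/8-pel units, so clearing the low three bits
snaps a component to a whole pel. A small hypothetical sketch:

#include <stdio.h>

int main(void) {
  unsigned int mask = 0xfffffff8;  /* cm->full_pixel: clear 1/8-pel bits */
  int mv = 37;                     /* 4 pels + 5/8 pel, in 1/8-pel units */

  printf("%d -> %d\n", mv, (int)(mv & mask));  /* 37 -> 32 (4 full pels) */
  return 0;
}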
-static void encode_frame_internal(VP8_COMP *cpi)
-{
- int mb_row;
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
+static void encode_frame_internal(VP8_COMP *cpi) {
+ int mb_row;
+ MACROBLOCK *const x = & cpi->mb;
+ VP8_COMMON *const cm = & cpi->common;
+ MACROBLOCKD *const xd = & x->e_mbd;
- TOKENEXTRA *tp = cpi->tok;
- int totalrate;
+ TOKENEXTRA *tp = cpi->tok;
+ int totalrate;
- // Compute a modified set of reference frame probabilities to use when
- // prediction fails. These are based on the current general estimates for
- // this frame which may be updated with each iteration of the recode loop.
- compute_mod_refprobs( cm );
+ // Compute a modified set of reference frame probabilities to use when
+ // prediction fails. These are based on the current general estimates for
+ // this frame which may be updated with each iteration of the recode loop.
+ compute_mod_refprobs(cm);
// debug output
#if DBG_PRNT_SEGMAP
- {
- FILE *statsfile;
- statsfile = fopen("segmap2.stt", "a");
- fprintf(statsfile, "\n" );
- fclose(statsfile);
- }
+ {
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n");
+ fclose(statsfile);
+ }
#endif
- totalrate = 0;
-
- // Functions setup for all frame types so we can use MC in AltRef
- if (cm->mcomp_filter_type == SIXTAP)
- {
- xd->subpixel_predict = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap4x4);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap8x4);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap8x8);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap16x16);
- xd->subpixel_predict_avg = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap_avg4x4);
- xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap_avg8x8);
- xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, sixtap_avg16x16);
- }
+ totalrate = 0;
+
+ // Functions setup for all frame types so we can use MC in AltRef
+ if (cm->mcomp_filter_type == SIXTAP) {
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap4x4);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap8x4);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap8x8);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg4x4);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg8x8);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg16x16);
+ }
#if CONFIG_ENHANCED_INTERP
- else if (cm->mcomp_filter_type == EIGHTTAP)
- {
- xd->subpixel_predict = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap4x4);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap8x4);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap8x8);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap16x16);
- xd->subpixel_predict_avg = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg4x4);
- xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg8x8);
- xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg16x16);
- }
- else if (cm->mcomp_filter_type == EIGHTTAP_SHARP)
- {
- xd->subpixel_predict = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap4x4_sharp);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap8x4_sharp);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap8x8_sharp);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap16x16_sharp);
- xd->subpixel_predict_avg = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg4x4_sharp);
- xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg8x8_sharp);
- xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, eighttap_avg16x16_sharp);
- }
+ else if (cm->mcomp_filter_type == EIGHTTAP) {
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap4x4);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap8x4);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap8x8);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg4x4);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg8x8);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg16x16);
+ } else if (cm->mcomp_filter_type == EIGHTTAP_SHARP) {
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap4x4_sharp);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap8x4_sharp);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap8x8_sharp);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap16x16_sharp);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg4x4_sharp);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg8x8_sharp);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, eighttap_avg16x16_sharp);
+ }
#endif
- else
- {
- xd->subpixel_predict = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear4x4);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear8x4);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear8x8);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear16x16);
- xd->subpixel_predict_avg = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear_avg4x4);
- xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
- &cpi->common.rtcd.subpix, bilinear_avg8x8);
- xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ else {
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear4x4);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear8x4);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear8x8);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg4x4);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg8x8);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg16x16);
- }
+ }
- // Reset frame count of inter 0,0 motion vector usage.
- cpi->inter_zz_count = 0;
+ // Reset frame count of inter 0,0 motion vector usage.
+ cpi->inter_zz_count = 0;
- cpi->prediction_error = 0;
- cpi->intra_error = 0;
+ cpi->prediction_error = 0;
+ cpi->intra_error = 0;
#if CONFIG_NEWENTROPY
- cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
- cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
+ cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
+ cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
#else
- cpi->skip_true_count = 0;
- cpi->skip_false_count = 0;
+ cpi->skip_true_count = 0;
+ cpi->skip_false_count = 0;
#endif
#if CONFIG_PRED_FILTER
- if (cm->current_video_frame == 0)
- {
- // Initially assume that we'll signal the prediction filter
- // state at the frame level and that it is off.
- cpi->common.pred_filter_mode = 0;
- cpi->common.prob_pred_filter_off = 128;
- }
- cpi->pred_filter_on_count = 0;
- cpi->pred_filter_off_count = 0;
+ if (cm->current_video_frame == 0) {
+ // Initially assume that we'll signal the prediction filter
+ // state at the frame level and that it is off.
+ cpi->common.pred_filter_mode = 0;
+ cpi->common.prob_pred_filter_off = 128;
+ }
+ cpi->pred_filter_on_count = 0;
+ cpi->pred_filter_off_count = 0;
#endif
#if 0
- // Experimental code
- cpi->frame_distortion = 0;
- cpi->last_mb_distortion = 0;
+ // Experimental code
+ cpi->frame_distortion = 0;
+ cpi->last_mb_distortion = 0;
#endif
- xd->mode_info_context = cm->mi;
- xd->prev_mode_info_context = cm->prev_mi;
+ xd->mode_info_context = cm->mi;
+ xd->prev_mode_info_context = cm->prev_mi;
- vp8_zero(cpi->MVcount);
+ vp8_zero(cpi->MVcount);
#if CONFIG_HIGH_PRECISION_MV
- vp8_zero(cpi->MVcount_hp);
+ vp8_zero(cpi->MVcount_hp);
#endif
- vp8_zero(cpi->coef_counts);
- vp8_zero(cpi->coef_counts_8x8);
+ vp8_zero(cpi->coef_counts);
+ vp8_zero(cpi->coef_counts_8x8);
- vp8cx_frame_init_quantizer(cpi);
+ vp8cx_frame_init_quantizer(cpi);
- vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
- vp8cx_initialize_me_consts(cpi, cm->base_qindex);
+ vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
+ vp8cx_initialize_me_consts(cpi, cm->base_qindex);
- if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
- {
- // Initialize encode frame context.
- init_encode_frame_mb_context(cpi);
-
- // Build a frame level activity map
- build_activity_map(cpi);
- }
-
- // re-initencode frame context.
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Initialize encode frame context.
init_encode_frame_mb_context(cpi);
- cpi->rd_single_diff = cpi->rd_comp_diff = cpi->rd_hybrid_diff = 0;
- vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
- vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
+ // Build a frame level activity map
+ build_activity_map(cpi);
+ }
- {
- struct vpx_usec_timer emr_timer;
- vpx_usec_timer_start(&emr_timer);
+  // Re-initialize the encode frame context.
+ init_encode_frame_mb_context(cpi);
- {
- // For each row of SBs in the frame
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row+=2)
- {
- int offset = (cm->mb_cols+1) & ~0x1;
+ cpi->rd_single_diff = cpi->rd_comp_diff = cpi->rd_hybrid_diff = 0;
+ vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
+ vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
- encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
+ {
+ struct vpx_usec_timer emr_timer;
+ vpx_usec_timer_start(&emr_timer);
- // adjust to the next row of SBs
- x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
- x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
- x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
- }
+ {
+ // For each row of SBs in the frame
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) {
+ int offset = (cm->mb_cols + 1) & ~0x1;
- cpi->tok_count = tp - cpi->tok;
- }
+ encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
- vpx_usec_timer_mark(&emr_timer);
- cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
+ // adjust to the next row of SBs
+ x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
+ x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
+ x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
+ }
+ cpi->tok_count = tp - cpi->tok;
}
- // 256 rate units to the bit,
- // projected_frame_size in units of BYTES
- cpi->projected_frame_size = totalrate >> 8;
+ vpx_usec_timer_mark(&emr_timer);
+ cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
+
+ }
+
+ // 256 rate units to the bit,
+ // projected_frame_size in units of BYTES
+ cpi->projected_frame_size = totalrate >> 8;
#if 0
- // Keep record of the total distortion this time around for future use
- cpi->last_frame_distortion = cpi->frame_distortion;
+ // Keep record of the total distortion this time around for future use
+ cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
-static int check_dual_ref_flags(VP8_COMP *cpi)
-{
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int ref_flags = cpi->ref_frame_flags;
-
- if (segfeature_active(xd, 1, SEG_LVL_REF_FRAME))
- {
- if ((ref_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) == (VP8_LAST_FLAG | VP8_GOLD_FLAG) &&
- check_segref(xd, 1, LAST_FRAME))
- return 1;
- if ((ref_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG )) == (VP8_GOLD_FLAG | VP8_ALT_FLAG ) &&
- check_segref(xd, 1, GOLDEN_FRAME))
- return 1;
- if ((ref_flags & (VP8_ALT_FLAG | VP8_LAST_FLAG)) == (VP8_ALT_FLAG | VP8_LAST_FLAG) &&
- check_segref(xd, 1, ALTREF_FRAME))
- return 1;
- return 0;
- }
- else
- {
- return (!!(ref_flags & VP8_GOLD_FLAG) +
- !!(ref_flags & VP8_LAST_FLAG) +
- !!(ref_flags & VP8_ALT_FLAG) ) >= 2;
- }
+static int check_dual_ref_flags(VP8_COMP *cpi) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int ref_flags = cpi->ref_frame_flags;
+
+ if (segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
+ if ((ref_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) == (VP8_LAST_FLAG | VP8_GOLD_FLAG) &&
+ check_segref(xd, 1, LAST_FRAME))
+ return 1;
+ if ((ref_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) == (VP8_GOLD_FLAG | VP8_ALT_FLAG) &&
+ check_segref(xd, 1, GOLDEN_FRAME))
+ return 1;
+ if ((ref_flags & (VP8_ALT_FLAG | VP8_LAST_FLAG)) == (VP8_ALT_FLAG | VP8_LAST_FLAG) &&
+ check_segref(xd, 1, ALTREF_FRAME))
+ return 1;
+ return 0;
+ } else {
+ return (!!(ref_flags & VP8_GOLD_FLAG) +
+ !!(ref_flags & VP8_LAST_FLAG) +
+ !!(ref_flags & VP8_ALT_FLAG)) >= 2;
+ }
}
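
The final return in check_dual_ref_flags() uses the double-negation idiom to
count how many reference-frame flags are set. A standalone sketch with
illustrative flag values (the real constants live in the encoder headers):

#include <stdio.h>

#define LAST_FLAG 1  /* illustrative values only */
#define GOLD_FLAG 2
#define ALT_FLAG  4

static int at_least_two_refs(int ref_flags) {
  return (!!(ref_flags & GOLD_FLAG) +   /* !! maps nonzero to exactly 1 */
          !!(ref_flags & LAST_FLAG) +
          !!(ref_flags & ALT_FLAG)) >= 2;
}

int main(void) {
  printf("%d\n", at_least_two_refs(LAST_FLAG));             /* 0 */
  printf("%d\n", at_least_two_refs(LAST_FLAG | ALT_FLAG));  /* 1 */
  return 0;
}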
-void vp8_encode_frame(VP8_COMP *cpi)
-{
- if (cpi->sf.RD)
- {
- int frame_type, pred_type;
- int single_diff, comp_diff, hybrid_diff;
-
- /*
- * This code does a single RD pass over the whole frame assuming
- * either compound, single or hybrid prediction as per whatever has
- * worked best for that type of frame in the past.
- * It also predicts whether another coding mode would have worked
- * better that this coding mode. If that is the case, it remembers
- * that for subsequent frames. If the difference is above a certain
- * threshold, it will actually re-encode the current frame using
- * that different coding mode.
- */
- if (cpi->common.frame_type == KEY_FRAME)
- frame_type = 0;
- else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
- frame_type = 3;
- else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
- frame_type = 1;
- else
- frame_type = 2;
-
- if (frame_type == 3)
- pred_type = SINGLE_PREDICTION_ONLY;
- else if (cpi->rd_prediction_type_threshes[frame_type][1] >
- cpi->rd_prediction_type_threshes[frame_type][0] &&
- cpi->rd_prediction_type_threshes[frame_type][1] >
- cpi->rd_prediction_type_threshes[frame_type][2] &&
- check_dual_ref_flags(cpi))
- pred_type = COMP_PREDICTION_ONLY;
- else if (cpi->rd_prediction_type_threshes[frame_type][0] >
- cpi->rd_prediction_type_threshes[frame_type][1] &&
- cpi->rd_prediction_type_threshes[frame_type][0] >
- cpi->rd_prediction_type_threshes[frame_type][2])
- pred_type = SINGLE_PREDICTION_ONLY;
- else
- pred_type = HYBRID_PREDICTION;
-
- cpi->common.comp_pred_mode = pred_type;
- encode_frame_internal(cpi);
-
- single_diff = cpi->rd_single_diff / cpi->common.MBs;
- cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
- cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
- comp_diff = cpi->rd_comp_diff / cpi->common.MBs;
- cpi->rd_prediction_type_threshes[frame_type][1] += comp_diff;
- cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
- hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
- cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
- cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- int single_count_zero = 0;
- int comp_count_zero = 0;
- int i;
-
- for ( i = 0; i < COMP_PRED_CONTEXTS; i++ )
- {
- single_count_zero += cpi->single_pred_count[i];
- comp_count_zero += cpi->comp_pred_count[i];
- }
-
- if (comp_count_zero == 0)
- {
- cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
- }
- else if (single_count_zero == 0)
- {
- cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
- }
- }
- }
+void vp8_encode_frame(VP8_COMP *cpi) {
+ if (cpi->sf.RD) {
+ int frame_type, pred_type;
+ int single_diff, comp_diff, hybrid_diff;
+
+ /*
+ * This code does a single RD pass over the whole frame assuming
+ * either compound, single or hybrid prediction as per whatever has
+ * worked best for that type of frame in the past.
+ * It also predicts whether another coding mode would have worked
+     * better than this coding mode. If that is the case, it remembers
+ * that for subsequent frames. If the difference is above a certain
+ * threshold, it will actually re-encode the current frame using
+ * that different coding mode.
+ */
+ if (cpi->common.frame_type == KEY_FRAME)
+ frame_type = 0;
+ else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
+ frame_type = 3;
+ else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
+ frame_type = 1;
else
- {
- encode_frame_internal(cpi);
+ frame_type = 2;
+
+ if (frame_type == 3)
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][0] &&
+ cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][2] &&
+ check_dual_ref_flags(cpi))
+ pred_type = COMP_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][0] >
+ cpi->rd_prediction_type_threshes[frame_type][1] &&
+ cpi->rd_prediction_type_threshes[frame_type][0] >
+ cpi->rd_prediction_type_threshes[frame_type][2])
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else
+ pred_type = HYBRID_PREDICTION;
+
+ cpi->common.comp_pred_mode = pred_type;
+ encode_frame_internal(cpi);
+
+ single_diff = cpi->rd_single_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
+ cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
+ comp_diff = cpi->rd_comp_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][1] += comp_diff;
+ cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
+ hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
+ cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ int single_count_zero = 0;
+ int comp_count_zero = 0;
+ int i;
+
+ for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
+ single_count_zero += cpi->single_pred_count[i];
+ comp_count_zero += cpi->comp_pred_count[i];
+ }
+
+ if (comp_count_zero == 0) {
+ cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
+ } else if (single_count_zero == 0) {
+ cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
+ }
}
+ } else {
+ encode_frame_internal(cpi);
+ }
}
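
The rd_prediction_type_threshes update in vp8_encode_frame() (add the new
per-MB difference, then shift right by one) is an exponential moving average
with weight 1/2. A hypothetical numeric sketch:

#include <stdio.h>

int main(void) {
  int thresh = 100;                /* running threshold for one frame type */
  int diffs[3] = { 20, -40, 60 };  /* hypothetical per-MB RD differences */
  int i;

  for (i = 0; i < 3; i++) {
    thresh += diffs[i];   /* same update as the encoder: */
    thresh >>= 1;         /* thresh = (thresh + diff) / 2 */
    printf("thresh = %d\n", thresh);  /* 60, 10, 35 */
  }
  return 0;
}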
-void vp8_setup_block_ptrs(MACROBLOCK *x)
-{
- int r, c;
- int i;
+void vp8_setup_block_ptrs(MACROBLOCK *x) {
+ int r, c;
+ int i;
- for (r = 0; r < 4; r++)
- {
- for (c = 0; c < 4; c++)
- {
- x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
- }
+ for (r = 0; r < 4; r++) {
+ for (c = 0; c < 4; c++) {
+ x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
}
+ }
- for (r = 0; r < 2; r++)
- {
- for (c = 0; c < 2; c++)
- {
- x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
- }
+ for (r = 0; r < 2; r++) {
+ for (c = 0; c < 2; c++) {
+ x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
}
+ }
- for (r = 0; r < 2; r++)
- {
- for (c = 0; c < 2; c++)
- {
- x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
- }
+ for (r = 0; r < 2; r++) {
+ for (c = 0; c < 2; c++) {
+ x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
+ }
- x->block[24].src_diff = x->src_diff + 384;
+ x->block[24].src_diff = x->src_diff + 384;
- for (i = 0; i < 25; i++)
- {
- x->block[i].coeff = x->coeff + i * 16;
- }
+ for (i = 0; i < 25; i++) {
+ x->block[i].coeff = x->coeff + i * 16;
+ }
}
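
The offsets in vp8_setup_block_ptrs() partition the 400-entry src_diff
buffer: Y occupies entries 0..255 (16x16 at stride 16), U 256..319 and
V 320..383 (each 8x8 at stride 8), and the second-order block 384..399.
A small sanity check of the index arithmetic (assumed layout, standalone):

#include <assert.h>

int main(void) {
  /* Top-left of the last Y 4x4 block (r = 3, c = 3). */
  assert(3 * 4 * 16 + 3 * 4 == 204);
  /* U block (r = 1, c = 1) inside the 8-wide chroma region. */
  assert(256 + 1 * 4 * 8 + 1 * 4 == 292);
  /* Block 24 starts right after V: 320 + 8 * 8 == 384. */
  assert(320 + 8 * 8 == 384);
  return 0;
}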
-void vp8_build_block_offsets(MACROBLOCK *x)
-{
- int block = 0;
- int br, bc;
-
- vp8_build_block_doffsets(&x->e_mbd);
-
- // y blocks
- x->thismb_ptr = &x->thismb[0];
- for (br = 0; br < 4; br++)
- {
- for (bc = 0; bc < 4; bc++)
- {
- BLOCK *this_block = &x->block[block];
- //this_block->base_src = &x->src.y_buffer;
- //this_block->src_stride = x->src.y_stride;
- //this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- this_block->base_src = &x->thismb_ptr;
- this_block->src_stride = 16;
- this_block->src = 4 * br * 16 + 4 * bc;
- ++block;
- }
+void vp8_build_block_offsets(MACROBLOCK *x) {
+ int block = 0;
+ int br, bc;
+
+ vp8_build_block_doffsets(&x->e_mbd);
+
+ // y blocks
+ x->thismb_ptr = &x->thismb[0];
+ for (br = 0; br < 4; br++) {
+ for (bc = 0; bc < 4; bc++) {
+ BLOCK *this_block = &x->block[block];
+ // this_block->base_src = &x->src.y_buffer;
+ // this_block->src_stride = x->src.y_stride;
+ // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
+ this_block->base_src = &x->thismb_ptr;
+ this_block->src_stride = 16;
+ this_block->src = 4 * br * 16 + 4 * bc;
+ ++block;
}
-
- // u blocks
- for (br = 0; br < 2; br++)
- {
- for (bc = 0; bc < 2; bc++)
- {
- BLOCK *this_block = &x->block[block];
- this_block->base_src = &x->src.u_buffer;
- this_block->src_stride = x->src.uv_stride;
- this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- ++block;
- }
+ }
+
+ // u blocks
+ for (br = 0; br < 2; br++) {
+ for (bc = 0; bc < 2; bc++) {
+ BLOCK *this_block = &x->block[block];
+ this_block->base_src = &x->src.u_buffer;
+ this_block->src_stride = x->src.uv_stride;
+ this_block->src = 4 * br * this_block->src_stride + 4 * bc;
+ ++block;
}
-
- // v blocks
- for (br = 0; br < 2; br++)
- {
- for (bc = 0; bc < 2; bc++)
- {
- BLOCK *this_block = &x->block[block];
- this_block->base_src = &x->src.v_buffer;
- this_block->src_stride = x->src.uv_stride;
- this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- ++block;
- }
+ }
+
+ // v blocks
+ for (br = 0; br < 2; br++) {
+ for (bc = 0; bc < 2; bc++) {
+ BLOCK *this_block = &x->block[block];
+ this_block->base_src = &x->src.v_buffer;
+ this_block->src_stride = x->src.uv_stride;
+ this_block->src = 4 * br * this_block->src_stride + 4 * bc;
+ ++block;
}
+ }
}
-static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
-{
- const MACROBLOCKD *xd = & x->e_mbd;
- const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
- const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
+static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
+ const MACROBLOCKD *xd = & x->e_mbd;
+ const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
+ const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
- const int is_key = cpi->common.frame_type == KEY_FRAME;
+ const int is_key = cpi->common.frame_type == KEY_FRAME;
- ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
- ++ uv_modes_y[m][uvm];
+ ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
+ ++ uv_modes_y[m][uvm];
- if (m == B_PRED)
- {
- unsigned int *const bct = is_key ? b_modes : inter_b_modes;
+ if (m == B_PRED) {
+ unsigned int *const bct = is_key ? b_modes : inter_b_modes;
- int b = 0;
+ int b = 0;
- do
- {
- ++ bct[xd->block[b].bmi.as_mode.first];
- }
- while (++b < 16);
- }
+ do {
+ ++ bct[xd->block[b].bmi.as_mode.first];
+ } while (++b < 16);
+ }
- if(m==I8X8_PRED)
- {
- i8x8_modes[xd->block[0].bmi.as_mode.first]++;
- i8x8_modes[xd->block[2].bmi.as_mode.first]++;
- i8x8_modes[xd->block[8].bmi.as_mode.first]++;
- i8x8_modes[xd->block[10].bmi.as_mode.first]++;
- }
+ if (m == I8X8_PRED) {
+ i8x8_modes[xd->block[0].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[2].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[8].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[10].bmi.as_mode.first]++;
+ }
#endif
- ++cpi->ymode_count[m];
- if (m!=I8X8_PRED)
- ++cpi->y_uv_mode_count[m][uvm];
- else
- {
- cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
- cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
- cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
- cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
- }
- if (m == B_PRED)
- {
- int b = 0;
- do
- {
- ++ cpi->bmode_count[xd->block[b].bmi.as_mode.first];
- }
- while (++b < 16);
- }
+ ++cpi->ymode_count[m];
+ if (m != I8X8_PRED)
+ ++cpi->y_uv_mode_count[m][uvm];
+ else {
+ cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
+ cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
+ cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
+ cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
+ }
+ if (m == B_PRED) {
+ int b = 0;
+ do {
+ ++ cpi->bmode_count[xd->block[b].bmi.as_mode.first];
+ } while (++b < 16);
+ }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
-static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
-{
+static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
- x->act_zbin_adj = *(x->mb_activity_ptr);
+ x->act_zbin_adj = *(x->mb_activity_ptr);
#else
- int64_t a;
- int64_t b;
- int64_t act = *(x->mb_activity_ptr);
-
- // Apply the masking to the RD multiplier.
- a = act + 4*cpi->activity_avg;
- b = 4*act + cpi->activity_avg;
-
- if ( act > cpi->activity_avg )
- x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
- else
- x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ // Apply the masking to the RD multiplier.
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (act > cpi->activity_avg)
+ x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
+ else
+ x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
#endif
}
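
The #else branch of adjust_act_zbin() computes the rounded ratio of
b = 4 * act + avg to a = act + 4 * avg (or its mirror), so activity well
above the average yields a positive adjustment and vice versa. A standalone
sketch of the same arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Same rounded-division adjustment as adjust_act_zbin (sketch only). */
static int zbin_adj(int64_t act, int64_t avg) {
  int64_t a = act + 4 * avg;
  int64_t b = 4 * act + avg;
  if (act > avg)
    return (int)((b + (a >> 1)) / a) - 1;
  return 1 - (int)((a + (b >> 1)) / b);
}

int main(void) {
  printf("%d\n", zbin_adj(400, 100));  /* a=800, b=1700 -> 2 - 1 = 1 */
  printf("%d\n", zbin_adj(100, 400));  /* mirror case -> 1 - 2 = -1 */
  printf("%d\n", zbin_adj(100, 100));  /* act == avg -> 1 - 1 = 0 */
  return 0;
}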
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
- MACROBLOCK *x,
- TOKENEXTRA **t,
- int output_enabled)
-{
- if((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled)
- {
- adjust_act_zbin( cpi, x );
- vp8_update_zbin_extra(cpi, x);
- }
-
- /* test code: set transform size based on mode selection */
- if(cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count++;
- }
- else
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count ++;
- }
-
- if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
- {
- vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
- vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
- }
- else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
- vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
- else
- vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
-
- if(x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
- vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
-
- if (output_enabled)
- {
- // Tokenize
- sum_intra_stats(cpi, x);
- vp8_tokenize_mb(cpi, &x->e_mbd, t);
- }
+ MACROBLOCK *x,
+ TOKENEXTRA **t,
+ int output_enabled) {
+ if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
+ adjust_act_zbin(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
+ }
+
+ /* test code: set transform size based on mode selection */
+ if (cpi->common.txfm_mode == ALLOW_8X8
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != B_PRED) {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count++;
+ } else {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count++;
+ }
+
+ if (x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED) {
+ vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
+ } else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
+ vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
+ else
+ vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
+
+ if (x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+
+ if (output_enabled) {
+ // Tokenize
+ sum_intra_stats(cpi, x);
+ vp8_tokenize_mb(cpi, &x->e_mbd, t);
+ }
}
#ifdef SPEEDSTATS
extern int cnt_pm;
@@ -1667,201 +1553,174 @@ extern void vp8_fix_contexts(MACROBLOCKD *x);
void vp8cx_encode_inter_macroblock
(
- VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset,
- int output_enabled
-)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *const xd = &x->e_mbd;
- unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
- int seg_ref_active;
- unsigned char ref_pred_flag;
-
- x->skip = 0;
-
- if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
- {
- // Adjust the zbin based on this MB rate.
- adjust_act_zbin( cpi, x );
+ VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
+ int recon_yoffset, int recon_uvoffset,
+ int output_enabled
+) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
+ int seg_ref_active;
+ unsigned char ref_pred_flag;
+
+ x->skip = 0;
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Adjust the zbin based on this MB rate.
+ adjust_act_zbin(cpi, x);
+ }
+
+ {
+ // Experimental code. Special case for gf and arf zeromv modes.
+ // Increase zbin size to suppress noise
+ cpi->zbin_mode_boost = 0;
+ if (cpi->zbin_mode_boost_enabled) {
+ if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
+ if (xd->mode_info_context->mbmi.mode == ZEROMV) {
+ if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
+ cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ else
+ cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ } else if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ cpi->zbin_mode_boost = 0;
+ else
+ cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
}
- {
- // Experimental code. Special case for gf and arf zeromv modes.
- // Increase zbin size to suppress noise
- cpi->zbin_mode_boost = 0;
- if (cpi->zbin_mode_boost_enabled)
- {
- if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
- {
- if (xd->mode_info_context->mbmi.mode == ZEROMV)
- {
- if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
- else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- }
- else if (xd->mode_info_context->mbmi.mode == SPLITMV)
- cpi->zbin_mode_boost = 0;
- else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
- }
- }
-
- vp8_update_zbin_extra(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
+ }
+
+ seg_ref_active = segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
+
+ // SET VARIOUS PREDICTION FLAGS
+
+ // Did the chosen reference frame match its predicted value.
+ ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
+ get_pred_ref(cm, xd)));
+ set_pred_flag(xd, PRED_REF, ref_pred_flag);
+
+ /* test code: set transform size based on mode selection */
+ if (cpi->common.txfm_mode == ALLOW_8X8
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count++;
+ } else {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count++;
+ }
+
+ if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ if (xd->mode_info_context->mbmi.mode == B_PRED) {
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
+ } else if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
+ } else {
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
- seg_ref_active = segfeature_active( xd, *segment_id, SEG_LVL_REF_FRAME );
-
- // SET VARIOUS PREDICTION FLAGS
-
- // Did the chosen reference frame match its predicted value.
- ref_pred_flag = ( (xd->mode_info_context->mbmi.ref_frame ==
- get_pred_ref( cm, xd )) );
- set_pred_flag( xd, PRED_REF, ref_pred_flag );
-
- /* test code: set transform size based on mode selection */
- if( cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count ++;
- }
+ if (output_enabled)
+ sum_intra_stats(cpi, x);
+ } else {
+ int ref_fb_idx;
+
+ if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
+ ref_fb_idx = cpi->common.lst_fb_idx;
+ else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
+ ref_fb_idx = cpi->common.gld_fb_idx;
else
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count++;
+ ref_fb_idx = cpi->common.alt_fb_idx;
+
+ xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
+ xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
+ xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+
+ if (xd->mode_info_context->mbmi.second_ref_frame) {
+ int second_ref_fb_idx;
+
+ if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
+ second_ref_fb_idx = cpi->common.lst_fb_idx;
+ else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
+ second_ref_fb_idx = cpi->common.gld_fb_idx;
+ else
+ second_ref_fb_idx = cpi->common.alt_fb_idx;
+
+ xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
+ recon_yoffset;
+ xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
+ recon_uvoffset;
+ xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
+ recon_uvoffset;
}
- if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
- {
- if (xd->mode_info_context->mbmi.mode == B_PRED)
- {
- vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
- vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
- }
- else if(xd->mode_info_context->mbmi.mode == I8X8_PRED)
- {
- vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
- vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
- }
- else
- {
- vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
- vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
- }
+ if (!x->skip) {
+ vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
- if (output_enabled)
- sum_intra_stats(cpi, x);
- }
- else
- {
- int ref_fb_idx;
-
- if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
- ref_fb_idx = cpi->common.lst_fb_idx;
- else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
- ref_fb_idx = cpi->common.gld_fb_idx;
- else
- ref_fb_idx = cpi->common.alt_fb_idx;
-
- xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
- xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
-
- if (xd->mode_info_context->mbmi.second_ref_frame) {
- int second_ref_fb_idx;
-
- if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
- second_ref_fb_idx = cpi->common.lst_fb_idx;
- else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
- second_ref_fb_idx = cpi->common.gld_fb_idx;
- else
- second_ref_fb_idx = cpi->common.alt_fb_idx;
-
- xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
- recon_yoffset;
- xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
- recon_uvoffset;
- xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
- recon_uvoffset;
- }
+ // Clear mb_skip_coeff if mb_no_coeff_skip is not set
+ if (!cpi->common.mb_no_coeff_skip)
+ xd->mode_info_context->mbmi.mb_skip_coeff = 0;
- if (!x->skip)
- {
- vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
-
- // Clear mb_skip_coeff if mb_no_coeff_skip is not set
- if (!cpi->common.mb_no_coeff_skip)
- xd->mode_info_context->mbmi.mb_skip_coeff = 0;
-
- }
- else
- {
- vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
- xd->dst.u_buffer, xd->dst.v_buffer,
- xd->dst.y_stride, xd->dst.uv_stride);
- }
+ } else {
+ vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+ xd->dst.u_buffer, xd->dst.v_buffer,
+ xd->dst.y_stride, xd->dst.uv_stride);
}
+ }
- if (!x->skip)
- {
+ if (!x->skip) {
#ifdef ENC_DEBUG
- if (enc_debug)
- {
- int i;
- printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
- for (i =0; i<400; i++) {
- printf("%3d ", xd->qcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("\n");
- printf("eobs = ");
- for (i=0;i<25;i++)
- printf("%d:%d ", i, xd->block[i].eob);
- printf("\n");
- fflush(stdout);
- }
+ if (enc_debug) {
+ int i;
+ printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", xd->qcoeff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("\n");
+ printf("eobs = ");
+ for (i = 0; i < 25; i++)
+ printf("%d:%d ", i, xd->block[i].eob);
+ printf("\n");
+ fflush(stdout);
+ }
#endif
- if (output_enabled)
- vp8_tokenize_mb(cpi, xd, t);
+ if (output_enabled)
+ vp8_tokenize_mb(cpi, xd, t);
#ifdef ENC_DEBUG
- if (enc_debug) {
- printf("Tokenized\n");
- fflush(stdout);
- }
-#endif
+ if (enc_debug) {
+ printf("Tokenized\n");
+ fflush(stdout);
}
- else
- {
+#endif
+ } else {
#if CONFIG_NEWENTROPY
- int mb_skip_context =
- cpi->common.mb_no_coeff_skip ?
- (x->e_mbd.mode_info_context-1)->mbmi.mb_skip_coeff +
- (x->e_mbd.mode_info_context-cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
- 0;
+ int mb_skip_context =
+ cpi->common.mb_no_coeff_skip ?
+ (x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
+ (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
+ 0;
#endif
- if (cpi->common.mb_no_coeff_skip)
- {
- xd->mode_info_context->mbmi.mb_skip_coeff = 1;
+ if (cpi->common.mb_no_coeff_skip) {
+ xd->mode_info_context->mbmi.mb_skip_coeff = 1;
#if CONFIG_NEWENTROPY
- cpi->skip_true_count[mb_skip_context] ++;
+ cpi->skip_true_count[mb_skip_context]++;
#else
- cpi->skip_true_count ++;
+ cpi->skip_true_count++;
#endif
- vp8_fix_contexts(xd);
- }
- else
- {
- vp8_stuff_mb(cpi, xd, t);
- xd->mode_info_context->mbmi.mb_skip_coeff = 0;
+ vp8_fix_contexts(xd);
+ } else {
+ vp8_stuff_mb(cpi, xd, t);
+ xd->mode_info_context->mbmi.mb_skip_coeff = 0;
#if CONFIG_NEWENTROPY
- cpi->skip_false_count[mb_skip_context] ++;
+ cpi->skip_false_count[mb_skip_context]++;
#else
- cpi->skip_false_count ++;
+ cpi->skip_false_count++;
#endif
- }
}
+ }
}
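
Under CONFIG_NEWENTROPY the skip counters above are conditioned on a context
derived from the left and above neighbours' mb_skip_coeff flags, giving
values 0..2. A standalone restatement of that expression (hypothetical
helper, not encoder API):

#include <stdio.h>

/* 0..2: how many of the (left, above) neighbours were coded as skipped;
   always 0 when mb_no_coeff_skip is disabled, matching the code above. */
static int mb_skip_context(int mb_no_coeff_skip,
                           int left_skip, int above_skip) {
  return mb_no_coeff_skip ? left_skip + above_skip : 0;
}

int main(void) {
  printf("%d\n", mb_skip_context(1, 1, 0));  /* 1 */
  printf("%d\n", mb_skip_context(0, 1, 1));  /* 0 */
  return 0;
}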
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index bb7a1aa74..7d1453218 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -32,338 +32,308 @@ extern int enc_debug;
#define IF_RTCD(x) NULL
#endif
-int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred)
-{
+int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
- int i;
- int intra_pred_var = 0;
- (void) cpi;
+ int i;
+ int intra_pred_var = 0;
+ (void) cpi;
- if (use_16x16_pred)
- {
- x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
+ if (use_16x16_pred) {
+ x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_mode = (MB_PREDICTION_MODE) (DC_PRED - 1);
+ x->e_mbd.mode_info_context->mbmi.second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
#endif
- x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
- x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
-
- vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
- }
- else
- {
- for (i = 0; i < 16; i++)
- {
- x->e_mbd.block[i].bmi.as_mode.first = B_DC_PRED;
- vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
- }
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
+ } else {
+ for (i = 0; i < 16; i++) {
+ x->e_mbd.block[i].bmi.as_mode.first = B_DC_PRED;
+ vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
}
+ }
- intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+ intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
- return intra_pred_var;
+ return intra_pred_var;
}
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
- MACROBLOCK *x, int ib)
-{
- BLOCKD *b = &x->e_mbd.block[ib];
- BLOCK *be = &x->block[ib];
+ MACROBLOCK *x, int ib) {
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
#if CONFIG_COMP_INTRA_PRED
- if (b->bmi.as_mode.second == (B_PREDICTION_MODE) (B_DC_PRED - 1))
- {
+ if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
- (b, b->bmi.as_mode.first, b->predictor);
+ (b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- RECON_INVOKE(&rtcd->common->recon, comp_intra4x4_predict)
- (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
- }
+ } else {
+ RECON_INVOKE(&rtcd->common->recon, comp_intra4x4_predict)
+ (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
+ }
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
+ x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
- RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
-{
- int i;
+void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
+ int i;
#if 0
- MACROBLOCKD *x = &mb->e_mbd;
- // Intra modes requiring top-right MB reconstructed data have been disabled
- vp8_intra_prediction_down_copy(x);
+ MACROBLOCKD *x = &mb->e_mbd;
+ // Intra modes requiring top-right MB reconstructed data have been disabled
+ vp8_intra_prediction_down_copy(x);
#endif
- for (i = 0; i < 16; i++)
- vp8_encode_intra4x4block(rtcd, mb, i);
- return;
+ for (i = 0; i < 16; i++)
+ vp8_encode_intra4x4block(rtcd, mb, i);
+ return;
}
-void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- BLOCK *b = &x->block[0];
+void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ BLOCK *b = &x->block[0];
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+ int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
+ if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
- else
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+ else
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
- if( tx_type == TX_8X8 )
- vp8_transform_intra_mby_8x8(x);
- else
- vp8_transform_intra_mby(x);
+ if (tx_type == TX_8X8)
+ vp8_transform_intra_mby_8x8(x);
+ else
+ vp8_transform_intra_mby(x);
- if(tx_type == TX_8X8)
- vp8_quantize_mby_8x8(x);
- else
- vp8_quantize_mby(x);
-
- if (x->optimize)
- {
- if( tx_type == TX_8X8 )
- vp8_optimize_mby_8x8(x, rtcd);
- else
- vp8_optimize_mby(x, rtcd);
- }
+ if (tx_type == TX_8X8)
+ vp8_quantize_mby_8x8(x);
+ else
+ vp8_quantize_mby(x);
- if(tx_type == TX_8X8)
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ if (x->optimize) {
+ if (tx_type == TX_8X8)
+ vp8_optimize_mby_8x8(x, rtcd);
else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_optimize_mby(x, rtcd);
+ }
+
+ if (tx_type == TX_8X8)
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ else
+ vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#ifdef ENC_DEBUG
- if (enc_debug) {
- int i;
- printf("Intra qcoeff:\n");
- printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.qcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("Intra dqcoeff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.dqcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("Intra diff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.diff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("Intra predictor:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.predictor[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("eobs:\n");
- for (i=0;i<25;i++)
- printf("%d ", x->e_mbd.block[i].eob);
- printf("\n");
+ if (enc_debug) {
+ int i;
+ printf("Intra qcoeff:\n");
+ printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.qcoeff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("Intra dqcoeff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.dqcoeff[i]);
+ if (i % 16 == 15) printf("\n");
}
+ printf("Intra diff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.diff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("Intra predictor:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.predictor[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("eobs:\n");
+ for (i = 0; i < 25; i++)
+ printf("%d ", x->e_mbd.block[i].eob);
+ printf("\n");
+ }
#endif
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
-void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
- {
+ if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
- }
+ } else {
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
+ }
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- if(tx_type == TX_8X8)
- vp8_transform_mbuv_8x8(x);
- else
- vp8_transform_mbuv(x);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ if (tx_type == TX_8X8)
+ vp8_transform_mbuv_8x8(x);
+ else
+ vp8_transform_mbuv(x);
- if(tx_type == TX_8X8)
- vp8_quantize_mbuv_8x8(x);
- else
- vp8_quantize_mbuv(x);
+ if (tx_type == TX_8X8)
+ vp8_quantize_mbuv_8x8(x);
+ else
+ vp8_quantize_mbuv(x);
#ifdef ENC_DEBUG
- if (enc_debug) {
- int i;
- printf("vp8_encode_intra16x16mbuv\n");
- printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
- printf("qcoeff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.qcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("dqcoeff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.dqcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("diff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.diff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("predictor:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.predictor[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("eobs:\n");
- for (i=0;i<25;i++)
- printf("%d ", x->e_mbd.block[i].eob);
- printf("\n");
+ if (enc_debug) {
+ int i;
+ printf("vp8_encode_intra16x16mbuv\n");
+ printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
+ printf("qcoeff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.qcoeff[i]);
+ if (i % 16 == 15) printf("\n");
}
-#endif
- if (x->optimize)
- {
- if(tx_type == TX_8X8)
- vp8_optimize_mbuv_8x8(x, rtcd);
- else
- vp8_optimize_mbuv(x, rtcd);
+ printf("dqcoeff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.dqcoeff[i]);
+ if (i % 16 == 15) printf("\n");
}
-
- if(tx_type == TX_8X8)
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ printf("diff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.diff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("predictor:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.predictor[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("eobs:\n");
+ for (i = 0; i < 25; i++)
+ printf("%d ", x->e_mbd.block[i].eob);
+ printf("\n");
+ }
+#endif
+ if (x->optimize) {
+ if (tx_type == TX_8X8)
+ vp8_optimize_mbuv_8x8(x, rtcd);
else
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_optimize_mbuv(x, rtcd);
+ }
+
+ if (tx_type == TX_8X8)
+ vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ else
+ vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
- MACROBLOCK *x, int ib)
-{
- BLOCKD *b = &x->e_mbd.block[ib];
- BLOCK *be = &x->block[ib];
- const int iblock[4]={0,1,4,5};
- int i;
+ MACROBLOCK *x, int ib) {
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
+ const int iblock[4] = {0, 1, 4, 5};
+ int i;
#if CONFIG_COMP_INTRA_PRED
- if (b->bmi.as_mode.second == (MB_PREDICTION_MODE) (DC_PRED - 1))
- {
+ if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra8x8_predict)
- (b, b->bmi.as_mode.first, b->predictor);
+ (b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- RECON_INVOKE(&rtcd->common->recon, comp_intra8x8_predict)
- (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
- }
+ } else {
+ RECON_INVOKE(&rtcd->common->recon, comp_intra8x8_predict)
+ (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
+ }
#endif
- for(i=0;i<4;i++)
- {
- b = &x->e_mbd.block[ib + iblock[i]];
- be = &x->block[ib + iblock[i]];
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
- RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor,
- b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- }
+ for (i = 0; i < 4; i++) {
+ b = &x->e_mbd.block[ib + iblock[i]];
+ be = &x->block[ib + iblock[i]];
+ ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, b);
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+ RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor,
+ b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ }
}
extern const int vp8_i8x8_block[4];
-void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int i, ib;
-
- for(i=0;i<4;i++)
- {
- ib = vp8_i8x8_block[i];
- vp8_encode_intra8x8(rtcd, x, ib);
- }
+void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ int i, ib;
+
+ for (i = 0; i < 4; i++) {
+ ib = vp8_i8x8_block[i];
+ vp8_encode_intra8x8(rtcd, x, ib);
+ }
}
void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
- MACROBLOCK *x, int ib,
- int mode, int second)
-{
- BLOCKD *b = &x->e_mbd.block[ib];
- BLOCK *be = &x->block[ib];
+ MACROBLOCK *x, int ib,
+ int mode, int second) {
+ BLOCKD *b = &x->e_mbd.block[ib];
+ BLOCK *be = &x->block[ib];
#if CONFIG_COMP_INTRA_PRED
- if (second == -1)
- {
+ if (second == -1) {
#endif
RECON_INVOKE(&rtcd->common->recon, intra_uv4x4_predict)
- (b, mode, b->predictor);
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- RECON_INVOKE(&rtcd->common->recon, comp_intra_uv4x4_predict)
- (b, mode, second, b->predictor);
- }
+ } else {
+ RECON_INVOKE(&rtcd->common->recon, comp_intra_uv4x4_predict)
+ (b, mode, second, b->predictor);
+ }
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
+ ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
- x->quantize_b(be, b);
+ x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
- RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
- b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
+ b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int i, ib, mode, second;
- BLOCKD *b;
- for(i=0;i<4;i++)
- {
- ib = vp8_i8x8_block[i];
- b = &x->e_mbd.block[ib];
- mode = b->bmi.as_mode.first;
+void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ int i, ib, mode, second;
+ BLOCKD *b;
+ for (i = 0; i < 4; i++) {
+ ib = vp8_i8x8_block[i];
+ b = &x->e_mbd.block[ib];
+ mode = b->bmi.as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- second = b->bmi.as_mode.second;
+ second = b->bmi.as_mode.second;
#else
- second = -1;
+ second = -1;
#endif
- /*u */
- vp8_encode_intra_uv4x4(rtcd, x, i+16, mode, second);
- /*v */
- vp8_encode_intra_uv4x4(rtcd, x, i+20, mode, second);
- }
+ /*u */
+ vp8_encode_intra_uv4x4(rtcd, x, i + 16, mode, second);
+ /*v */
+ vp8_encode_intra_uv4x4(rtcd, x, i + 20, mode, second);
+ }
}
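For reference: every intra path in encodeintra.c follows the same stage order: predict, subtract, forward transform, quantize, inverse transform, reconstruct. A condensed sketch of that ordering; the stage functions here are empty stand-ins for the RTCD-dispatched hooks, not the real signatures:

/* Stand-ins for the dispatched stages used throughout encodeintra.c. */
static void predict(void)  {}  /* intra4x4_predict / build_intra_predictors_* */
static void subtract(void) {}  /* subb / submby / submbuv                     */
static void fdct(void)     {}  /* vp8_short_fdct4x4 / vp8_short_fdct8x8       */
static void quantize(void) {}  /* quantize_b / vp8_quantize_mby, ...          */
static void invert(void)   {}  /* vp8_inverse_transform_*                     */
static void recon(void)    {}  /* recon / recon_mby / vp8_recon_intra_mbuv    */

static void encode_intra_block_sketch(void) {
  predict();   /* form the predictor from reconstructed neighbours */
  subtract();  /* residual = source - predictor                    */
  fdct();      /* forward transform of the residual                */
  quantize();  /* quantize the transform coefficients              */
  invert();    /* dequantize + inverse transform                   */
  recon();     /* reconstruction = predictor + coded residual      */
}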
diff --git a/vp8/encoder/encodeintra.h b/vp8/encoder/encodeintra.h
index ae822d3d9..7d122938f 100644
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -22,6 +22,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
- MACROBLOCK *x, int ib);
+ MACROBLOCK *x, int ib);
#endif
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index dcf479f61..fac2adddf 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -32,309 +32,267 @@
extern int enc_debug;
#endif
-void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch)
-{
- unsigned char *src_ptr = (*(be->base_src) + be->src);
- short *diff_ptr = be->src_diff;
- unsigned char *pred_ptr = bd->predictor;
- int src_stride = be->src_stride;
-
- int r, c;
-
- for (r = 0; r < 4; r++)
- {
- for (c = 0; c < 4; c++)
- {
- diff_ptr[c] = src_ptr[c] - pred_ptr[c];
- }
+void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *src_ptr = (*(be->base_src) + be->src);
+ short *diff_ptr = be->src_diff;
+ unsigned char *pred_ptr = bd->predictor;
+ int src_stride = be->src_stride;
+
+ int r, c;
- diff_ptr += pitch;
- pred_ptr += pitch;
- src_ptr += src_stride;
+ for (r = 0; r < 4; r++) {
+ for (c = 0; c < 4; c++) {
+ diff_ptr[c] = src_ptr[c] - pred_ptr[c];
}
+
+ diff_ptr += pitch;
+ pred_ptr += pitch;
+ src_ptr += src_stride;
+ }
}
-void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch)
-{
- unsigned char *src_ptr = (*(be->base_src) + be->src);
- short *diff_ptr = be->src_diff;
- unsigned char *pred_ptr = bd->predictor;
- int src_stride = be->src_stride;
- int r, c;
- for (r = 0; r < 8; r++)
- {
- for (c = 0; c < 8; c++)
- {
- diff_ptr[c] = src_ptr[c] - pred_ptr[c];
- }
- diff_ptr += pitch;
- pred_ptr += pitch;
- src_ptr += src_stride;
+void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *src_ptr = (*(be->base_src) + be->src);
+ short *diff_ptr = be->src_diff;
+ unsigned char *pred_ptr = bd->predictor;
+ int src_stride = be->src_stride;
+ int r, c;
+ for (r = 0; r < 8; r++) {
+ for (c = 0; c < 8; c++) {
+ diff_ptr[c] = src_ptr[c] - pred_ptr[c];
}
+ diff_ptr += pitch;
+ pred_ptr += pitch;
+ src_ptr += src_stride;
+ }
}
-void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
-{
- short *udiff = diff + 256;
- short *vdiff = diff + 320;
- unsigned char *upred = pred + 256;
- unsigned char *vpred = pred + 320;
+void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride) {
+ short *udiff = diff + 256;
+ short *vdiff = diff + 320;
+ unsigned char *upred = pred + 256;
+ unsigned char *vpred = pred + 320;
- int r, c;
+ int r, c;
- for (r = 0; r < 8; r++)
- {
- for (c = 0; c < 8; c++)
- {
- udiff[c] = usrc[c] - upred[c];
- }
-
- udiff += 8;
- upred += 8;
- usrc += stride;
+ for (r = 0; r < 8; r++) {
+ for (c = 0; c < 8; c++) {
+ udiff[c] = usrc[c] - upred[c];
}
- for (r = 0; r < 8; r++)
- {
- for (c = 0; c < 8; c++)
- {
- vdiff[c] = vsrc[c] - vpred[c];
- }
+ udiff += 8;
+ upred += 8;
+ usrc += stride;
+ }
- vdiff += 8;
- vpred += 8;
- vsrc += stride;
+ for (r = 0; r < 8; r++) {
+ for (c = 0; c < 8; c++) {
+ vdiff[c] = vsrc[c] - vpred[c];
}
-}
-void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride)
-{
- int r, c;
+ vdiff += 8;
+ vpred += 8;
+ vsrc += stride;
+ }
+}
- for (r = 0; r < 16; r++)
- {
- for (c = 0; c < 16; c++)
- {
- diff[c] = src[c] - pred[c];
- }
+void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride) {
+ int r, c;
- diff += 16;
- pred += 16;
- src += stride;
+ for (r = 0; r < 16; r++) {
+ for (c = 0; c < 16; c++) {
+ diff[c] = src[c] - pred[c];
}
+
+ diff += 16;
+ pred += 16;
+ src += stride;
+ }
}
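Note: the fixed offsets 256 and 320 in vp8_subtract_mbuv_c come from the packed per-macroblock residual layout, and block 24's source at src_diff[384] in build_dcblock below relies on the same arrangement. A sketch of that layout, assuming the standard 16x16 luma plus two 8x8 chroma planes; the enum names are illustrative only:

/* Packed diff/predictor layout assumed by the offsets above:
 *   [  0..255]  16x16 luma (Y)  -> diff + 0
 *   [256..319]   8x8 chroma U   -> diff + 256
 *   [320..383]   8x8 chroma V   -> diff + 320
 *   [384..399]   4x4 second-order Y2 block               */
enum {
  Y_OFFSET  = 0,
  U_OFFSET  = 16 * 16,            /* 256 */
  V_OFFSET  = 16 * 16 + 8 * 8,    /* 320 */
  Y2_OFFSET = 16 * 16 + 2 * 8 * 8 /* 384 */
};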
-static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- BLOCK *b = &x->block[0];
+static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ BLOCK *b = &x->block[0];
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}
-static void build_dcblock(MACROBLOCK *x)
-{
- short *src_diff_ptr = &x->src_diff[384];
- int i;
+static void build_dcblock(MACROBLOCK *x) {
+ short *src_diff_ptr = &x->src_diff[384];
+ int i;
- for (i = 0; i < 16; i++)
- {
- src_diff_ptr[i] = x->coeff[i * 16];
- }
+ for (i = 0; i < 16; i++) {
+ src_diff_ptr[i] = x->coeff[i * 16];
+ }
}
-void vp8_build_dcblock_8x8(MACROBLOCK *x)
-{
- short *src_diff_ptr = &x->src_diff[384];
- int i;
- for (i = 0; i < 16; i++)
- {
- src_diff_ptr[i] = 0;
- }
- src_diff_ptr[0] = x->coeff[0 * 16];
- src_diff_ptr[1] = x->coeff[4 * 16];
- src_diff_ptr[4] = x->coeff[8 * 16];
- src_diff_ptr[8] = x->coeff[12 * 16];
+void vp8_build_dcblock_8x8(MACROBLOCK *x) {
+ short *src_diff_ptr = &x->src_diff[384];
+ int i;
+ for (i = 0; i < 16; i++) {
+ src_diff_ptr[i] = 0;
+ }
+ src_diff_ptr[0] = x->coeff[0 * 16];
+ src_diff_ptr[1] = x->coeff[4 * 16];
+ src_diff_ptr[4] = x->coeff[8 * 16];
+ src_diff_ptr[8] = x->coeff[12 * 16];
}
-void vp8_transform_mbuv(MACROBLOCK *x)
-{
- int i;
+void vp8_transform_mbuv(MACROBLOCK *x) {
+ int i;
- for (i = 16; i < 24; i += 2)
- {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ for (i = 16; i < 24; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 16);
+ }
}
-void vp8_transform_intra_mby(MACROBLOCK *x)
-{
- int i;
+void vp8_transform_intra_mby(MACROBLOCK *x) {
+ int i;
- for (i = 0; i < 16; i += 2)
- {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
+ for (i = 0; i < 16; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
- // build dc block from 16 y dc values
- build_dcblock(x);
+ // build dc block from 16 y dc values
+ build_dcblock(x);
- // do 2nd order transform on the dc block
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ // do 2nd order transform on the dc block
+ x->short_walsh4x4(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
}
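Aside: build_dcblock collects the DC term of each of the 16 4x4 luma blocks into block 24, which then receives the second-order Walsh transform. A minimal sketch of the gather, assuming the packed layout of 16 coefficients per 4x4 block; gather_y_dcs is a hypothetical name:

/* Gather the 16 luma DC coefficients into the 4x4 second-order block;
 * subblock i's DC sits at coeff[i * 16] in the packed buffer. */
static void gather_y_dcs(const short *coeff, short *dc) {
  int i;
  for (i = 0; i < 16; i++)
    dc[i] = coeff[i * 16];
}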
-static void transform_mb(MACROBLOCK *x)
-{
- int i;
+static void transform_mb(MACROBLOCK *x) {
+ int i;
- for (i = 0; i < 16; i += 2)
- {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
+ for (i = 0; i < 16; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
- // build dc block from 16 y dc values
- if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- build_dcblock(x);
+ // build dc block from 16 y dc values
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ build_dcblock(x);
- for (i = 16; i < 24; i += 2)
- {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ for (i = 16; i < 24; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 16);
+ }
- // do 2nd order transform on the dc block
- if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ // do 2nd order transform on the dc block
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ x->short_walsh4x4(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
}
-static void transform_mby(MACROBLOCK *x)
-{
- int i;
+static void transform_mby(MACROBLOCK *x) {
+ int i;
- for (i = 0; i < 16; i += 2)
- {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
+ for (i = 0; i < 16; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
- // build dc block from 16 y dc values
- if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- {
- build_dcblock(x);
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
- }
+ // build dc block from 16 y dc values
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
+ build_dcblock(x);
+ x->short_walsh4x4(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
+ }
}
-void vp8_transform_mbuv_8x8(MACROBLOCK *x)
-{
- int i;
+void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
+ int i;
#if !CONFIG_INT_8X8FDCT
- vp8_clear_system_state();
+ vp8_clear_system_state();
#endif
- for (i = 16; i < 24; i += 4)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ for (i = 16; i < 24; i += 4) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 16);
+ }
}
-void vp8_transform_intra_mby_8x8(MACROBLOCK *x)//changed
-{
- int i;
+void vp8_transform_intra_mby_8x8(MACROBLOCK *x) { // changed
+ int i;
#if !CONFIG_INT_8X8FDCT
- vp8_clear_system_state();
+ vp8_clear_system_state();
#endif
- for (i = 0; i < 9; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i+2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- vp8_build_dcblock_8x8(x);
- //vp8_build_dcblock(x);
-
- // do 2nd order transform on the dc block
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ for (i = 0; i < 9; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
+ for (i = 2; i < 11; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i + 2].coeff[0], 32);
+ }
+  // build dc block from the four 8x8 y dc values
+ vp8_build_dcblock_8x8(x);
+ // vp8_build_dcblock(x);
+
+ // do 2nd order transform on the dc block
+ x->short_fhaar2x2(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
}
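Note: the two fdct8x8 loops above (i = 0, 8 and i = 2, 10) cover the four 8x8 luma quadrants; the second loop writes through block[i + 2], so the four coefficient sets land at 4x4 slots 0, 4, 8 and 12, exactly where vp8_build_dcblock_8x8 picks up the DCs (coeff[{0,4,8,12} * 16]). A sketch of that mapping, assuming the 4x4 raster numbering; the array names are illustrative:

/* 8x8 luma transform mapping used above (4x4 subblock numbering):
 *   source subblock (top-left of each quadrant): 0, 2,  8, 10
 *   coefficient destination subblock:            0, 4,  8, 12
 * so each 8x8 output occupies four consecutive 4x4 coefficient slots. */
static const int fdct8x8_src[4] = { 0, 2, 8, 10 };
static const int fdct8x8_dst[4] = { 0, 4, 8, 12 };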
-void vp8_transform_mb_8x8(MACROBLOCK *x)
-{
- int i;
+void vp8_transform_mb_8x8(MACROBLOCK *x) {
+ int i;
#if !CONFIG_INT_8X8FDCT
- vp8_clear_system_state();
+ vp8_clear_system_state();
#endif
- for (i = 0; i < 9; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i+2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- vp8_build_dcblock_8x8(x);
- //vp8_build_dcblock(x);
-
- for (i = 16; i < 24; i += 4)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ for (i = 0; i < 9; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
+ for (i = 2; i < 11; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i + 2].coeff[0], 32);
+ }
+  // build dc block from the four 8x8 y dc values
+ if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ vp8_build_dcblock_8x8(x);
+ // vp8_build_dcblock(x);
+
+ for (i = 16; i < 24; i += 4) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 16);
+ }
- // do 2nd order transform on the dc block
- if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ // do 2nd order transform on the dc block
+ if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ x->short_fhaar2x2(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
}
-void vp8_transform_mby_8x8(MACROBLOCK *x)
-{
- int i;
+void vp8_transform_mby_8x8(MACROBLOCK *x) {
+ int i;
#if !CONFIG_INT_8X8FDCT
- vp8_clear_system_state();
+ vp8_clear_system_state();
#endif
- for (i = 0; i < 9; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8)
- {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i+2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- {
- //vp8_build_dcblock(x);
- vp8_build_dcblock_8x8(x);
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
- }
+ for (i = 0; i < 9; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i].coeff[0], 32);
+ }
+ for (i = 2; i < 11; i += 8) {
+ x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ &x->block[i + 2].coeff[0], 32);
+ }
+  // build dc block from the four 8x8 y dc values
+ if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
+ // vp8_build_dcblock(x);
+ vp8_build_dcblock_8x8(x);
+ x->short_fhaar2x2(&x->block[24].src_diff[0],
+ &x->block[24].coeff[0], 8);
+ }
}
@@ -342,7 +300,7 @@ void vp8_transform_mby_8x8(MACROBLOCK *x)
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
typedef struct vp8_token_state vp8_token_state;
-struct vp8_token_state{
+struct vp8_token_state {
int rate;
int error;
signed char next;
@@ -355,895 +313,829 @@ struct vp8_token_state{
#define UV_RD_MULT 2
#define Y2_RD_MULT 4
-static const int plane_rd_mult[4]=
-{
- Y1_RD_MULT,
- Y2_RD_MULT,
- UV_RD_MULT,
- Y1_RD_MULT
+static const int plane_rd_mult[4] = {
+ Y1_RD_MULT,
+ Y2_RD_MULT,
+ UV_RD_MULT,
+ Y1_RD_MULT
};
static void optimize_b(MACROBLOCK *mb, int ib, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- const VP8_ENCODER_RTCD *rtcd)
-{
- BLOCK *b;
- BLOCKD *d;
- vp8_token_state tokens[17][2];
- unsigned best_mask[2];
- const short *dequant_ptr;
- const short *coeff_ptr;
- short *qcoeff_ptr;
- short *dqcoeff_ptr;
- int eob;
- int i0;
- int rc;
- int x;
- int sz = 0;
- int next;
- int rdmult;
- int rddiv;
- int final_eob;
- int rd_cost0;
- int rd_cost1;
- int rate0;
- int rate1;
- int error0;
- int error1;
- int t0;
- int t1;
- int best;
- int band;
- int pt;
- int i;
- int err_mult = plane_rd_mult[type];
-
- b = &mb->block[ib];
- d = &mb->e_mbd.block[ib];
-
- dequant_ptr = d->dequant;
- coeff_ptr = b->coeff;
- qcoeff_ptr = d->qcoeff;
- dqcoeff_ptr = d->dqcoeff;
- i0 = !type;
- eob = d->eob;
-
- /* Now set up a Viterbi trellis to evaluate alternative roundings. */
- rdmult = mb->rdmult * err_mult;
- if(mb->e_mbd.mode_info_context->mbmi.ref_frame==INTRA_FRAME)
- rdmult = (rdmult * 9)>>4;
-
- rddiv = mb->rddiv;
- best_mask[0] = best_mask[1] = 0;
- /* Initialize the sentinel node of the trellis. */
- tokens[eob][0].rate = 0;
- tokens[eob][0].error = 0;
- tokens[eob][0].next = 16;
- tokens[eob][0].token = DCT_EOB_TOKEN;
- tokens[eob][0].qc = 0;
- *(tokens[eob] + 1) = *(tokens[eob] + 0);
- next = eob;
- for (i = eob; i-- > i0;)
- {
- int base_bits;
- int d2;
- int dx;
-
- rc = vp8_default_zig_zag1d[i];
- x = qcoeff_ptr[rc];
- /* Only add a trellis state for non-zero coefficients. */
- if (x)
- {
- int shortcut=0;
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- /* Evaluate the first possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
- /* Consider both possible successor states. */
- if (next < 16)
- {
- band = vp8_coef_bands[i + 1];
- pt = vp8_prev_token_class[t0];
- rate0 +=
- mb->token_costs[type][band][pt][tokens[next][0].token];
- rate1 +=
- mb->token_costs[type][band][pt][tokens[next][1].token];
- }
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
- rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
- }
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
- dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
- d2 = dx*dx;
- tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
- tokens[i][0].error = d2 + (best ? error1 : error0);
- tokens[i][0].next = next;
- tokens[i][0].token = t0;
- tokens[i][0].qc = x;
- best_mask[0] |= best << i;
- /* Evaluate the second possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
-
- if((abs(x)*dequant_ptr[rc]>abs(coeff_ptr[rc])) &&
- (abs(x)*dequant_ptr[rc]<abs(coeff_ptr[rc])+dequant_ptr[rc]))
- shortcut = 1;
- else
- shortcut = 0;
-
- if(shortcut)
- {
- sz = -(x < 0);
- x -= 2*sz + 1;
- }
-
- /* Consider both possible successor states. */
- if (!x)
- {
- /* If we reduced this coefficient to zero, check to see if
- * we need to move the EOB back here.
- */
- t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
- DCT_EOB_TOKEN : ZERO_TOKEN;
- t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
- DCT_EOB_TOKEN : ZERO_TOKEN;
- }
- else
- {
- t0=t1 = (vp8_dct_value_tokens_ptr + x)->Token;
- }
- if (next < 16)
- {
- band = vp8_coef_bands[i + 1];
- if(t0!=DCT_EOB_TOKEN)
- {
- pt = vp8_prev_token_class[t0];
- rate0 += mb->token_costs[type][band][pt][
- tokens[next][0].token];
- }
- if(t1!=DCT_EOB_TOKEN)
- {
- pt = vp8_prev_token_class[t1];
- rate1 += mb->token_costs[type][band][pt][
- tokens[next][1].token];
- }
- }
-
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
- rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
- }
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
-
- if(shortcut)
- {
- dx -= (dequant_ptr[rc] + sz) ^ sz;
- d2 = dx*dx;
- }
- tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
- tokens[i][1].error = d2 + (best ? error1 : error0);
- tokens[i][1].next = next;
- tokens[i][1].token =best?t1:t0;
- tokens[i][1].qc = x;
- best_mask[1] |= best << i;
- /* Finally, make this the new head of the trellis. */
- next = i;
- }
- /* There's no choice to make for a zero coefficient, so we don't
- * add a new trellis node, but we do need to update the costs.
+ const VP8_ENCODER_RTCD *rtcd) {
+ BLOCK *b;
+ BLOCKD *d;
+ vp8_token_state tokens[17][2];
+ unsigned best_mask[2];
+ const short *dequant_ptr;
+ const short *coeff_ptr;
+ short *qcoeff_ptr;
+ short *dqcoeff_ptr;
+ int eob;
+ int i0;
+ int rc;
+ int x;
+ int sz = 0;
+ int next;
+ int rdmult;
+ int rddiv;
+ int final_eob;
+ int rd_cost0;
+ int rd_cost1;
+ int rate0;
+ int rate1;
+ int error0;
+ int error1;
+ int t0;
+ int t1;
+ int best;
+ int band;
+ int pt;
+ int i;
+ int err_mult = plane_rd_mult[type];
+
+ b = &mb->block[ib];
+ d = &mb->e_mbd.block[ib];
+
+ dequant_ptr = d->dequant;
+ coeff_ptr = b->coeff;
+ qcoeff_ptr = d->qcoeff;
+ dqcoeff_ptr = d->dqcoeff;
+ i0 = !type;
+ eob = d->eob;
+
+ /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+ rdmult = mb->rdmult * err_mult;
+ if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
+ rdmult = (rdmult * 9) >> 4;
+
+ rddiv = mb->rddiv;
+ best_mask[0] = best_mask[1] = 0;
+ /* Initialize the sentinel node of the trellis. */
+ tokens[eob][0].rate = 0;
+ tokens[eob][0].error = 0;
+ tokens[eob][0].next = 16;
+ tokens[eob][0].token = DCT_EOB_TOKEN;
+ tokens[eob][0].qc = 0;
+ *(tokens[eob] + 1) = *(tokens[eob] + 0);
+ next = eob;
+ for (i = eob; i-- > i0;) {
+ int base_bits;
+ int d2;
+ int dx;
+
+ rc = vp8_default_zig_zag1d[i];
+ x = qcoeff_ptr[rc];
+ /* Only add a trellis state for non-zero coefficients. */
+ if (x) {
+ int shortcut = 0;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ /* Evaluate the first possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ /* Consider both possible successor states. */
+ if (next < 16) {
+ band = vp8_coef_bands[i + 1];
+ pt = vp8_prev_token_class[t0];
+ rate0 +=
+ mb->token_costs[type][band][pt][tokens[next][0].token];
+ rate1 +=
+ mb->token_costs[type][band][pt][tokens[next][1].token];
+ }
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+ dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
+ d2 = dx * dx;
+ tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][0].error = d2 + (best ? error1 : error0);
+ tokens[i][0].next = next;
+ tokens[i][0].token = t0;
+ tokens[i][0].qc = x;
+ best_mask[0] |= best << i;
+ /* Evaluate the second possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+
+ if ((abs(x)*dequant_ptr[rc] > abs(coeff_ptr[rc])) &&
+ (abs(x)*dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc]))
+ shortcut = 1;
+ else
+ shortcut = 0;
+
+ if (shortcut) {
+ sz = -(x < 0);
+ x -= 2 * sz + 1;
+ }
+
+ /* Consider both possible successor states. */
+ if (!x) {
+ /* If we reduced this coefficient to zero, check to see if
+ * we need to move the EOB back here.
*/
- else
- {
- band = vp8_coef_bands[i + 1];
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- /* Update the cost of each path if we're past the EOB token. */
- if (t0 != DCT_EOB_TOKEN)
- {
- tokens[next][0].rate += mb->token_costs[type][band][0][t0];
- tokens[next][0].token = ZERO_TOKEN;
- }
- if (t1 != DCT_EOB_TOKEN)
- {
- tokens[next][1].rate += mb->token_costs[type][band][0][t1];
- tokens[next][1].token = ZERO_TOKEN;
- }
- /* Don't update next, because we didn't add a new node. */
+ t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ } else {
+ t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ }
+ if (next < 16) {
+ band = vp8_coef_bands[i + 1];
+ if (t0 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t0];
+ rate0 += mb->token_costs[type][band][pt][
+ tokens[next][0].token];
}
- }
+ if (t1 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t1];
+ rate1 += mb->token_costs[type][band][pt][
+ tokens[next][1].token];
+ }
+ }
- /* Now pick the best path through the whole trellis. */
- band = vp8_coef_bands[i + 1];
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- rate0 += mb->token_costs[type][band][pt][t0];
- rate1 += mb->token_costs[type][band][pt][t1];
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+
+ if (shortcut) {
+ dx -= (dequant_ptr[rc] + sz) ^ sz;
+ d2 = dx * dx;
+ }
+ tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][1].error = d2 + (best ? error1 : error0);
+ tokens[i][1].next = next;
+ tokens[i][1].token = best ? t1 : t0;
+ tokens[i][1].qc = x;
+ best_mask[1] |= best << i;
+ /* Finally, make this the new head of the trellis. */
+ next = i;
}
- best = rd_cost1 < rd_cost0;
- final_eob = i0 - 1;
- for (i = next; i < eob; i = next)
- {
- x = tokens[i][best].qc;
- if (x)
- final_eob = i;
- rc = vp8_default_zig_zag1d[i];
- qcoeff_ptr[rc] = x;
- dqcoeff_ptr[rc] = x * dequant_ptr[rc];
- next = tokens[i][best].next;
- best = (best_mask[best] >> i) & 1;
+ /* There's no choice to make for a zero coefficient, so we don't
+ * add a new trellis node, but we do need to update the costs.
+ */
+ else {
+ band = vp8_coef_bands[i + 1];
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ /* Update the cost of each path if we're past the EOB token. */
+ if (t0 != DCT_EOB_TOKEN) {
+ tokens[next][0].rate += mb->token_costs[type][band][0][t0];
+ tokens[next][0].token = ZERO_TOKEN;
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ tokens[next][1].rate += mb->token_costs[type][band][0][t1];
+ tokens[next][1].token = ZERO_TOKEN;
+ }
+ /* Don't update next, because we didn't add a new node. */
}
- final_eob++;
-
- d->eob = final_eob;
- *a = *l = (d->eob != !type);
+ }
+
+ /* Now pick the best path through the whole trellis. */
+ band = vp8_coef_bands[i + 1];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ rate0 += mb->token_costs[type][band][pt][t0];
+ rate1 += mb->token_costs[type][band][pt][t1];
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);
+ }
+ best = rd_cost1 < rd_cost0;
+ final_eob = i0 - 1;
+ for (i = next; i < eob; i = next) {
+ x = tokens[i][best].qc;
+ if (x)
+ final_eob = i;
+ rc = vp8_default_zig_zag1d[i];
+ qcoeff_ptr[rc] = x;
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc];
+ next = tokens[i][best].next;
+ best = (best_mask[best] >> i) & 1;
+ }
+ final_eob++;
+
+ d->eob = final_eob;
+ *a = *l = (d->eob != !type);
}
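Note: optimize_b runs a two-state Viterbi pass from the EOB backwards; at each nonzero coefficient it scores keeping the quantized value against shrinking its magnitude by one, using the RDCOST macro. A stripped-down sketch of that comparison, with the rddiv distortion multiplier folded to 1 for brevity; function names are illustrative:

/* Rate-distortion cost in the shape of the RDCOST macro: rate is scaled
 * by the multiplier with rounding, distortion is added directly. */
static int rd_cost(int rdmult, int rate, int distortion) {
  return ((128 + rate * rdmult) >> 8) + distortion;
}

/* Returns 1 when the second candidate quantization of a coefficient
 * beats the first, mirroring 'best = rd_cost1 < rd_cost0' above. */
static int pick_best(int rdmult, int rate0, int err0, int rate1, int err1) {
  return rd_cost(rdmult, rate1, err1) < rd_cost(rdmult, rate0, err0);
}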
- /**************************************************************************
- our inverse hadamard transform effectively is weighted sum of all 16 inputs
- with weight either 1 or -1. It has a last stage scaling of (sum+1)>>2. And
- dc only idct is (dc+16)>>5. So if all the sums are between -65 and 63 the
- output after inverse wht and idct will be all zero. A sum of absolute value
- smaller than 65 guarantees all 16 different (+1/-1) weighted sums in wht
- fall between -65 and +65.
- **************************************************************************/
+/**************************************************************************
+Our inverse Hadamard transform is effectively a weighted sum of all 16
+inputs, with each weight either +1 or -1. It has a last-stage scaling of
+(sum+1)>>2, and the DC-only IDCT is (dc+16)>>5. So if all the sums are
+between -65 and 63, the output after the inverse WHT and IDCT will be all
+zero. A sum of absolute values smaller than 65 guarantees that all 16
+different (+1/-1) weighted sums in the WHT fall between -65 and +65.
+**************************************************************************/
#define SUM_2ND_COEFF_THRESH 65
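Aside: the 65 in SUM_2ND_COEFF_THRESH rests on the triangle inequality: each WHT output is a +/-1-weighted sum of the 16 inputs, so its magnitude is bounded by the sum of their absolute values. A small brute-force check of that bound over all 2^16 sign patterns; the function name is hypothetical, and the rounding claim about (sum+1)>>2 and (dc+16)>>5 is the comment author's:

#include <stdlib.h>

/* Returns 1 if every +/-1-weighted sum of c[0..15] has |sum| < bound,
 * which holds whenever sum(|c[i]|) < bound (triangle inequality). */
static int all_weighted_sums_bounded(const short c[16], int bound) {
  unsigned mask;
  for (mask = 0; mask < (1u << 16); mask++) {
    int i, sum = 0;
    for (i = 0; i < 16; i++)
      sum += (mask & (1u << i)) ? c[i] : -c[i];
    if (abs(sum) >= bound)
      return 0;
  }
  return 1;
}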
static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
-{
- int sum=0;
- int i;
- BLOCKD *bd = &x->block[24];
- if(bd->dequant[0]>=SUM_2ND_COEFF_THRESH
- && bd->dequant[1]>=SUM_2ND_COEFF_THRESH)
- return;
-
- for(i=0;i<bd->eob;i++)
- {
- int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
- sum+= (coef>=0)?coef:-coef;
- if(sum>=SUM_2ND_COEFF_THRESH)
- return;
- }
-
- if(sum < SUM_2ND_COEFF_THRESH)
- {
- for(i=0;i<bd->eob;i++)
- {
- int rc = vp8_default_zig_zag1d[i];
- bd->qcoeff[rc]=0;
- bd->dqcoeff[rc]=0;
- }
- bd->eob = 0;
- *a = *l = (bd->eob != !type);
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
+ int sum = 0;
+ int i;
+ BLOCKD *bd = &x->block[24];
+ if (bd->dequant[0] >= SUM_2ND_COEFF_THRESH
+ && bd->dequant[1] >= SUM_2ND_COEFF_THRESH)
+ return;
+
+ for (i = 0; i < bd->eob; i++) {
+ int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
+ sum += (coef >= 0) ? coef : -coef;
+ if (sum >= SUM_2ND_COEFF_THRESH)
+ return;
+ }
+
+ if (sum < SUM_2ND_COEFF_THRESH) {
+ for (i = 0; i < bd->eob; i++) {
+ int rc = vp8_default_zig_zag1d[i];
+ bd->qcoeff[rc] = 0;
+ bd->dqcoeff[rc] = 0;
}
+ bd->eob = 0;
+ *a = *l = (bd->eob != !type);
+ }
}
#define SUM_2ND_COEFF_THRESH_8X8 32
static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *x, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
-{
- int sum=0;
- BLOCKD *bd = &x->block[24];
- int coef;
-
- coef = bd->dqcoeff[0];
- sum+= (coef>=0)?coef:-coef;
- coef = bd->dqcoeff[1];
- sum+= (coef>=0)?coef:-coef;
- coef = bd->dqcoeff[4];
- sum+= (coef>=0)?coef:-coef;
- coef = bd->dqcoeff[8];
- sum+= (coef>=0)?coef:-coef;
-
- if(sum < SUM_2ND_COEFF_THRESH_8X8)
- {
- bd->qcoeff[0] = 0;
- bd->dqcoeff[0] = 0;
- bd->qcoeff[1] = 0;
- bd->dqcoeff[1] = 0;
- bd->qcoeff[4] = 0;
- bd->dqcoeff[4] = 0;
- bd->qcoeff[8] = 0;
- bd->dqcoeff[8] = 0;
- bd->eob = 0;
- *a = *l = (bd->eob != !type);
- }
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
+ int sum = 0;
+ BLOCKD *bd = &x->block[24];
+ int coef;
+
+ coef = bd->dqcoeff[0];
+ sum += (coef >= 0) ? coef : -coef;
+ coef = bd->dqcoeff[1];
+ sum += (coef >= 0) ? coef : -coef;
+ coef = bd->dqcoeff[4];
+ sum += (coef >= 0) ? coef : -coef;
+ coef = bd->dqcoeff[8];
+ sum += (coef >= 0) ? coef : -coef;
+
+ if (sum < SUM_2ND_COEFF_THRESH_8X8) {
+ bd->qcoeff[0] = 0;
+ bd->dqcoeff[0] = 0;
+ bd->qcoeff[1] = 0;
+ bd->dqcoeff[1] = 0;
+ bd->qcoeff[4] = 0;
+ bd->dqcoeff[4] = 0;
+ bd->qcoeff[8] = 0;
+ bd->dqcoeff[8] = 0;
+ bd->eob = 0;
+ *a = *l = (bd->eob != !type);
+ }
}
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- int type;
- int has_2nd_order;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ int type;
+ int has_2nd_order;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- &&x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
+ has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
- for (b = 0; b < 16; b++)
- {
- optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- }
+ for (b = 0; b < 16; b++) {
+ optimize_b(x, b, type,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ }
- for (b = 16; b < 24; b++)
- {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- }
+ for (b = 16; b < 24; b++) {
+ optimize_b(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ }
- if (has_2nd_order)
- {
- b=24;
- optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
- }
+ if (has_2nd_order) {
+ b = 24;
+ optimize_b(x, b, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
}
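Note: optimize_mb's loop bounds rely on the standard VP8 25-block macroblock numbering, and the luma plane type flips to PLANE_TYPE_Y_NO_DC whenever a second-order Y2 block carries the DCs. A sketch of that mapping; the enum names are illustrative only:

/* Per-macroblock block numbering assumed by the loops above:
 *   0..15  : 4x4 luma subblocks (Y_NO_DC when a Y2 block exists,
 *            Y_WITH_DC for B_PRED / I8X8_PRED / SPLITMV)
 *   16..23 : 4x4 chroma subblocks (U: 16..19, V: 20..23)
 *   24     : second-order Y2 block                         */
enum { FIRST_UV_BLOCK = 16, Y2_BLOCK = 24, NUM_MB_BLOCKS = 25 };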
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- int type;
- int has_2nd_order;
+void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ int type;
+ int has_2nd_order;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
+ if (!x->e_mbd.above_context)
+ return;
- if (!x->e_mbd.left_context)
- return;
+ if (!x->e_mbd.left_context)
+ return;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- &&x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
+ has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
- for (b = 0; b < 16; b++)
- {
- optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- }
+ for (b = 0; b < 16; b++) {
+ optimize_b(x, b, type,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ }
- if (has_2nd_order)
- {
- b=24;
- optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
- }
+ if (has_2nd_order) {
+ b = 24;
+ optimize_b(x, b, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ }
}
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
+ if (!x->e_mbd.above_context)
+ return;
- if (!x->e_mbd.left_context)
- return;
+ if (!x->e_mbd.left_context)
+ return;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 16; b < 24; b++)
- {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
- }
+ for (b = 16; b < 24; b++) {
+ optimize_b(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ }
}
void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- const VP8_ENCODER_RTCD *rtcd)
-{
- BLOCK *b;
- BLOCKD *d;
- vp8_token_state tokens[65][2];
- unsigned best_mask[2];
- const short *dequant_ptr;
- const short *coeff_ptr;
- short *qcoeff_ptr;
- short *dqcoeff_ptr;
- int eob;
- int i0;
- int rc;
- int x;
- int sz = 0;
- int next;
- int rdmult;
- int rddiv;
- int final_eob;
- int rd_cost0;
- int rd_cost1;
- int rate0;
- int rate1;
- int error0;
- int error1;
- int t0;
- int t1;
- int best;
- int band;
- int pt;
- int err_mult = plane_rd_mult[type];
-
- b = &mb->block[i];
- d = &mb->e_mbd.block[i];
-
- dequant_ptr = d->dequant;
- coeff_ptr = b->coeff;
- qcoeff_ptr = d->qcoeff;
- dqcoeff_ptr = d->dqcoeff;
- i0 = !type;
- eob = d->eob;
-
- /* Now set up a Viterbi trellis to evaluate alternative roundings. */
- rdmult = mb->rdmult * err_mult;
- if(mb->e_mbd.mode_info_context->mbmi.ref_frame==INTRA_FRAME)
- rdmult = (rdmult * 9)>>4;
- rddiv = mb->rddiv;
- best_mask[0] = best_mask[1] = 0;
- /* Initialize the sentinel node of the trellis. */
- tokens[eob][0].rate = 0;
- tokens[eob][0].error = 0;
- tokens[eob][0].next = 64;
- tokens[eob][0].token = DCT_EOB_TOKEN;
- tokens[eob][0].qc = 0;
- *(tokens[eob] + 1) = *(tokens[eob] + 0);
- next = eob;
- for (i = eob; i-- > i0;)
- {
- int base_bits;
- int d2;
- int dx;
-
- rc = vp8_default_zig_zag1d_8x8[i];
- x = qcoeff_ptr[rc];
- /* Only add a trellis state for non-zero coefficients. */
- if (x)
- {
- int shortcut=0;
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- /* Evaluate the first possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
- /* Consider both possible successor states. */
- if (next < 64)
- {
- band = vp8_coef_bands_8x8[i + 1];
- pt = vp8_prev_token_class[t0];
- rate0 +=
- mb->token_costs_8x8[type][band][pt][tokens[next][0].token];
- rate1 +=
- mb->token_costs_8x8[type][band][pt][tokens[next][1].token];
- }
- rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
- rd_cost0 = RDTRUNC_8x8(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDTRUNC_8x8(rdmult, rddiv, rate1, error1);
- }
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
- dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
- d2 = dx*dx;
- tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
- tokens[i][0].error = d2 + (best ? error1 : error0);
- tokens[i][0].next = next;
- tokens[i][0].token = t0;
- tokens[i][0].qc = x;
- best_mask[0] |= best << i;
- /* Evaluate the second possibility for this state. */
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
-
- if((abs(x)*dequant_ptr[rc!=0]>abs(coeff_ptr[rc])) &&
- (abs(x)*dequant_ptr[rc!=0]<abs(coeff_ptr[rc])+dequant_ptr[rc!=0]))
- shortcut = 1;
- else
- shortcut = 0;
-
- if(shortcut)
- {
- sz = -(x < 0);
- x -= 2*sz + 1;
- }
-
- /* Consider both possible successor states. */
- if (!x)
- {
- /* If we reduced this coefficient to zero, check to see if
- * we need to move the EOB back here.
- */
- t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
- DCT_EOB_TOKEN : ZERO_TOKEN;
- t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
- DCT_EOB_TOKEN : ZERO_TOKEN;
- }
- else
- {
- t0=t1 = (vp8_dct_value_tokens_ptr + x)->Token;
- }
- if (next < 64)
- {
- band = vp8_coef_bands_8x8[i + 1];
- if(t0!=DCT_EOB_TOKEN)
- {
- pt = vp8_prev_token_class[t0];
- rate0 += mb->token_costs_8x8[type][band][pt][
- tokens[next][0].token];
- }
- if(t1!=DCT_EOB_TOKEN)
- {
- pt = vp8_prev_token_class[t1];
- rate1 += mb->token_costs_8x8[type][band][pt][
- tokens[next][1].token];
- }
- }
-
- rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
- rd_cost0 = RDTRUNC_8x8(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDTRUNC_8x8(rdmult, rddiv, rate1, error1);
- }
- /* And pick the best. */
- best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
-
- if(shortcut)
- {
- dx -= (dequant_ptr[rc!=0] + sz) ^ sz;
- d2 = dx*dx;
- }
- tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
- tokens[i][1].error = d2 + (best ? error1 : error0);
- tokens[i][1].next = next;
- tokens[i][1].token =best?t1:t0;
- tokens[i][1].qc = x;
- best_mask[1] |= best << i;
- /* Finally, make this the new head of the trellis. */
- next = i;
- }
- /* There's no choice to make for a zero coefficient, so we don't
- * add a new trellis node, but we do need to update the costs.
+ const VP8_ENCODER_RTCD *rtcd) {
+ BLOCK *b;
+ BLOCKD *d;
+ vp8_token_state tokens[65][2];
+ unsigned best_mask[2];
+ const short *dequant_ptr;
+ const short *coeff_ptr;
+ short *qcoeff_ptr;
+ short *dqcoeff_ptr;
+ int eob;
+ int i0;
+ int rc;
+ int x;
+ int sz = 0;
+ int next;
+ int rdmult;
+ int rddiv;
+ int final_eob;
+ int rd_cost0;
+ int rd_cost1;
+ int rate0;
+ int rate1;
+ int error0;
+ int error1;
+ int t0;
+ int t1;
+ int best;
+ int band;
+ int pt;
+ int err_mult = plane_rd_mult[type];
+
+ b = &mb->block[i];
+ d = &mb->e_mbd.block[i];
+
+ dequant_ptr = d->dequant;
+ coeff_ptr = b->coeff;
+ qcoeff_ptr = d->qcoeff;
+ dqcoeff_ptr = d->dqcoeff;
+ i0 = !type;
+ eob = d->eob;
+
+ /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+ rdmult = mb->rdmult * err_mult;
+ if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
+ rdmult = (rdmult * 9) >> 4;
+ rddiv = mb->rddiv;
+ best_mask[0] = best_mask[1] = 0;
+ /* Initialize the sentinel node of the trellis. */
+ tokens[eob][0].rate = 0;
+ tokens[eob][0].error = 0;
+ tokens[eob][0].next = 64;
+ tokens[eob][0].token = DCT_EOB_TOKEN;
+ tokens[eob][0].qc = 0;
+ *(tokens[eob] + 1) = *(tokens[eob] + 0);
+ next = eob;
+ for (i = eob; i-- > i0;) {
+ int base_bits;
+ int d2;
+ int dx;
+
+ rc = vp8_default_zig_zag1d_8x8[i];
+ x = qcoeff_ptr[rc];
+ /* Only add a trellis state for non-zero coefficients. */
+ if (x) {
+ int shortcut = 0;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ /* Evaluate the first possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ /* Consider both possible successor states. */
+ if (next < 64) {
+ band = vp8_coef_bands_8x8[i + 1];
+ pt = vp8_prev_token_class[t0];
+ rate0 +=
+ mb->token_costs_8x8[type][band][pt][tokens[next][0].token];
+ rate1 +=
+ mb->token_costs_8x8[type][band][pt][tokens[next][1].token];
+ }
+ rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC_8x8(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC_8x8(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+ dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
+ d2 = dx * dx;
+ tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][0].error = d2 + (best ? error1 : error0);
+ tokens[i][0].next = next;
+ tokens[i][0].token = t0;
+ tokens[i][0].qc = x;
+ best_mask[0] |= best << i;
+ /* Evaluate the second possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+
+      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff_ptr[rc])) &&
+          (abs(x) * dequant_ptr[rc != 0] < abs(coeff_ptr[rc]) + dequant_ptr[rc != 0]))
+ shortcut = 1;
+ else
+ shortcut = 0;
+
+ if (shortcut) {
+ sz = -(x < 0);
+ x -= 2 * sz + 1;
+ }
+
+ /* Consider both possible successor states. */
+ if (!x) {
+ /* If we reduced this coefficient to zero, check to see if
+ * we need to move the EOB back here.
*/
- else
- {
- band = vp8_coef_bands_8x8[i + 1];
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- /* Update the cost of each path if we're past the EOB token. */
- if (t0 != DCT_EOB_TOKEN)
- {
- tokens[next][0].rate += mb->token_costs_8x8[type][band][0][t0];
- tokens[next][0].token = ZERO_TOKEN;
- }
- if (t1 != DCT_EOB_TOKEN)
- {
- tokens[next][1].rate += mb->token_costs_8x8[type][band][0][t1];
- tokens[next][1].token = ZERO_TOKEN;
- }
- /* Don't update next, because we didn't add a new node. */
+ t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ } else {
+ t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ }
+ if (next < 64) {
+ band = vp8_coef_bands_8x8[i + 1];
+ if (t0 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t0];
+ rate0 += mb->token_costs_8x8[type][band][pt][
+ tokens[next][0].token];
}
- }
+ if (t1 != DCT_EOB_TOKEN) {
+ pt = vp8_prev_token_class[t1];
+ rate1 += mb->token_costs_8x8[type][band][pt][
+ tokens[next][1].token];
+ }
+ }
- /* Now pick the best path through the whole trellis. */
- band = vp8_coef_bands_8x8[i + 1];
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- rate0 = tokens[next][0].rate;
- rate1 = tokens[next][1].rate;
- error0 = tokens[next][0].error;
- error1 = tokens[next][1].error;
- t0 = tokens[next][0].token;
- t1 = tokens[next][1].token;
- rate0 += mb->token_costs_8x8[type][band][pt][t0];
- rate1 += mb->token_costs_8x8[type][band][pt][t1];
- rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
- rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
- if (rd_cost0 == rd_cost1)
- {
+ rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
rd_cost0 = RDTRUNC_8x8(rdmult, rddiv, rate0, error0);
rd_cost1 = RDTRUNC_8x8(rdmult, rddiv, rate1, error1);
+ }
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp8_dct_value_cost_ptr + x);
+
+ if (shortcut) {
+ dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+ d2 = dx * dx;
+ }
+ tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][1].error = d2 + (best ? error1 : error0);
+ tokens[i][1].next = next;
+ tokens[i][1].token = best ? t1 : t0;
+ tokens[i][1].qc = x;
+ best_mask[1] |= best << i;
+ /* Finally, make this the new head of the trellis. */
+ next = i;
}
- best = rd_cost1 < rd_cost0;
- final_eob = i0 - 1;
- for (i = next; i < eob; i = next)
- {
- x = tokens[i][best].qc;
- if (x)
- final_eob = i;
- rc = vp8_default_zig_zag1d_8x8[i];
- qcoeff_ptr[rc] = x;
- dqcoeff_ptr[rc] = (x * dequant_ptr[rc!=0]);
-
- next = tokens[i][best].next;
- best = (best_mask[best] >> i) & 1;
+ /* There's no choice to make for a zero coefficient, so we don't
+ * add a new trellis node, but we do need to update the costs.
+ */
+ else {
+ band = vp8_coef_bands_8x8[i + 1];
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ /* Update the cost of each path if we're past the EOB token. */
+ if (t0 != DCT_EOB_TOKEN) {
+ tokens[next][0].rate += mb->token_costs_8x8[type][band][0][t0];
+ tokens[next][0].token = ZERO_TOKEN;
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ tokens[next][1].rate += mb->token_costs_8x8[type][band][0][t1];
+ tokens[next][1].token = ZERO_TOKEN;
+ }
+ /* Don't update next, because we didn't add a new node. */
}
- final_eob++;
-
- d->eob = final_eob;
- *a = *l = (d->eob != !type);
+ }
+
+ /* Now pick the best path through the whole trellis. */
+ band = vp8_coef_bands_8x8[i + 1];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ rate0 += mb->token_costs_8x8[type][band][pt][t0];
+ rate1 += mb->token_costs_8x8[type][band][pt][t1];
+ rd_cost0 = RDCOST_8x8(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDCOST_8x8(rdmult, rddiv, rate1, error1);
+ if (rd_cost0 == rd_cost1) {
+ rd_cost0 = RDTRUNC_8x8(rdmult, rddiv, rate0, error0);
+ rd_cost1 = RDTRUNC_8x8(rdmult, rddiv, rate1, error1);
+ }
+ best = rd_cost1 < rd_cost0;
+ final_eob = i0 - 1;
+ for (i = next; i < eob; i = next) {
+ x = tokens[i][best].qc;
+ if (x)
+ final_eob = i;
+ rc = vp8_default_zig_zag1d_8x8[i];
+ qcoeff_ptr[rc] = x;
+ dqcoeff_ptr[rc] = (x * dequant_ptr[rc != 0]);
+
+ next = tokens[i][best].next;
+ best = (best_mask[best] >> i) & 1;
+ }
+ final_eob++;
+
+ d->eob = final_eob;
+ *a = *l = (d->eob != !type);
}
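
The traceback at the end of optimize_b_8x8() above recovers the winning path: tokens[i][best].next links each kept coefficient to its successor, and best_mask records, per position, which of the two per-node alternatives the better path used. A minimal standalone sketch of that traceback on a toy two-position trellis; the names and data here are illustrative, not libvpx code:

    #include <stdio.h>

    typedef struct {
      int next;  /* index of the successor node on this path */
      int qc;    /* quantized level kept at this node */
    } node;

    int main(void) {
      /* Toy trellis: positions 0 and 1, two alternatives each; eob = 2. */
      node t[2][2] = {
        { { 1, 3 }, { 1, 2 } },  /* position 0 */
        { { 2, 1 }, { 2, 0 } },  /* position 1; successor is the sentinel */
      };
      unsigned best_mask[2] = { 0x2, 0x0 };  /* one choice bit per position */
      int eob = 2, best = 0, i = 0;

      while (i < eob) {
        int next = t[i][best].next;
        printf("pos %d keeps qc=%d (choice %d)\n", i, t[i][best].qc, best);
        best = (best_mask[best] >> i) & 1;  /* same update as the code above */
        i = next;
      }
      return 0;
    }
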
-void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- int type;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- type = 0;
- for (b = 0; b < 16; b+=4)
- {
- optimize_b_8x8(x, b, type,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b] );
- }
-
- for (b = 16; b < 24; b+=4)
- {
- optimize_b_8x8(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd);
- *(ta + vp8_block2above_8x8[b]+1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b]+1 ) = *(tl + vp8_block2left_8x8[b]);
- }
-
- //8x8 always have 2nd roder haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
+void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ int type;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ type = 0;
+ for (b = 0; b < 16; b += 4) {
+ optimize_b_8x8(x, b, type,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ rtcd);
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ }
+
+ for (b = 16; b < 24; b += 4) {
+ optimize_b_8x8(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ rtcd);
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ }
+
+  // 8x8 always has a 2nd order Haar block
+ check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
}
-void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- int type;
+void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ int type;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
+ if (!x->e_mbd.above_context)
+ return;
- if (!x->e_mbd.left_context)
- return;
+ if (!x->e_mbd.left_context)
+ return;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
- type = 0;
- for (b = 0; b < 16; b+=4)
- {
- optimize_b_8x8(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- rtcd);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b] );
- }
- //8x8 always have 2nd roder haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+ type = 0;
+ for (b = 0; b < 16; b += 4) {
+ optimize_b_8x8(x, b, type,
+ ta + vp8_block2above[b], tl + vp8_block2left[b],
+ rtcd);
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ }
+  // 8x8 always has a 2nd order Haar block
+ check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
}
-void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
+ if (!x->e_mbd.above_context)
+ return;
- if (!x->e_mbd.left_context)
- return;
+ if (!x->e_mbd.left_context)
+ return;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 16; b < 24; b+=4)
- {
- optimize_b_8x8(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd);
- *(ta + vp8_block2above_8x8[b]+1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b]+1 ) = *(tl + vp8_block2left_8x8[b]);
- }
+ for (b = 16; b < 24; b += 4) {
+ optimize_b_8x8(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ rtcd);
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ }
}
-void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
- vp8_build_inter_predictors_mb(&x->e_mbd);
+void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+ vp8_build_inter_predictors_mb(&x->e_mbd);
- vp8_subtract_mb(rtcd, x);
+ vp8_subtract_mb(rtcd, x);
- if( tx_type == TX_8X8 )
- vp8_transform_mb_8x8(x);
- else
- transform_mb(x);
+ if (tx_type == TX_8X8)
+ vp8_transform_mb_8x8(x);
+ else
+ transform_mb(x);
- if( tx_type == TX_8X8 )
- vp8_quantize_mb_8x8(x);
- else
- vp8_quantize_mb(x);
-
- if (x->optimize)
- {
- if( tx_type == TX_8X8 )
- optimize_mb_8x8(x, rtcd);
- else
- optimize_mb(x, rtcd);
- }
+ if (tx_type == TX_8X8)
+ vp8_quantize_mb_8x8(x);
+ else
+ vp8_quantize_mb(x);
- if( tx_type == TX_8X8 )
- vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ if (x->optimize) {
+ if (tx_type == TX_8X8)
+ optimize_mb_8x8(x, rtcd);
else
- vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ optimize_mb(x, rtcd);
+ }
+
+ if (tx_type == TX_8X8)
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ else
+ vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- if( tx_type == TX_8X8 )
- {
+ if (tx_type == TX_8X8) {
#ifdef ENC_DEBUG
- if (enc_debug)
- {
- int i;
- printf("qcoeff:\n");
- printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.qcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("dqcoeff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.dqcoeff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("diff:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.diff[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("predictor:\n");
- for (i =0; i<400; i++) {
- printf("%3d ", x->e_mbd.predictor[i]);
- if (i%16 == 15) printf("\n");
- }
- printf("\n");
- }
-#endif
+ if (enc_debug) {
+ int i;
+ printf("qcoeff:\n");
+ printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.qcoeff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("dqcoeff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.dqcoeff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("diff:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.diff[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("predictor:\n");
+ for (i = 0; i < 400; i++) {
+ printf("%3d ", x->e_mbd.predictor[i]);
+ if (i % 16 == 15) printf("\n");
+ }
+ printf("\n");
}
+#endif
+ }
- RECON_INVOKE(&rtcd->common->recon, recon_mb)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mb)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
#ifdef ENC_DEBUG
- if (enc_debug) {
- int i, j, k;
- printf("Final Reconstruction\n");
- for (i =0; i<16; i+=4) {
- BLOCKD *b = &x->e_mbd.block[i];
- unsigned char *d = *(b->base_dst) + b->dst;
- for (k=0; k<4; k++) {
- for (j=0; j<16; j++)
- printf("%3d ", d[j]);
- printf("\n");
- d+=b->dst_stride;
- }
+ if (enc_debug) {
+ int i, j, k;
+ printf("Final Reconstruction\n");
+ for (i = 0; i < 16; i += 4) {
+ BLOCKD *b = &x->e_mbd.block[i];
+ unsigned char *d = *(b->base_dst) + b->dst;
+ for (k = 0; k < 4; k++) {
+ for (j = 0; j < 16; j++)
+ printf("%3d ", d[j]);
+ printf("\n");
+ d += b->dst_stride;
}
}
+ }
#endif
}
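
vp8_encode_inter16x16() above fixes the per-macroblock order of operations: predict, subtract, forward transform, quantize, optional trellis optimization, inverse transform, reconstruct, with the 8x8/4x4 choice threaded through each stage. A schematic sketch of that flow; only the ordering is taken from the function above, the stand-in functions are hypothetical:

    #include <stdio.h>

    static void step(const char *s) { printf("%s\n", s); }

    static void encode_inter_mb(int use_8x8, int do_optimize) {
      step("build inter prediction");   /* vp8_build_inter_predictors_mb */
      step("subtract prediction");      /* source - prediction -> residual */
      step(use_8x8 ? "8x8 transform" : "4x4 transform");
      step(use_8x8 ? "8x8 quantize" : "4x4 quantize");
      if (do_optimize)
        step("trellis optimize");       /* optimize_mb / optimize_mb_8x8 */
      step("inverse transform");        /* back to the pixel domain */
      step("reconstruct");              /* prediction + residual */
    }

    int main(void) { encode_inter_mb(1, 1); return 0; }
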
/* this function is used by first pass only */
-void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
- BLOCK *b = &x->block[0];
+ BLOCK *b = &x->block[0];
#if CONFIG_PRED_FILTER
- // Disable the prediction filter for firstpass
- x->e_mbd.mode_info_context->mbmi.pred_filter_enabled = 0;
+ // Disable the prediction filter for firstpass
+ x->e_mbd.mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
- vp8_build_inter16x16_predictors_mby(&x->e_mbd);
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
- if( tx_type == TX_8X8 )
- vp8_transform_mby_8x8(x);
- else
- transform_mby(x);
+ if (tx_type == TX_8X8)
+ vp8_transform_mby_8x8(x);
+ else
+ transform_mby(x);
- vp8_quantize_mby(x);
+ vp8_quantize_mby(x);
- if( tx_type == TX_8X8 )
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ if (tx_type == TX_8X8)
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ else
+ vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index 396a15196..bfcd0f92c 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -16,23 +16,23 @@
#include "block.h"
#define prototype_mberr(sym) \
- int (sym)(MACROBLOCK *mb, int dc)
+ int (sym)(MACROBLOCK *mb, int dc)
#define prototype_berr(sym) \
- int (sym)(short *coeff, short *dqcoeff)
+ int (sym)(short *coeff, short *dqcoeff)
#define prototype_mbuverr(sym) \
- int (sym)(MACROBLOCK *mb)
+ int (sym)(MACROBLOCK *mb)
#define prototype_subb(sym) \
- void (sym)(BLOCK *be,BLOCKD *bd, int pitch)
+ void (sym)(BLOCK *be,BLOCKD *bd, int pitch)
#define prototype_submby(sym) \
- void (sym)(short *diff, unsigned char *src, unsigned char *pred, int stride)
+ void (sym)(short *diff, unsigned char *src, unsigned char *pred, int stride)
#define prototype_submbuv(sym) \
- void (sym)(short *diff, unsigned char *usrc, unsigned char *vsrc,\
- unsigned char *pred, int stride)
+ void (sym)(short *diff, unsigned char *usrc, unsigned char *vsrc,\
+ unsigned char *pred, int stride)
#if ARCH_X86 || ARCH_X86_64
#include "x86/encodemb_x86.h"
@@ -73,23 +73,21 @@ extern prototype_submby(vp8_encodemb_submby);
extern prototype_submbuv(vp8_encodemb_submbuv);
-typedef struct
-{
- prototype_berr(*berr);
- prototype_mberr(*mberr);
- prototype_mbuverr(*mbuverr);
- prototype_subb(*subb);
- prototype_submby(*submby);
- prototype_submbuv(*submbuv);
+typedef struct {
+ prototype_berr(*berr);
+ prototype_mberr(*mberr);
+ prototype_mbuverr(*mbuverr);
+ prototype_subb(*subb);
+ prototype_submby(*submby);
+ prototype_submbuv(*submbuv);
} vp8_encodemb_rtcd_vtable_t;
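
The vtable above is the encoder's runtime-dispatch point: CPU detection fills these function pointers once, and the hot paths call through them. A minimal sketch of the same pattern with a hypothetical plain-C kernel (a SIMD build would install a different function here); nothing below is libvpx code:

    #include <stdio.h>

    typedef struct {
      int (*berr)(const short *coeff, const short *dqcoeff);
    } rtcd_vtable;

    /* Plain-C fallback: sum of squared coefficient errors over one block. */
    static int berr_c(const short *c, const short *d) {
      int i, e = 0;
      for (i = 0; i < 16; i++) { int x = c[i] - d[i]; e += x * x; }
      return e;
    }

    int main(void) {
      rtcd_vtable rtcd = { berr_c };  /* CPU detection would pick a variant */
      short coeff[16] = { 5 }, dqcoeff[16] = { 4 };
      printf("block error: %d\n", rtcd.berr(coeff, dqcoeff));
      return 0;
    }
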
-typedef struct
-{
- MB_PREDICTION_MODE mode;
- MV_REFERENCE_FRAME ref_frame;
- MV_REFERENCE_FRAME second_ref_frame;
+typedef struct {
+ MB_PREDICTION_MODE mode;
+ MV_REFERENCE_FRAME ref_frame;
+ MV_REFERENCE_FRAME second_ref_frame;
#if CONFIG_PRED_FILTER
- int pred_filter_flag;
+ int pred_filter_flag;
#endif
} MODE_DEFINITION;
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 0ba8848a0..e2643f012 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -20,177 +20,159 @@
extern unsigned int active_section;
#endif
-//#define DEBUG_ENC_MV
+// #define DEBUG_ENC_MV
#ifdef DEBUG_ENC_MV
int enc_mvcount = 0;
#endif
static void encode_mvcomponent(
- vp8_writer *const w,
- const int v,
- const struct mv_context *mvc
-)
-{
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short) // Small
- {
- vp8_write(w, 0, p [mvpis_short]);
- vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
- if (!x)
- return; // no sign bit
- }
- else // Large
- {
- int i = 0;
+ vp8_writer *const w,
+ const int v,
+ const struct mv_context *mvc
+) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v < 0 ? -v : v;
- vp8_write(w, 1, p [mvpis_short]);
+ if (x < mvnum_short) { // Small
+ vp8_write(w, 0, p [mvpis_short]);
+ vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
+ if (!x)
+ return; // no sign bit
+ } else { // Large
+ int i = 0;
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
+ vp8_write(w, 1, p [mvpis_short]);
- while (++i < mvnum_short_bits);
+ do
+ vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
+ while (++i < mvnum_short_bits);
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
+ i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
- while (--i > mvnum_short_bits);
+ do
+ vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
- if (x & ~((2<<mvnum_short_bits)-1))
- vp8_write(w, (x >> mvnum_short_bits) & 1, p [MVPbits + mvnum_short_bits]);
- }
+ while (--i > mvnum_short_bits);
+
+ if (x & ~((2 << mvnum_short_bits) - 1))
+ vp8_write(w, (x >> mvnum_short_bits) & 1, p [MVPbits + mvnum_short_bits]);
+ }
- vp8_write(w, v < 0, p [MVPsign]);
+ vp8_write(w, v < 0, p [MVPsign]);
}
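
The long branch above writes the magnitude bits in a deliberate order: bits 0..2 ascending, then the high bits descending, with bit 3 sent only when it is not implied (a long vector smaller than 16 must already have bit 3 set). A sketch that reproduces just that bit ordering, with a hypothetical emit() standing in for the boolean coder:

    #include <stdio.h>

    enum { SHORT_BITS = 3, LONG_WIDTH = 10 };  /* mvnum_short_bits, mvlong_width */

    static void emit(int bit) { printf("%d", bit); }

    static void emit_long_magnitude(int x) {
      int i;
      for (i = 0; i < SHORT_BITS; i++)              /* bits 0..2, ascending */
        emit((x >> i) & 1);
      for (i = LONG_WIDTH - 1; i > SHORT_BITS; i--) /* high bits, descending */
        emit((x >> i) & 1);
      if (x & ~((2 << SHORT_BITS) - 1))             /* bit 3, unless x < 16 */
        emit((x >> SHORT_BITS) & 1);
      printf("  (x=%d)\n", x);
    }

    int main(void) { emit_long_magnitude(12); emit_long_magnitude(37); return 0; }
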
#if 0
static int max_mv_r = 0;
static int max_mv_c = 0;
#endif
-void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc)
-{
+void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc) {
#if 0
- {
- if (abs(mv->row >> 1) > max_mv_r)
- {
- FILE *f = fopen("maxmv.stt", "a");
- max_mv_r = abs(mv->row >> 1);
- fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
-
- if ((abs(mv->row) / 2) != max_mv_r)
- fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
-
- fclose(f);
- }
-
- if (abs(mv->col >> 1) > max_mv_c)
- {
- FILE *f = fopen("maxmv.stt", "a");
- fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
- max_mv_c = abs(mv->col >> 1);
- fclose(f);
- }
+ {
+ if (abs(mv->row >> 1) > max_mv_r) {
+ FILE *f = fopen("maxmv.stt", "a");
+ max_mv_r = abs(mv->row >> 1);
+ fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
+
+ if ((abs(mv->row) / 2) != max_mv_r)
+ fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
+
+ fclose(f);
+ }
+
+ if (abs(mv->col >> 1) > max_mv_c) {
+ FILE *f = fopen("maxmv.stt", "a");
+ fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
+ max_mv_c = abs(mv->col >> 1);
+ fclose(f);
}
+ }
#endif
- encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
- encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
+ encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
+ encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
#ifdef DEBUG_ENC_MV
- {
+ {
int i;
printf("%d (np): %d %d\n", enc_mvcount++,
- (mv->row >> 1)<<1, (mv->col >> 1)<<1);
- //for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]);
- //printf("\n");
- //for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[1])->prob[i]);
- //printf("\n");
+ (mv->row >> 1) << 1, (mv->col >> 1) << 1);
+ // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]);
+ // printf("\n");
+ // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[1])->prob[i]);
+ // printf("\n");
fflush(stdout);
- }
+ }
#endif
}
-static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc)
-{
- const vp8_prob *p = mvc->prob;
- const int x = v; //v<0? -v:v;
- unsigned int cost;
+static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v; // v<0? -v:v;
+ unsigned int cost;
- if (x < mvnum_short)
- {
- cost = vp8_cost_zero(p [mvpis_short])
- + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
+ if (x < mvnum_short) {
+ cost = vp8_cost_zero(p [mvpis_short])
+ + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
- if (!x)
- return cost;
- }
- else
- {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short]);
+ if (!x)
+ return cost;
+ } else {
+ int i = 0;
+ cost = vp8_cost_one(p [mvpis_short]);
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
+ do
+ cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
- while (++i < mvnum_short_bits);
+ while (++i < mvnum_short_bits);
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
+ i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
+ do
+ cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
- while (--i > mvnum_short_bits);
+ while (--i > mvnum_short_bits);
- if (x & ~((2<<mvnum_short_bits)-1))
- cost += vp8_cost_bit(p [MVPbits + mvnum_short_bits], (x >> mvnum_short_bits) & 1);
- }
+ if (x & ~((2 << mvnum_short_bits) - 1))
+ cost += vp8_cost_bit(p [MVPbits + mvnum_short_bits], (x >> mvnum_short_bits) & 1);
+ }
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
+ return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
}
-void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2])
-{
- int i = 1; //-mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
+void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]) {
+ int i = 1; // -mv_max;
+ unsigned int cost0 = 0;
+ unsigned int cost1 = 0;
- vp8_clear_system_state();
+ vp8_clear_system_state();
- i = 1;
+ i = 1;
- if (mvc_flag[0])
- {
- mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]);
+ if (mvc_flag[0]) {
+ mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]);
- do
- {
- //mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
- }
- while (++i <= mv_max);
- }
+ do {
+ // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
+ cost0 = cost_mvcomponent(i, &mvc[0]);
- i = 1;
+ mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
+ mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
+ } while (++i <= mv_max);
+ }
- if (mvc_flag[1])
- {
- mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]);
+ i = 1;
- do
- {
- //mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
- }
- while (++i <= mv_max);
- }
+ if (mvc_flag[1]) {
+ mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]);
+
+ do {
+ // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
+ cost1 = cost_mvcomponent(i, &mvc[1]);
+
+ mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
+ mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
+ } while (++i <= mv_max);
+ }
}
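
vp8_build_component_cost_table() fills mvcost[comp][v] for v in [-mv_max, mv_max]: the magnitude cost is computed once per |v| and the sign cost added per direction, and mvcost points into the middle of its buffer so negative indices are legal. A sketch of that symmetric fill over a toy range; magnitude_cost() and the sign costs below are made-up stand-ins:

    #include <stdio.h>

    #define MVMAX 4
    static int table[2 * MVMAX + 1];
    static int *mvcost = table + MVMAX;  /* mvcost[-MVMAX..MVMAX] is valid */

    static int magnitude_cost(int m) { return 10 + 2 * m; }  /* stand-in */

    int main(void) {
      int i;
      const int sign_pos = 1, sign_neg = 2;  /* hypothetical sign-bit costs */
      mvcost[0] = magnitude_cost(0);
      for (i = 1; i <= MVMAX; i++) {
        int c = magnitude_cost(i);  /* computed once, used for +i and -i */
        mvcost[i]  = c + sign_pos;
        mvcost[-i] = c + sign_neg;
      }
      for (i = -MVMAX; i <= MVMAX; i++) printf("%d:%d ", i, mvcost[i]);
      printf("\n");
      return 0;
    }
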
@@ -201,574 +183,537 @@ void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int m
#define MV_PROB_UPDATE_CORRECTION -1
-__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2])
-{
- const unsigned int tot = ct[0] + ct[1];
+__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2]) {
+ const unsigned int tot = ct[0] + ct[1];
- if (tot)
- {
- const vp8_prob x = ((ct[0] * 255) / tot) & -2;
- *p = x ? x : 1;
- }
+ if (tot) {
+ const vp8_prob x = ((ct[0] * 255) / tot) & -2;
+ *p = x ? x : 1;
+ }
}
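
Worked through, calc_prob() maps branch counts to the 8-bit probability of taking the zero branch: with ct = {60, 40}, (60 * 255) / 100 = 153, and 153 & -2 clears the low bit to give 152, so *p becomes 152. The result is forced even because the update path below transmits new_p >> 1 as a 7-bit literal, so odd values could not round-trip; and with ct = {0, 40} the quotient is 0, which the ternary clamps to 1, since probability 0 is not representable.
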
static void update(
- vp8_writer *const w,
- const unsigned int ct[2],
- vp8_prob *const cur_p,
- const vp8_prob new_p,
- const vp8_prob update_p,
- int *updated
-)
-{
- const int cur_b = vp8_cost_branch(ct, *cur_p);
- const int new_b = vp8_cost_branch(ct, new_p);
- const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
-
- if (cur_b - new_b > cost)
- {
- *cur_p = new_p;
- vp8_write(w, 1, update_p);
- vp8_write_literal(w, new_p >> 1, 7);
- *updated = 1;
-
- }
- else
- vp8_write(w, 0, update_p);
+ vp8_writer *const w,
+ const unsigned int ct[2],
+ vp8_prob *const cur_p,
+ const vp8_prob new_p,
+ const vp8_prob update_p,
+ int *updated
+) {
+ const int cur_b = vp8_cost_branch(ct, *cur_p);
+ const int new_b = vp8_cost_branch(ct, new_p);
+ const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
+
+ if (cur_b - new_b > cost) {
+ *cur_p = new_p;
+ vp8_write(w, 1, update_p);
+ vp8_write_literal(w, new_p >> 1, 7);
+ *updated = 1;
+
+ } else
+ vp8_write(w, 0, update_p);
}
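
update() is a cost/benefit gate: the probability is rewritten only when the bit saving on this frame's branches, vp8_cost_branch(ct, *cur_p) - vp8_cost_branch(ct, new_p), exceeds what signalling the update costs. That cost is the 7-bit literal (new_p >> 1), plus the extra cost of sending the update flag as 1 rather than 0 converted from 1/256-bit fixed point to whole bits by the (+ 128) >> 8 rounding, plus the MV_PROB_UPDATE_CORRECTION fudge of -1.
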
static void write_component_probs(
- vp8_writer *const w,
- struct mv_context *cur_mvc,
- const struct mv_context *default_mvc_,
- const struct mv_context *update_mvc,
- const unsigned int events [MVvals],
- unsigned int rc,
- int *updated
-)
-{
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width] [2];
-
- unsigned int short_ct [mvnum_short];
- unsigned int short_bct [mvnum_short-1] [2];
-
- vp8_prob Pnew [MVPcount];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- //j=0
- {
- const int c = events [mv_max];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
-
- //j: 1 ~ mv_max (1023)
- {
- int j = 1;
-
+ vp8_writer *const w,
+ struct mv_context *cur_mvc,
+ const struct mv_context *default_mvc_,
+ const struct mv_context *update_mvc,
+ const unsigned int events [MVvals],
+ unsigned int rc,
+ int *updated
+) {
+ vp8_prob *Pcur = cur_mvc->prob;
+ const vp8_prob *default_mvc = default_mvc_->prob;
+ const vp8_prob *Pupdate = update_mvc->prob;
+ unsigned int is_short_ct[2], sign_ct[2];
+
+ unsigned int bit_ct [mvlong_width] [2];
+
+ unsigned int short_ct [mvnum_short];
+ unsigned int short_bct [mvnum_short - 1] [2];
+
+ vp8_prob Pnew [MVPcount];
+
+ (void) rc;
+ vp8_copy_array(Pnew, default_mvc, MVPcount);
+
+ vp8_zero(is_short_ct)
+ vp8_zero(sign_ct)
+ vp8_zero(bit_ct)
+ vp8_zero(short_ct)
+ vp8_zero(short_bct)
+
+
+ // j=0
+ {
+ const int c = events [mv_max];
+
+ is_short_ct [0] += c; // Short vector
+ short_ct [0] += c; // Magnitude distribution
+ }
+
+ // j: 1 ~ mv_max (1023)
+ {
+ int j = 1;
+
+ do {
+ const int c1 = events [mv_max + j]; // positive
+ const int c2 = events [mv_max - j]; // negative
+ const int c = c1 + c2;
+ int a = j;
+
+ sign_ct [0] += c1;
+ sign_ct [1] += c2;
+
+ if (a < mvnum_short) {
+ is_short_ct [0] += c; // Short vector
+ short_ct [a] += c; // Magnitude distribution
+ } else {
+ int k = mvlong_width - 1;
+ is_short_ct [1] += c; // Long vector
+
+ /* bit 3 not always encoded. */
do
- {
- const int c1 = events [mv_max + j]; //positive
- const int c2 = events [mv_max - j]; //negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short)
- {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- }
- else
- {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- }
- while (++j <= mv_max);
- }
-
- /*
- {
- int j = -mv_max;
- do
- {
-
- const int c = events [mv_max + j];
- int a = j;
-
- if( j < 0)
- {
- sign_ct [1] += c;
- a = -j;
- }
- else if( j)
- sign_ct [0] += c;
-
- if( a < mvnum_short)
- {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- }
- else
- {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- // bit 3 not always encoded.
-
- do
- bit_ct [k] [(a >> k) & 1] += c;
- while( --k >= 0);
- }
- } while( ++j <= mv_max);
- }
- */
-
- calc_prob(Pnew + mvpis_short, is_short_ct);
-
- calc_prob(Pnew + MVPsign, sign_ct);
-
- {
- vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
- int j = 0;
+ bit_ct [k] [(a >> k) & 1] += c;
+
+ while (--k >= 0);
+ }
+ } while (++j <= mv_max);
+ }
+
+ /*
+ {
+ int j = -mv_max;
+ do
+ {
+
+ const int c = events [mv_max + j];
+ int a = j;
+
+ if( j < 0)
+ {
+ sign_ct [1] += c;
+ a = -j;
+ }
+ else if( j)
+ sign_ct [0] += c;
+
+ if( a < mvnum_short)
+ {
+ is_short_ct [0] += c; // Short vector
+ short_ct [a] += c; // Magnitude distribution
+ }
+ else
+ {
+ int k = mvlong_width - 1;
+ is_short_ct [1] += c; // Long vector
+
+ // bit 3 not always encoded.
+
+ do
+ bit_ct [k] [(a >> k) & 1] += c;
+ while( --k >= 0);
+ }
+ } while( ++j <= mv_max);
+ }
+ */
+
+ calc_prob(Pnew + mvpis_short, is_short_ct);
+
+ calc_prob(Pnew + MVPsign, sign_ct);
+
+ {
+ vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
+ int j = 0;
+
+ vp8_tree_probs_from_distribution(
+ mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
+ p, short_bct, short_ct,
+ 256, 1
+ );
- vp8_tree_probs_from_distribution(
- mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
- p, short_bct, short_ct,
- 256, 1
- );
+ do
+ calc_prob(Pnew + MVPshort + j, short_bct[j]);
- do
- calc_prob(Pnew + MVPshort + j, short_bct[j]);
-
- while (++j < mvnum_short - 1);
- }
+ while (++j < mvnum_short - 1);
+ }
- {
- int j = 0;
+ {
+ int j = 0;
- do
- calc_prob(Pnew + MVPbits + j, bit_ct[j]);
+ do
+ calc_prob(Pnew + MVPbits + j, bit_ct[j]);
- while (++j < mvlong_width);
- }
+ while (++j < mvlong_width);
+ }
- update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++, updated);
+ update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++, updated);
- update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated);
+ update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated);
- {
- const vp8_prob *const new_p = Pnew + MVPshort;
- vp8_prob *const cur_p = Pcur + MVPshort;
+ {
+ const vp8_prob *const new_p = Pnew + MVPshort;
+ vp8_prob *const cur_p = Pcur + MVPshort;
- int j = 0;
+ int j = 0;
- do
+ do
- update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+ update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
- while (++j < mvnum_short - 1);
- }
+ while (++j < mvnum_short - 1);
+ }
- {
- const vp8_prob *const new_p = Pnew + MVPbits;
- vp8_prob *const cur_p = Pcur + MVPbits;
+ {
+ const vp8_prob *const new_p = Pnew + MVPbits;
+ vp8_prob *const cur_p = Pcur + MVPbits;
- int j = 0;
+ int j = 0;
- do
+ do
- update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+ update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
- while (++j < mvlong_width);
- }
+ while (++j < mvlong_width);
+ }
}
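
write_component_probs() reduces the signed-magnitude histogram events[] (indexed as mv_max + j for component value j) to the binary counts the coder needs: short versus long, positive versus negative, and per-bit counts for the long case. A toy sketch of that tallying with made-up sizes (libvpx uses mv_max = 1023 and mvnum_short = 8):

    #include <stdio.h>

    enum { MV_MAX = 4, NUM_SHORT = 2 };  /* toy sizes */

    int main(void) {
      /* events[MV_MAX + j] counts component value j, j in [-MV_MAX, MV_MAX]. */
      unsigned events[2 * MV_MAX + 1] = { 1, 0, 2, 5, 9, 6, 3, 0, 1 };
      unsigned is_short[2] = { 0, 0 }, sign[2] = { 0, 0 };
      int j;

      is_short[0] += events[MV_MAX];  /* j == 0 is always a short vector */
      for (j = 1; j <= MV_MAX; j++) {
        unsigned pos = events[MV_MAX + j], neg = events[MV_MAX - j];
        sign[0] += pos;
        sign[1] += neg;
        is_short[j < NUM_SHORT ? 0 : 1] += pos + neg;
      }
      printf("short/long: %u/%u  +/-: %u/%u\n",
             is_short[0], is_short[1], sign[0], sign[1]);
      return 0;
    }
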
-void vp8_write_mvprobs(VP8_COMP *cpi)
-{
- vp8_writer *const w = & cpi->bc;
- MV_CONTEXT *mvc = cpi->common.fc.mvc;
- int flags[2] = {0, 0};
+void vp8_write_mvprobs(VP8_COMP *cpi) {
+  vp8_writer *const w = &cpi->bc;
+ MV_CONTEXT *mvc = cpi->common.fc.mvc;
+ int flags[2] = {0, 0};
#ifdef ENTROPY_STATS
- active_section = 4;
+ active_section = 4;
#endif
- write_component_probs(
- w, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0], cpi->MVcount[0], 0, &flags[0]
- );
- write_component_probs(
- w, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1], cpi->MVcount[1], 1, &flags[1]
- );
+ write_component_probs(
+ w, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0], cpi->MVcount[0], 0, &flags[0]
+ );
+ write_component_probs(
+ w, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1], cpi->MVcount[1], 1, &flags[1]
+ );
- if (flags[0] || flags[1])
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
+ if (flags[0] || flags[1])
+ vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
#ifdef ENTROPY_STATS
- active_section = 5;
+ active_section = 5;
#endif
}
#if CONFIG_HIGH_PRECISION_MV
static void encode_mvcomponent_hp(
- vp8_writer *const w,
- const int v,
- const struct mv_context_hp *mvc
-)
-{
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short_hp) // Small
- {
- vp8_write(w, 0, p [mvpis_short_hp]);
- vp8_treed_write(w, vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
- if (!x)
- return; // no sign bit
- }
- else // Large
- {
- int i = 0;
+ vp8_writer *const w,
+ const int v,
+ const struct mv_context_hp *mvc
+) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v < 0 ? -v : v;
- vp8_write(w, 1, p [mvpis_short_hp]);
+ if (x < mvnum_short_hp) { // Small
+ vp8_write(w, 0, p [mvpis_short_hp]);
+ vp8_treed_write(w, vp8_small_mvtree_hp, p + MVPshort_hp, x,
+ mvnum_short_bits_hp);
+ if (!x)
+ return; // no sign bit
+ } else { // Large
+ int i = 0;
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
+ vp8_write(w, 1, p [mvpis_short_hp]);
- while (++i < mvnum_short_bits_hp);
+ do
+ vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
+ while (++i < mvnum_short_bits_hp);
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
+ i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
- while (--i > mvnum_short_bits_hp);
+ do
+ vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
- if (x & ~((2<<mvnum_short_bits_hp)-1))
- vp8_write(w, (x >> mvnum_short_bits_hp) & 1,
- p [MVPbits_hp + mvnum_short_bits_hp]);
- }
+ while (--i > mvnum_short_bits_hp);
+
+ if (x & ~((2 << mvnum_short_bits_hp) - 1))
+ vp8_write(w, (x >> mvnum_short_bits_hp) & 1,
+ p [MVPbits_hp + mvnum_short_bits_hp]);
+ }
- vp8_write(w, v < 0, p [MVPsign_hp]);
+ vp8_write(w, v < 0, p [MVPsign_hp]);
}
#if 0
static int max_mv_r = 0;
static int max_mv_c = 0;
#endif
void vp8_encode_motion_vector_hp(vp8_writer *w, const MV *mv,
- const MV_CONTEXT_HP *mvc)
-{
+ const MV_CONTEXT_HP *mvc) {
#if 0
- {
- if (abs(mv->row >> 1) > max_mv_r)
- {
- FILE *f = fopen("maxmv.stt", "a");
- max_mv_r = abs(mv->row >> 1);
- fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
-
- if ((abs(mv->row) / 2) != max_mv_r)
- fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
-
- fclose(f);
- }
-
- if (abs(mv->col >> 1) > max_mv_c)
- {
- FILE *f = fopen("maxmv.stt", "a");
- fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
- max_mv_c = abs(mv->col >> 1);
- fclose(f);
- }
+ {
+ if (abs(mv->row >> 1) > max_mv_r) {
+ FILE *f = fopen("maxmv.stt", "a");
+ max_mv_r = abs(mv->row >> 1);
+ fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
+
+ if ((abs(mv->row) / 2) != max_mv_r)
+ fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
+
+ fclose(f);
}
+
+ if (abs(mv->col >> 1) > max_mv_c) {
+ FILE *f = fopen("maxmv.stt", "a");
+ fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
+ max_mv_c = abs(mv->col >> 1);
+ fclose(f);
+ }
+ }
#endif
- encode_mvcomponent_hp(w, mv->row, &mvc[0]);
- encode_mvcomponent_hp(w, mv->col, &mvc[1]);
+ encode_mvcomponent_hp(w, mv->row, &mvc[0]);
+ encode_mvcomponent_hp(w, mv->col, &mvc[1]);
#ifdef DEBUG_ENC_MV
- {
+ {
int i;
printf("%d (hp): %d %d\n", enc_mvcount++, mv->row, mv->col);
- //for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]);
- //printf("\n");
- //for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[1])->prob[i]);
- //printf("\n");
+ // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]);
+ // printf("\n");
+ // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[1])->prob[i]);
+ // printf("\n");
fflush(stdout);
- }
+ }
#endif
}
static unsigned int cost_mvcomponent_hp(const int v,
- const struct mv_context_hp *mvc)
-{
- const vp8_prob *p = mvc->prob;
- const int x = v; //v<0? -v:v;
- unsigned int cost;
-
- if (x < mvnum_short_hp)
- {
- cost = vp8_cost_zero(p [mvpis_short_hp])
- + vp8_treed_cost(vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
-
- if (!x)
- return cost;
- }
- else
- {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short_hp]);
+ const struct mv_context_hp *mvc) {
+ const vp8_prob *p = mvc->prob;
+ const int x = v; // v<0? -v:v;
+ unsigned int cost;
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
+ if (x < mvnum_short_hp) {
+ cost = vp8_cost_zero(p [mvpis_short_hp])
+ + vp8_treed_cost(vp8_small_mvtree_hp, p + MVPshort_hp, x,
+ mvnum_short_bits_hp);
- while (++i < mvnum_short_bits_hp);
+ if (!x)
+ return cost;
+ } else {
+ int i = 0;
+ cost = vp8_cost_one(p [mvpis_short_hp]);
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
+ do
+ cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
+ while (++i < mvnum_short_bits_hp);
- while (--i > mvnum_short_bits_hp);
+ i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
- if (x & ~((2<<mvnum_short_bits_hp)-1))
- cost += vp8_cost_bit(p [MVPbits_hp + mvnum_short_bits_hp],
- (x >> mvnum_short_bits_hp) & 1);
- }
+ do
+ cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
+
+ while (--i > mvnum_short_bits_hp);
+
+ if (x & ~((2 << mvnum_short_bits_hp) - 1))
+ cost += vp8_cost_bit(p [MVPbits_hp + mvnum_short_bits_hp],
+ (x >> mvnum_short_bits_hp) & 1);
+ }
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
+ return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
}
void vp8_build_component_cost_table_hp(int *mvcost[2],
const MV_CONTEXT_HP *mvc,
- int mvc_flag[2])
-{
- int i = 1; //-mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
+ int mvc_flag[2]) {
+ int i = 1; // -mv_max;
+ unsigned int cost0 = 0;
+ unsigned int cost1 = 0;
- vp8_clear_system_state();
+ vp8_clear_system_state();
- i = 1;
+ i = 1;
- if (mvc_flag[0])
- {
- mvcost [0] [0] = cost_mvcomponent_hp(0, &mvc[0]);
+ if (mvc_flag[0]) {
+ mvcost [0] [0] = cost_mvcomponent_hp(0, &mvc[0]);
- do
- {
- //mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent_hp(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign_hp]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign_hp]);
- }
- while (++i <= mv_max_hp);
- }
+ do {
+ // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
+ cost0 = cost_mvcomponent_hp(i, &mvc[0]);
- i = 1;
+ mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign_hp]);
+ mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign_hp]);
+ } while (++i <= mv_max_hp);
+ }
- if (mvc_flag[1])
- {
- mvcost [1] [0] = cost_mvcomponent_hp(0, &mvc[1]);
+ i = 1;
- do
- {
- //mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent_hp(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign_hp]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign_hp]);
- }
- while (++i <= mv_max_hp);
- }
-}
+ if (mvc_flag[1]) {
+ mvcost [1] [0] = cost_mvcomponent_hp(0, &mvc[1]);
+ do {
+ // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
+ cost1 = cost_mvcomponent_hp(i, &mvc[1]);
-static void write_component_probs_hp(
- vp8_writer *const w,
- struct mv_context_hp *cur_mvc,
- const struct mv_context_hp *default_mvc_,
- const struct mv_context_hp *update_mvc,
- const unsigned int events [MVvals_hp],
- unsigned int rc,
- int *updated
-)
-{
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width_hp] [2];
-
- unsigned int short_ct [mvnum_short_hp];
- unsigned int short_bct [mvnum_short_hp-1] [2];
-
- vp8_prob Pnew [MVPcount_hp];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount_hp);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- //j=0
- {
- const int c = events [mv_max_hp];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
+ mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign_hp]);
+ mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign_hp]);
+ } while (++i <= mv_max_hp);
+ }
+}
- //j: 1 ~ mv_max (1023)
- {
- int j = 1;
+static void write_component_probs_hp(
+ vp8_writer *const w,
+ struct mv_context_hp *cur_mvc,
+ const struct mv_context_hp *default_mvc_,
+ const struct mv_context_hp *update_mvc,
+ const unsigned int events [MVvals_hp],
+ unsigned int rc,
+ int *updated
+) {
+ vp8_prob *Pcur = cur_mvc->prob;
+ const vp8_prob *default_mvc = default_mvc_->prob;
+ const vp8_prob *Pupdate = update_mvc->prob;
+ unsigned int is_short_ct[2], sign_ct[2];
+
+ unsigned int bit_ct [mvlong_width_hp] [2];
+
+ unsigned int short_ct [mvnum_short_hp];
+ unsigned int short_bct [mvnum_short_hp - 1] [2];
+
+ vp8_prob Pnew [MVPcount_hp];
+
+ (void) rc;
+ vp8_copy_array(Pnew, default_mvc, MVPcount_hp);
+
+ vp8_zero(is_short_ct)
+ vp8_zero(sign_ct)
+ vp8_zero(bit_ct)
+ vp8_zero(short_ct)
+ vp8_zero(short_bct)
+
+
+ // j=0
+ {
+ const int c = events [mv_max_hp];
+
+ is_short_ct [0] += c; // Short vector
+ short_ct [0] += c; // Magnitude distribution
+ }
+
+ // j: 1 ~ mv_max (1023)
+ {
+ int j = 1;
+
+ do {
+ const int c1 = events [mv_max_hp + j]; // positive
+ const int c2 = events [mv_max_hp - j]; // negative
+ const int c = c1 + c2;
+ int a = j;
+
+ sign_ct [0] += c1;
+ sign_ct [1] += c2;
+
+ if (a < mvnum_short_hp) {
+ is_short_ct [0] += c; // Short vector
+ short_ct [a] += c; // Magnitude distribution
+ } else {
+ int k = mvlong_width_hp - 1;
+ is_short_ct [1] += c; // Long vector
+
+ /* bit 3 not always encoded. */
do
- {
- const int c1 = events [mv_max_hp + j]; //positive
- const int c2 = events [mv_max_hp - j]; //negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short_hp)
- {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- }
- else
- {
- int k = mvlong_width_hp - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- }
- while (++j <= mv_max_hp);
- }
+ bit_ct [k] [(a >> k) & 1] += c;
- calc_prob(Pnew + mvpis_short_hp, is_short_ct);
+ while (--k >= 0);
+ }
+ } while (++j <= mv_max_hp);
+ }
- calc_prob(Pnew + MVPsign_hp, sign_ct);
+ calc_prob(Pnew + mvpis_short_hp, is_short_ct);
- {
- vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
- int j = 0;
+ calc_prob(Pnew + MVPsign_hp, sign_ct);
- vp8_tree_probs_from_distribution(
- mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
- p, short_bct, short_ct,
- 256, 1
- );
+ {
+ vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
+ int j = 0;
- do
- calc_prob(Pnew + MVPshort_hp + j, short_bct[j]);
+ vp8_tree_probs_from_distribution(
+ mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
+ p, short_bct, short_ct,
+ 256, 1
+ );
- while (++j < mvnum_short_hp - 1);
- }
+ do
+ calc_prob(Pnew + MVPshort_hp + j, short_bct[j]);
- {
- int j = 0;
+ while (++j < mvnum_short_hp - 1);
+ }
- do
- calc_prob(Pnew + MVPbits_hp + j, bit_ct[j]);
+ {
+ int j = 0;
- while (++j < mvlong_width_hp);
- }
+ do
+ calc_prob(Pnew + MVPbits_hp + j, bit_ct[j]);
- update(w, is_short_ct, Pcur + mvpis_short_hp, Pnew[mvpis_short_hp],
- *Pupdate++, updated);
+ while (++j < mvlong_width_hp);
+ }
- update(w, sign_ct, Pcur + MVPsign_hp, Pnew[MVPsign_hp], *Pupdate++,
- updated);
+ update(w, is_short_ct, Pcur + mvpis_short_hp, Pnew[mvpis_short_hp],
+ *Pupdate++, updated);
- {
- const vp8_prob *const new_p = Pnew + MVPshort_hp;
- vp8_prob *const cur_p = Pcur + MVPshort_hp;
+ update(w, sign_ct, Pcur + MVPsign_hp, Pnew[MVPsign_hp], *Pupdate++,
+ updated);
- int j = 0;
+ {
+ const vp8_prob *const new_p = Pnew + MVPshort_hp;
+ vp8_prob *const cur_p = Pcur + MVPshort_hp;
- do
+ int j = 0;
- update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+ do
- while (++j < mvnum_short_hp - 1);
- }
+ update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
- {
- const vp8_prob *const new_p = Pnew + MVPbits_hp;
- vp8_prob *const cur_p = Pcur + MVPbits_hp;
+ while (++j < mvnum_short_hp - 1);
+ }
- int j = 0;
+ {
+ const vp8_prob *const new_p = Pnew + MVPbits_hp;
+ vp8_prob *const cur_p = Pcur + MVPbits_hp;
- do
+ int j = 0;
- update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+ do
- while (++j < mvlong_width_hp);
- }
+ update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
+
+ while (++j < mvlong_width_hp);
+ }
}
-void vp8_write_mvprobs_hp(VP8_COMP *cpi)
-{
- vp8_writer *const w = & cpi->bc;
- MV_CONTEXT_HP *mvc = cpi->common.fc.mvc_hp;
- int flags[2] = {0, 0};
+void vp8_write_mvprobs_hp(VP8_COMP *cpi) {
+  vp8_writer *const w = &cpi->bc;
+ MV_CONTEXT_HP *mvc = cpi->common.fc.mvc_hp;
+ int flags[2] = {0, 0};
#ifdef ENTROPY_STATS
- active_section = 4;
+ active_section = 4;
#endif
- write_component_probs_hp(
- w, &mvc[0], &vp8_default_mv_context_hp[0], &vp8_mv_update_probs_hp[0],
- cpi->MVcount_hp[0], 0, &flags[0]
- );
- write_component_probs_hp(
- w, &mvc[1], &vp8_default_mv_context_hp[1], &vp8_mv_update_probs_hp[1],
- cpi->MVcount_hp[1], 1, &flags[1]
- );
-
- if (flags[0] || flags[1])
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
- (const MV_CONTEXT_HP *)
- cpi->common.fc.mvc_hp, flags);
+ write_component_probs_hp(
+ w, &mvc[0], &vp8_default_mv_context_hp[0], &vp8_mv_update_probs_hp[0],
+ cpi->MVcount_hp[0], 0, &flags[0]
+ );
+ write_component_probs_hp(
+ w, &mvc[1], &vp8_default_mv_context_hp[1], &vp8_mv_update_probs_hp[1],
+ cpi->MVcount_hp[1], 1, &flags[1]
+ );
+
+ if (flags[0] || flags[1])
+ vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
+ (const MV_CONTEXT_HP *)
+ cpi->common.fc.mvc_hp, flags);
#ifdef ENTROPY_STATS
- active_section = 5;
+ active_section = 5;
#endif
}
#endif /* CONFIG_HIGH_PRECISION_MV */
diff --git a/vp8/encoder/find_rotation.c b/vp8/encoder/find_rotation.c
index 742c0ba81..59a0a722f 100644
--- a/vp8/encoder/find_rotation.c
+++ b/vp8/encoder/find_rotation.c
@@ -16,43 +16,40 @@
int vp8_find_best_rotation(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv,
int_mv *ref_mv, int *bri, int error_per_bit,
const vp8_variance_fn_ptr_t *vfp, int *mvcost[2],
- int *distortion, unsigned int *sse1)
-{
- unsigned char *z = (*(b->base_src) + b->src);
-
- int ri;
-
- int y_stride;
-
- unsigned int besterr;
- int br = bestmv->as_mv.row;
- int bc = bestmv->as_mv.col;
- unsigned char *y = *(d->base_pre) + d->pre + br * d->pre_stride + bc;
- y_stride = d->pre_stride;
-
- // calculate central point error
- besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
- *distortion = besterr;
-
- // find the best matching rotation
- *bri = 5;
- for (ri = 0; ri < ROTATIONS; ri++)
- {
- unsigned int this_err;
- unsigned char pb[256];
- predict_rotated_16x16(ri, y, y_stride, pb, 16);
- this_err = vfp->vf(pb, 16, z, b->src_stride, sse1);
-
- if (this_err < besterr)
- {
- *bri = ri;
- besterr = this_err;
- }
+ int *distortion, unsigned int *sse1) {
+ unsigned char *z = (*(b->base_src) + b->src);
+
+ int ri;
+
+ int y_stride;
+
+ unsigned int besterr;
+ int br = bestmv->as_mv.row;
+ int bc = bestmv->as_mv.col;
+ unsigned char *y = *(d->base_pre) + d->pre + br * d->pre_stride + bc;
+ y_stride = d->pre_stride;
+
+ // calculate central point error
+ besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = besterr;
+
+ // find the best matching rotation
+ *bri = 5;
+ for (ri = 0; ri < ROTATIONS; ri++) {
+ unsigned int this_err;
+ unsigned char pb[256];
+ predict_rotated_16x16(ri, y, y_stride, pb, 16);
+ this_err = vfp->vf(pb, 16, z, b->src_stride, sse1);
+
+ if (this_err < besterr) {
+ *bri = ri;
+ besterr = this_err;
}
- *sse1 = besterr;
- *distortion = besterr;
+ }
+ *sse1 = besterr;
+ *distortion = besterr;
- return 0;
+ return 0;
}
#endif
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 77076ff5f..6715c80f6 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -30,7 +30,7 @@
#include "vp8/common/quant_common.h"
#include "encodemv.h"
-//#define OUTPUT_FPF 1
+// #define OUTPUT_FPF 1
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
@@ -67,788 +67,736 @@ extern void vp8_alloc_compressor_data(VP8_COMP *cpi);
static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame);
-static int select_cq_level( int qindex )
-{
- int ret_val = QINDEX_RANGE - 1;
- int i;
+static int select_cq_level(int qindex) {
+ int ret_val = QINDEX_RANGE - 1;
+ int i;
- double target_q = ( vp8_convert_qindex_to_q( qindex ) * 0.5847 ) + 1.0;
+ double target_q = (vp8_convert_qindex_to_q(qindex) * 0.5847) + 1.0;
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- if ( target_q <= vp8_convert_qindex_to_q( i ) )
- {
- ret_val = i;
- break;
- }
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (target_q <= vp8_convert_qindex_to_q(i)) {
+ ret_val = i;
+ break;
}
+ }
- return ret_val;
+ return ret_val;
}
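
In effect select_cq_level() finds the smallest quantizer index whose real q value reaches target_q = q(qindex) * 0.5847 + 1.0; since vp8_convert_qindex_to_q() is monotonic, this yields the constrained-quality level a bit over half as coarse as the input. For instance, if q(qindex) came out to 20.0, target_q would be about 12.69 and the loop would return the first index whose q is at least that (the exact index depends on the q lookup table).
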
// Resets the first pass file to the given position using a relative seek from the current position
-static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position)
-{
- cpi->twopass.stats_in = Position;
+static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position) {
+ cpi->twopass.stats_in = Position;
}
-static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame)
-{
- if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
- return EOF;
+static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
- *next_frame = *cpi->twopass.stats_in;
- return 1;
+ *next_frame = *cpi->twopass.stats_in;
+ return 1;
}
// Read frame stats at an offset from the current position
-static int read_frame_stats( VP8_COMP *cpi,
- FIRSTPASS_STATS *frame_stats,
- int offset )
-{
- FIRSTPASS_STATS * fps_ptr = cpi->twopass.stats_in;
-
- // Check legality of offset
- if ( offset >= 0 )
- {
- if ( &fps_ptr[offset] >= cpi->twopass.stats_in_end )
- return EOF;
- }
- else if ( offset < 0 )
- {
- if ( &fps_ptr[offset] < cpi->twopass.stats_in_start )
- return EOF;
- }
-
- *frame_stats = fps_ptr[offset];
- return 1;
+static int read_frame_stats(VP8_COMP *cpi,
+ FIRSTPASS_STATS *frame_stats,
+ int offset) {
+ FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in;
+
+ // Check legality of offset
+ if (offset >= 0) {
+ if (&fps_ptr[offset] >= cpi->twopass.stats_in_end)
+ return EOF;
+ } else if (offset < 0) {
+ if (&fps_ptr[offset] < cpi->twopass.stats_in_start)
+ return EOF;
+ }
+
+ *frame_stats = fps_ptr[offset];
+ return 1;
}
-static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps)
-{
- if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
- return EOF;
+static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
- *fps = *cpi->twopass.stats_in;
- cpi->twopass.stats_in =
- (void*)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
- return 1;
+ *fps = *cpi->twopass.stats_in;
+ cpi->twopass.stats_in =
+ (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
+ return 1;
}
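
The cast arithmetic in input_stats() advances the pointer by exactly one packed record; since stats_in is a FIRSTPASS_STATS pointer (see read_frame_stats() above), it is equivalent to cpi->twopass.stats_in + 1, with the char-pointer detour only making the byte stride explicit.
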
static void output_stats(const VP8_COMP *cpi,
struct vpx_codec_pkt_list *pktlist,
- FIRSTPASS_STATS *stats)
-{
- struct vpx_codec_cx_pkt pkt;
- pkt.kind = VPX_CODEC_STATS_PKT;
- pkt.data.twopass_stats.buf = stats;
- pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
- vpx_codec_pkt_list_add(pktlist, &pkt);
+ FIRSTPASS_STATS *stats) {
+ struct vpx_codec_cx_pkt pkt;
+ pkt.kind = VPX_CODEC_STATS_PKT;
+ pkt.data.twopass_stats.buf = stats;
+ pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
+ vpx_codec_pkt_list_add(pktlist, &pkt);
// TEMP debug code
#if OUTPUT_FPF
- {
- FILE *fpfile;
- fpfile = fopen("firstpass.stt", "a");
-
- fprintf(fpfile, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f"
- "%12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f"
- "%12.0f %12.0f %12.4f %12.0f %12.0f %12.4f\n",
- stats->frame,
- stats->intra_error,
- stats->coded_error,
- stats->sr_coded_error,
- stats->ssim_weighted_pred_err,
- stats->pcnt_inter,
- stats->pcnt_motion,
- stats->pcnt_second_ref,
- stats->pcnt_neutral,
- stats->MVr,
- stats->mvr_abs,
- stats->MVc,
- stats->mvc_abs,
- stats->MVrv,
- stats->MVcv,
- stats->mv_in_out_count,
- stats->new_mv_count,
- stats->count,
- stats->duration);
- fclose(fpfile);
- }
+ {
+ FILE *fpfile;
+ fpfile = fopen("firstpass.stt", "a");
+
+ fprintf(fpfile, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f"
+ "%12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f"
+ "%12.0f %12.0f %12.4f %12.0f %12.0f %12.4f\n",
+ stats->frame,
+ stats->intra_error,
+ stats->coded_error,
+ stats->sr_coded_error,
+ stats->ssim_weighted_pred_err,
+ stats->pcnt_inter,
+ stats->pcnt_motion,
+ stats->pcnt_second_ref,
+ stats->pcnt_neutral,
+ stats->MVr,
+ stats->mvr_abs,
+ stats->MVc,
+ stats->mvc_abs,
+ stats->MVrv,
+ stats->MVcv,
+ stats->mv_in_out_count,
+ stats->new_mv_count,
+ stats->count,
+ stats->duration);
+ fclose(fpfile);
+ }
#endif
}
-static void zero_stats(FIRSTPASS_STATS *section)
-{
- section->frame = 0.0;
- section->intra_error = 0.0;
- section->coded_error = 0.0;
- section->sr_coded_error = 0.0;
- section->ssim_weighted_pred_err = 0.0;
- section->pcnt_inter = 0.0;
- section->pcnt_motion = 0.0;
- section->pcnt_second_ref = 0.0;
- section->pcnt_neutral = 0.0;
- section->MVr = 0.0;
- section->mvr_abs = 0.0;
- section->MVc = 0.0;
- section->mvc_abs = 0.0;
- section->MVrv = 0.0;
- section->MVcv = 0.0;
- section->mv_in_out_count = 0.0;
- section->new_mv_count = 0.0;
- section->count = 0.0;
- section->duration = 1.0;
+static void zero_stats(FIRSTPASS_STATS *section) {
+ section->frame = 0.0;
+ section->intra_error = 0.0;
+ section->coded_error = 0.0;
+ section->sr_coded_error = 0.0;
+ section->ssim_weighted_pred_err = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
+ section->pcnt_second_ref = 0.0;
+ section->pcnt_neutral = 0.0;
+ section->MVr = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
+ section->new_mv_count = 0.0;
+ section->count = 0.0;
+ section->duration = 1.0;
}
-static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame)
-{
- section->frame += frame->frame;
- section->intra_error += frame->intra_error;
- section->coded_error += frame->coded_error;
- section->sr_coded_error += frame->sr_coded_error;
- section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err;
- section->pcnt_inter += frame->pcnt_inter;
- section->pcnt_motion += frame->pcnt_motion;
- section->pcnt_second_ref += frame->pcnt_second_ref;
- section->pcnt_neutral += frame->pcnt_neutral;
- section->MVr += frame->MVr;
- section->mvr_abs += frame->mvr_abs;
- section->MVc += frame->MVc;
- section->mvc_abs += frame->mvc_abs;
- section->MVrv += frame->MVrv;
- section->MVcv += frame->MVcv;
- section->mv_in_out_count += frame->mv_in_out_count;
- section->new_mv_count += frame->new_mv_count;
- section->count += frame->count;
- section->duration += frame->duration;
+static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame += frame->frame;
+ section->intra_error += frame->intra_error;
+ section->coded_error += frame->coded_error;
+ section->sr_coded_error += frame->sr_coded_error;
+ section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err;
+ section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_motion += frame->pcnt_motion;
+ section->pcnt_second_ref += frame->pcnt_second_ref;
+ section->pcnt_neutral += frame->pcnt_neutral;
+ section->MVr += frame->MVr;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
+ section->new_mv_count += frame->new_mv_count;
+ section->count += frame->count;
+ section->duration += frame->duration;
}
-static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame)
-{
- section->frame -= frame->frame;
- section->intra_error -= frame->intra_error;
- section->coded_error -= frame->coded_error;
- section->sr_coded_error -= frame->sr_coded_error;
- section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err;
- section->pcnt_inter -= frame->pcnt_inter;
- section->pcnt_motion -= frame->pcnt_motion;
- section->pcnt_second_ref -= frame->pcnt_second_ref;
- section->pcnt_neutral -= frame->pcnt_neutral;
- section->MVr -= frame->MVr;
- section->mvr_abs -= frame->mvr_abs;
- section->MVc -= frame->MVc;
- section->mvc_abs -= frame->mvc_abs;
- section->MVrv -= frame->MVrv;
- section->MVcv -= frame->MVcv;
- section->mv_in_out_count -= frame->mv_in_out_count;
- section->new_mv_count -= frame->new_mv_count;
- section->count -= frame->count;
- section->duration -= frame->duration;
+static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame -= frame->frame;
+ section->intra_error -= frame->intra_error;
+ section->coded_error -= frame->coded_error;
+ section->sr_coded_error -= frame->sr_coded_error;
+ section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err;
+ section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_motion -= frame->pcnt_motion;
+ section->pcnt_second_ref -= frame->pcnt_second_ref;
+ section->pcnt_neutral -= frame->pcnt_neutral;
+ section->MVr -= frame->MVr;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
+ section->new_mv_count -= frame->new_mv_count;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
}
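accumulate_stats and subtract_stats are exact mirrors so that later code can maintain a running total over a moving section of frames: add each frame entering the window, subtract each frame leaving it. The same pattern in miniature, as a hypothetical sliding-window sum:

/* Hypothetical sliding-window sum using the accumulate/subtract pattern:
 * add the newest value, subtract the value that falls out of the window. */
static void slide_window(double *window, int n, int *pos,
                         double newest, double *sum) {
  *sum += newest;         /* accumulate the incoming frame */
  *sum -= window[*pos];   /* subtract the outgoing frame */
  window[*pos] = newest;
  *pos = (*pos + 1) % n;
}

int main(void) {
  double w[3] = {0, 0, 0};
  double sum = 0;
  int pos = 0;
  slide_window(w, 3, &pos, 5.0, &sum);  /* window {5,0,0}, sum 5 */
  slide_window(w, 3, &pos, 7.0, &sum);  /* window {5,7,0}, sum 12 */
  return (int)sum - 12;                 /* returns 0 */
}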
-static void avg_stats(FIRSTPASS_STATS *section)
-{
- if (section->count < 1.0)
- return;
-
- section->intra_error /= section->count;
- section->coded_error /= section->count;
- section->sr_coded_error /= section->count;
- section->ssim_weighted_pred_err /= section->count;
- section->pcnt_inter /= section->count;
- section->pcnt_second_ref /= section->count;
- section->pcnt_neutral /= section->count;
- section->pcnt_motion /= section->count;
- section->MVr /= section->count;
- section->mvr_abs /= section->count;
- section->MVc /= section->count;
- section->mvc_abs /= section->count;
- section->MVrv /= section->count;
- section->MVcv /= section->count;
- section->mv_in_out_count /= section->count;
- section->duration /= section->count;
+static void avg_stats(FIRSTPASS_STATS *section) {
+ if (section->count < 1.0)
+ return;
+
+ section->intra_error /= section->count;
+ section->coded_error /= section->count;
+ section->sr_coded_error /= section->count;
+ section->ssim_weighted_pred_err /= section->count;
+ section->pcnt_inter /= section->count;
+ section->pcnt_second_ref /= section->count;
+ section->pcnt_neutral /= section->count;
+ section->pcnt_motion /= section->count;
+ section->MVr /= section->count;
+ section->mvr_abs /= section->count;
+ section->MVc /= section->count;
+ section->mvc_abs /= section->count;
+ section->MVrv /= section->count;
+ section->MVcv /= section->count;
+ section->mv_in_out_count /= section->count;
+ section->duration /= section->count;
}
// Calculate a modified Error used in distributing bits between easier and harder frames
-static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
-{
- double av_err = ( cpi->twopass.total_stats->ssim_weighted_pred_err /
- cpi->twopass.total_stats->count );
- double this_err = this_frame->ssim_weighted_pred_err;
- double modified_err;
-
- if (this_err > av_err)
- modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1);
- else
- modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2);
-
- return modified_err;
+static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ double av_err = (cpi->twopass.total_stats->ssim_weighted_pred_err /
+ cpi->twopass.total_stats->count);
+ double this_err = this_frame->ssim_weighted_pred_err;
+ double modified_err;
+
+ if (this_err > av_err)
+ modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1);
+ else
+ modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2);
+
+ return modified_err;
}
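The mapping above compresses outliers toward the clip average: modified_err = av_err * (this_err / av_err)^p, with p = POW1 for frames above the average and p = POW2 below it. A standalone sketch of the same curve, assuming an illustrative exponent of 0.90 for both cases (the real POW1/POW2 and DOUBLE_DIVIDE_CHECK are defined earlier in firstpass.c):

#include <math.h>
#include <stdio.h>

/* Illustrative exponents; the real POW1/POW2 are defined elsewhere. */
#define EX_POW1 0.90
#define EX_POW2 0.90

static double modified_err_sketch(double this_err, double av_err) {
  double p = (this_err > av_err) ? EX_POW1 : EX_POW2;
  return av_err * pow(this_err / av_err, p);
}

int main(void) {
  /* A frame at 4x the clip average is pulled down to ~3.48x. */
  printf("%f\n", modified_err_sketch(400.0, 100.0));  /* ~348.2 */
  return 0;
}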
static const double weight_table[256] = {
-0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
-0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
-0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
-0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
-0.020000, 0.031250, 0.062500, 0.093750, 0.125000, 0.156250, 0.187500, 0.218750,
-0.250000, 0.281250, 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750,
-0.500000, 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750,
-0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, 0.968750,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
-1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.031250, 0.062500, 0.093750, 0.125000, 0.156250, 0.187500, 0.218750,
+ 0.250000, 0.281250, 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750,
+ 0.500000, 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750,
+ 0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, 0.968750,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000
};
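The table above is a clamped linear ramp in luma level: 0.02 up to level 32, then rising in steps of 1/32 until it saturates at 1.0 from level 64 onward. A sketch that regenerates the same 256 values under that reading:

#include <stdio.h>

/* Regenerate the ramp: w(i) = clamp((i - 32) / 32, 0.02, 1.0). */
int main(void) {
  int i;
  for (i = 0; i < 256; i++) {
    double w = (i - 32) / 32.0;
    if (w < 0.02) w = 0.02;
    if (w > 1.0)  w = 1.0;
    printf("%f%s", w, (i % 8 == 7) ? ",\n" : ", ");
  }
  return 0;
}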
-static double simple_weight(YV12_BUFFER_CONFIG *source)
-{
- int i, j;
-
- unsigned char *src = source->y_buffer;
- double sum_weights = 0.0;
-
- // Loop throught the Y plane raw examining levels and creating a weight for the image
- i = source->y_height;
- do
- {
- j = source->y_width;
- do
- {
- sum_weights += weight_table[ *src];
- src++;
- }while(--j);
- src -= source->y_width;
- src += source->y_stride;
- }while(--i);
-
- sum_weights /= (source->y_height * source->y_width);
-
- return sum_weights;
+static double simple_weight(YV12_BUFFER_CONFIG *source) {
+ int i, j;
+
+ unsigned char *src = source->y_buffer;
+ double sum_weights = 0.0;
+
+ // Loop through the raw Y plane, examining levels to create a weight for the image
+ i = source->y_height;
+ do {
+ j = source->y_width;
+ do {
+ sum_weights += weight_table[*src];
+ src++;
+ } while (--j);
+ src -= source->y_width;
+ src += source->y_stride;
+ } while (--i);
+
+ sum_weights /= (source->y_height * source->y_width);
+
+ return sum_weights;
}
// This function returns the current per frame maximum bitrate target
-static int frame_max_bits(VP8_COMP *cpi)
-{
- // Max allocation for a single frame based on the max section guidelines passed in and how many bits are left
- int max_bits;
+static int frame_max_bits(VP8_COMP *cpi) {
+ // Max allocation for a single frame based on the max section guidelines passed in and how many bits are left
+ int max_bits;
- // For VBR base this on the bits and frames left plus the two_pass_vbrmax_section rate passed in by the user
- max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats->count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
+ // For VBR base this on the bits and frames left plus the two_pass_vbrmax_section rate passed in by the user
+ max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats->count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0));
- // Trap case where we are out of bits
- if (max_bits < 0)
- max_bits = 0;
+ // Trap case where we are out of bits
+ if (max_bits < 0)
+ max_bits = 0;
- return max_bits;
+ return max_bits;
}
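Concretely, the cap is (bits left / frames left) scaled by two_pass_vbrmax_section interpreted as a percentage. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void) {
  /* Hypothetical numbers: 10 Mbit left over 100 remaining frames,
   * with a 400% two_pass_vbrmax_section setting. */
  double bits_left = 10000000.0;
  double frames_left = 100.0;
  double vbrmax_section_pct = 400.0;
  int max_bits = (int)((bits_left / frames_left) * (vbrmax_section_pct / 100.0));
  printf("%d\n", max_bits);  /* 400000: one frame may take 4x the average */
  return 0;
}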
-void vp8_init_first_pass(VP8_COMP *cpi)
-{
- zero_stats(cpi->twopass.total_stats);
+void vp8_init_first_pass(VP8_COMP *cpi) {
+ zero_stats(cpi->twopass.total_stats);
}
-void vp8_end_first_pass(VP8_COMP *cpi)
-{
- output_stats(cpi, cpi->output_pkt_list, cpi->twopass.total_stats);
+void vp8_end_first_pass(VP8_COMP *cpi) {
+ output_stats(cpi, cpi->output_pkt_list, cpi->twopass.total_stats);
}
-static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG * recon_buffer, int * best_motion_err, int recon_yoffset )
-{
- MACROBLOCKD * const xd = & x->e_mbd;
- BLOCK *b = &x->block[0];
- BLOCKD *d = &x->e_mbd.block[0];
+static void zz_motion_search(VP8_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
- unsigned char *src_ptr = (*(b->base_src) + b->src);
- int src_stride = b->src_stride;
- unsigned char *ref_ptr;
- int ref_stride=d->pre_stride;
+ unsigned char *src_ptr = (*(b->base_src) + b->src);
+ int src_stride = b->src_stride;
+ unsigned char *ref_ptr;
+ int ref_stride = d->pre_stride;
- // Set up pointers for this macro block recon buffer
- xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+ // Set up pointers for this macro block recon buffer
+ xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
- ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
+ ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre);
- VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+ VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16)(src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
}
static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *ref_mv, MV *best_mv,
YV12_BUFFER_CONFIG *recon_buffer,
- int *best_motion_err, int recon_yoffset )
-{
- MACROBLOCKD *const xd = & x->e_mbd;
- BLOCK *b = &x->block[0];
- BLOCKD *d = &x->e_mbd.block[0];
- int num00;
-
- int_mv tmp_mv;
- int_mv ref_mv_full;
-
- int tmp_err;
- int step_param = 3;
- int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
- int n;
- vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
- int new_mv_mode_penalty = 256;
-
- // override the default variance function to use MSE
- v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
-
- // Set up pointers for this macro block recon buffer
- xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
-
- // Initial step/diamond search centred on best mv
- tmp_mv.as_int = 0;
- ref_mv_full.as_mv.col = ref_mv->as_mv.col>>3;
- ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
- tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
- x->sadperbit16, &num00, &v_fn_ptr,
- XMVCOST, ref_mv);
- if ( tmp_err < INT_MAX-new_mv_mode_penalty )
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ int num00;
+
+ int_mv tmp_mv;
+ int_mv ref_mv_full;
+
+ int tmp_err;
+ int step_param = 3;
+ int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+ int n;
+ vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ int new_mv_mode_penalty = 256;
+
+ // override the default variance function to use MSE
+ v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+
+ // Set up pointers for this macro block recon buffer
+ xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
+
+ // Initial step/diamond search centred on best mv
+ tmp_mv.as_int = 0;
+ ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
+ tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
+ x->sadperbit16, &num00, &v_fn_ptr,
+ XMVCOST, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
+ tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+
+ // Further step/diamond searches as necessary
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
+ step_param + n, x->sadperbit16,
+ &num00, &v_fn_ptr,
+ XMVCOST, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
tmp_err += new_mv_mode_penalty;
- if (tmp_err < *best_motion_err)
- {
+ if (tmp_err < *best_motion_err) {
*best_motion_err = tmp_err;
best_mv->row = tmp_mv.as_mv.row;
best_mv->col = tmp_mv.as_mv.col;
+ }
}
-
- // Further step/diamond searches as necessary
- n = num00;
- num00 = 0;
-
- while (n < further_steps)
- {
- n++;
-
- if (num00)
- num00--;
- else
- {
- tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
- step_param + n, x->sadperbit16,
- &num00, &v_fn_ptr,
- XMVCOST, ref_mv);
- if ( tmp_err < INT_MAX-new_mv_mode_penalty )
- tmp_err += new_mv_mode_penalty;
-
- if (tmp_err < *best_motion_err)
- {
- *best_motion_err = tmp_err;
- best_mv->row = tmp_mv.as_mv.row;
- best_mv->col = tmp_mv.as_mv.col;
- }
- }
- }
+ }
}
-void vp8_first_pass(VP8_COMP *cpi)
-{
- int mb_row, mb_col;
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
-
- int recon_yoffset, recon_uvoffset;
- YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
- YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
- YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
- int recon_y_stride = lst_yv12->y_stride;
- int recon_uv_stride = lst_yv12->uv_stride;
- int64_t intra_error = 0;
- int64_t coded_error = 0;
- int64_t sr_coded_error = 0;
-
- int sum_mvr = 0, sum_mvc = 0;
- int sum_mvr_abs = 0, sum_mvc_abs = 0;
- int sum_mvrs = 0, sum_mvcs = 0;
- int mvcount = 0;
- int intercount = 0;
- int second_ref_count = 0;
- int intrapenalty = 256;
- int neutral_count = 0;
- int new_mv_count = 0;
- int sum_in_vectors = 0;
- uint32_t lastmv_as_int = 0;
-
- int_mv zero_ref_mv;
-
- zero_ref_mv.as_int = 0;
-
- vp8_clear_system_state(); //__asm emms;
-
- x->src = * cpi->Source;
- xd->pre = *lst_yv12;
- xd->dst = *new_yv12;
-
- x->partition_info = x->pi;
-
- xd->mode_info_context = cm->mi;
-
- vp8_build_block_offsets(x);
-
- vp8_setup_block_dptrs(&x->e_mbd);
-
- vp8_setup_block_ptrs(x);
-
- // set up frame new frame for intra coded blocks
- vp8_setup_intra_recon(new_yv12);
- vp8cx_frame_init_quantizer(cpi);
-
- // Initialise the MV cost table to the defaults
- //if( cm->current_video_frame == 0)
- //if ( 0 )
- {
- int flag[2] = {1, 1};
- vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
- vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
+void vp8_first_pass(VP8_COMP *cpi) {
+ int mb_row, mb_col;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ int recon_yoffset, recon_uvoffset;
+ YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
+ int recon_y_stride = lst_yv12->y_stride;
+ int recon_uv_stride = lst_yv12->uv_stride;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
+ int64_t sr_coded_error = 0;
+
+ int sum_mvr = 0, sum_mvc = 0;
+ int sum_mvr_abs = 0, sum_mvc_abs = 0;
+ int sum_mvrs = 0, sum_mvcs = 0;
+ int mvcount = 0;
+ int intercount = 0;
+ int second_ref_count = 0;
+ int intrapenalty = 256;
+ int neutral_count = 0;
+ int new_mv_count = 0;
+ int sum_in_vectors = 0;
+ uint32_t lastmv_as_int = 0;
+
+ int_mv zero_ref_mv;
+
+ zero_ref_mv.as_int = 0;
+
+ vp8_clear_system_state(); // __asm emms;
+
+ x->src = *cpi->Source;
+ xd->pre = *lst_yv12;
+ xd->dst = *new_yv12;
+
+ x->partition_info = x->pi;
+
+ xd->mode_info_context = cm->mi;
+
+ vp8_build_block_offsets(x);
+
+ vp8_setup_block_dptrs(&x->e_mbd);
+
+ vp8_setup_block_ptrs(x);
+
+ // set up new frame for intra coded blocks
+ vp8_setup_intra_recon(new_yv12);
+ vp8cx_frame_init_quantizer(cpi);
+
+ // Initialise the MV cost table to the defaults
+ // if( cm->current_video_frame == 0)
+ // if ( 0 )
+ {
+ int flag[2] = {1, 1};
+ vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
+ vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+ vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
#if CONFIG_HIGH_PRECISION_MV
- vpx_memcpy(cm->fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
+ vpx_memcpy(cm->fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
+ vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
#endif
- }
+ }
+
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv best_ref_mv;
+
+ best_ref_mv.as_int = 0;
+
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+ // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
+
+
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ int this_error;
+ int gf_motion_error = INT_MAX;
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+ xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
+ // Copy current mb to a buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+
+ // do intra 16x16 prediction
+ this_error = vp8_encode_intra(cpi, x, use_dc_pred);
+
+ // "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
+ // We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
+ // When the error score is very low this causes us to pick all or lots of INTRA modes and throw lots of key frames.
+ // This penalty adds a cost matching that of a 0,0 mv to the intra case.
+ this_error += intrapenalty;
+
+ // Cumulative intra error total
+ intra_error += (int64_t)this_error;
+
+ // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
+
+ // Other than for the first frame do a motion search
+ if (cm->current_video_frame > 0) {
+ int tmp_err;
+ int motion_error = INT_MAX;
+ int_mv mv, tmp_mv;
+
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, lst_yv12, &motion_error, recon_yoffset);
+ mv.as_int = tmp_mv.as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ first_pass_motion_search(cpi, x, &best_ref_mv,
+ &mv.as_mv, lst_yv12,
+ &motion_error, recon_yoffset);
+
+ // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
+ if (best_ref_mv.as_int) {
+ tmp_err = INT_MAX;
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
+ lst_yv12, &tmp_err, recon_yoffset);
+
+ if (tmp_err < motion_error) {
+ motion_error = tmp_err;
+ mv.as_int = tmp_mv.as_int;
+ }
+ }
- // for each macroblock row in image
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
- int_mv best_ref_mv;
+ // Experimental search in an older reference frame
+ if (cm->current_video_frame > 1) {
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ first_pass_motion_search(cpi, x, &zero_ref_mv,
+ &tmp_mv.as_mv, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ if ((gf_motion_error < motion_error) &&
+ (gf_motion_error < this_error)) {
+ second_ref_count++;
+ }
+
+ // Reset to last frame as reference buffer
+ xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
+ xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
+ xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
+
+ // In accumulating a score for the older reference frame
+ // take the best of the motion predicted score and
+ // the intra coded error (just as will be done for
+ // accumulation of "coded_error" for the last frame).
+ if (gf_motion_error < this_error)
+ sr_coded_error += gf_motion_error;
+ else
+ sr_coded_error += this_error;
+ } else
+ sr_coded_error += motion_error;
+ /* Intra assumed best */
best_ref_mv.as_int = 0;
- // reset above block coeffs
- xd->up_available = (mb_row != 0);
- recon_yoffset = (mb_row * recon_y_stride * 16);
- recon_uvoffset = (mb_row * recon_uv_stride * 8);
-
- // Set up limit values for motion vectors to prevent them extending outside the UMV borders
- x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
- x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
-
-
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- int this_error;
- int gf_motion_error = INT_MAX;
- int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
-
- xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
- xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset;
- xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
- xd->left_available = (mb_col != 0);
-
- //Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-
- // do intra 16x16 prediction
- this_error = vp8_encode_intra(cpi, x, use_dc_pred);
-
- // "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
- // We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
- // When the error score is very low this causes us to pick all or lots of INTRA modes and throw lots of key frames.
- // This penalty adds a cost matching that of a 0,0 mv to the intra case.
- this_error += intrapenalty;
-
- // Cumulative intra error total
- intra_error += (int64_t)this_error;
-
- // Set up limit values for motion vectors to prevent them extending outside the UMV borders
- x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
- x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
-
- // Other than for the first frame do a motion search
- if (cm->current_video_frame > 0)
- {
- int tmp_err;
- int motion_error = INT_MAX;
- int_mv mv, tmp_mv;
-
- // Simple 0,0 motion with no mv overhead
- zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
- mv.as_int = tmp_mv.as_int = 0;
-
- // Test last reference frame using the previous best mv as the
- // starting point (best reference) for the search
- first_pass_motion_search(cpi, x, &best_ref_mv,
- &mv.as_mv, lst_yv12,
- &motion_error, recon_yoffset);
-
- // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
- if (best_ref_mv.as_int)
- {
- tmp_err = INT_MAX;
- first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
- lst_yv12, &tmp_err, recon_yoffset);
-
- if ( tmp_err < motion_error )
- {
- motion_error = tmp_err;
- mv.as_int = tmp_mv.as_int;
- }
- }
-
- // Experimental search in an older reference frame
- if (cm->current_video_frame > 1)
- {
- // Simple 0,0 motion with no mv overhead
- zz_motion_search( cpi, x, gld_yv12,
- &gf_motion_error, recon_yoffset );
-
- first_pass_motion_search(cpi, x, &zero_ref_mv,
- &tmp_mv.as_mv, gld_yv12,
- &gf_motion_error, recon_yoffset);
-
- if ( (gf_motion_error < motion_error) &&
- (gf_motion_error < this_error))
- {
- second_ref_count++;
- }
-
- // Reset to last frame as reference buffer
- xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
- xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
- xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
-
- // In accumulating a score for the older reference frame
- // take the best of the motion predicted score and
- // the intra coded error (just as will be done for)
- // accumulation of "coded_error" for the last frame.
- if ( gf_motion_error < this_error )
- sr_coded_error += gf_motion_error;
- else
- sr_coded_error += this_error;
- }
- else
- sr_coded_error += motion_error;
-
- /* Intra assumed best */
- best_ref_mv.as_int = 0;
-
- if (motion_error <= this_error)
- {
- // Keep a count of cases where the inter and intra were
- // very close and very low. This helps with scene cut
- // detection for example in cropped clips with black bars
- // at the sides or top and bottom.
- if( (((this_error-intrapenalty) * 9) <=
- (motion_error*10)) &&
- (this_error < (2*intrapenalty)) )
- {
- neutral_count++;
- }
-
- mv.as_mv.row <<= 3;
- mv.as_mv.col <<= 3;
- this_error = motion_error;
- vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
- vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
- sum_mvr += mv.as_mv.row;
- sum_mvr_abs += abs(mv.as_mv.row);
- sum_mvc += mv.as_mv.col;
- sum_mvc_abs += abs(mv.as_mv.col);
- sum_mvrs += mv.as_mv.row * mv.as_mv.row;
- sum_mvcs += mv.as_mv.col * mv.as_mv.col;
- intercount++;
-
- best_ref_mv.as_int = mv.as_int;
-
- // Was the vector non-zero
- if (mv.as_int)
- {
- mvcount++;
-
- // Was it different from the last non zero vector
- if ( mv.as_int != lastmv_as_int )
- new_mv_count++;
- lastmv_as_int = mv.as_int;
-
- // Does the Row vector point inwards or outwards
- if (mb_row < cm->mb_rows / 2)
- {
- if (mv.as_mv.row > 0)
- sum_in_vectors--;
- else if (mv.as_mv.row < 0)
- sum_in_vectors++;
- }
- else if (mb_row > cm->mb_rows / 2)
- {
- if (mv.as_mv.row > 0)
- sum_in_vectors++;
- else if (mv.as_mv.row < 0)
- sum_in_vectors--;
- }
-
- // Does the Row vector point inwards or outwards
- if (mb_col < cm->mb_cols / 2)
- {
- if (mv.as_mv.col > 0)
- sum_in_vectors--;
- else if (mv.as_mv.col < 0)
- sum_in_vectors++;
- }
- else if (mb_col > cm->mb_cols / 2)
- {
- if (mv.as_mv.col > 0)
- sum_in_vectors++;
- else if (mv.as_mv.col < 0)
- sum_in_vectors--;
- }
- }
- }
+ if (motion_error <= this_error) {
+ // Keep a count of cases where the inter and intra were
+ // very close and very low. This helps with scene cut
+ // detection for example in cropped clips with black bars
+ // at the sides or top and bottom.
+ if ((((this_error - intrapenalty) * 9) <=
+ (motion_error * 10)) &&
+ (this_error < (2 * intrapenalty))) {
+ neutral_count++;
+ }
+
+ mv.as_mv.row <<= 3;
+ mv.as_mv.col <<= 3;
+ this_error = motion_error;
+ vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
+ vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
+ sum_mvr += mv.as_mv.row;
+ sum_mvr_abs += abs(mv.as_mv.row);
+ sum_mvc += mv.as_mv.col;
+ sum_mvc_abs += abs(mv.as_mv.col);
+ sum_mvrs += mv.as_mv.row * mv.as_mv.row;
+ sum_mvcs += mv.as_mv.col * mv.as_mv.col;
+ intercount++;
+
+ best_ref_mv.as_int = mv.as_int;
+
+ // Was the vector non-zero
+ if (mv.as_int) {
+ mvcount++;
+
+ // Was it different from the last non zero vector
+ if (mv.as_int != lastmv_as_int)
+ new_mv_count++;
+ lastmv_as_int = mv.as_int;
+
+ // Does the Row vector point inwards or outwards
+ if (mb_row < cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors++;
+ } else if (mb_row > cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors--;
}
- else
- sr_coded_error += (int64_t)this_error;
-
- coded_error += (int64_t)this_error;
- // adjust to the next column of macroblocks
- x->src.y_buffer += 16;
- x->src.u_buffer += 8;
- x->src.v_buffer += 8;
-
- recon_yoffset += 16;
- recon_uvoffset += 8;
+ // Does the Row vector point inwards or outwards
+ if (mb_col < cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors++;
+ } else if (mb_col > cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors--;
+ }
+ }
}
+ } else
+ sr_coded_error += (int64_t)this_error;
- // adjust to the next row of mbs
- x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
- x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
- x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ coded_error += (int64_t)this_error;
- //extend the recon for intra prediction
- vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
- vp8_clear_system_state(); //__asm emms;
- }
+ // adjust to the next column of macroblocks
+ x->src.y_buffer += 16;
+ x->src.u_buffer += 8;
+ x->src.v_buffer += 8;
- vp8_clear_system_state(); //__asm emms;
- {
- double weight = 0.0;
-
- FIRSTPASS_STATS fps;
-
- fps.frame = cm->current_video_frame ;
- fps.intra_error = intra_error >> 8;
- fps.coded_error = coded_error >> 8;
- fps.sr_coded_error = sr_coded_error >> 8;
- weight = simple_weight(cpi->Source);
-
-
- if (weight < 0.1)
- weight = 0.1;
-
- fps.ssim_weighted_pred_err = fps.coded_error * weight;
-
- fps.pcnt_inter = 0.0;
- fps.pcnt_motion = 0.0;
- fps.MVr = 0.0;
- fps.mvr_abs = 0.0;
- fps.MVc = 0.0;
- fps.mvc_abs = 0.0;
- fps.MVrv = 0.0;
- fps.MVcv = 0.0;
- fps.mv_in_out_count = 0.0;
- fps.new_mv_count = 0.0;
- fps.count = 1.0;
-
- fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs;
- fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs;
- fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs;
-
- if (mvcount > 0)
- {
- fps.MVr = (double)sum_mvr / (double)mvcount;
- fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount;
- fps.MVc = (double)sum_mvc / (double)mvcount;
- fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount;
- fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) / (double)mvcount;
- fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) / (double)mvcount;
- fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2);
- fps.new_mv_count = new_mv_count;
-
- fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs;
- }
-
- // TODO: handle the case when duration is set to 0, or something less
- // than the full time between subsequent cpi->source_time_stamp s .
- fps.duration = cpi->source->ts_end
- - cpi->source->ts_start;
-
- // don't want to do output stats with a stack variable!
- memcpy(cpi->twopass.this_frame_stats,
- &fps,
- sizeof(FIRSTPASS_STATS));
- output_stats(cpi, cpi->output_pkt_list, cpi->twopass.this_frame_stats);
- accumulate_stats(cpi->twopass.total_stats, &fps);
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
}
- // Copy the previous Last Frame back into gf and and arf buffers if
- // the prediction is good enough... but also dont allow it to lag too far
- if ((cpi->twopass.sr_update_lag > 3) ||
- ((cm->current_video_frame > 0) &&
- (cpi->twopass.this_frame_stats->pcnt_inter > 0.20) &&
- ((cpi->twopass.this_frame_stats->intra_error /
- cpi->twopass.this_frame_stats->coded_error) > 2.0)))
- {
- vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
- cpi->twopass.sr_update_lag = 1;
+ // adjust to the next row of mbs
+ x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+ x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+
+ // extend the recon for intra prediction
+ vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ vp8_clear_system_state(); // __asm emms;
+ }
+
+ vp8_clear_system_state(); // __asm emms;
+ {
+ double weight = 0.0;
+
+ FIRSTPASS_STATS fps;
+
+ fps.frame = cm->current_video_frame;
+ fps.intra_error = intra_error >> 8;
+ fps.coded_error = coded_error >> 8;
+ fps.sr_coded_error = sr_coded_error >> 8;
+ weight = simple_weight(cpi->Source);
+
+
+ if (weight < 0.1)
+ weight = 0.1;
+
+ fps.ssim_weighted_pred_err = fps.coded_error * weight;
+
+ fps.pcnt_inter = 0.0;
+ fps.pcnt_motion = 0.0;
+ fps.MVr = 0.0;
+ fps.mvr_abs = 0.0;
+ fps.MVc = 0.0;
+ fps.mvc_abs = 0.0;
+ fps.MVrv = 0.0;
+ fps.MVcv = 0.0;
+ fps.mv_in_out_count = 0.0;
+ fps.new_mv_count = 0.0;
+ fps.count = 1.0;
+
+ fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs;
+ fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs;
+ fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs;
+
+ if (mvcount > 0) {
+ fps.MVr = (double)sum_mvr / (double)mvcount;
+ fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount;
+ fps.MVc = (double)sum_mvc / (double)mvcount;
+ fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount;
+ fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) / (double)mvcount;
+ fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) / (double)mvcount;
+ fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2);
+ fps.new_mv_count = new_mv_count;
+
+ fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs;
}
- else
- cpi->twopass.sr_update_lag ++;
- // swap frame pointers so last frame refers to the frame we just compressed
- vp8_swap_yv12_buffer(lst_yv12, new_yv12);
- vp8_yv12_extend_frame_borders(lst_yv12);
+ // TODO: handle the case when duration is set to 0, or something less
+ // than the full time between subsequent cpi->source_time_stamps.
+ fps.duration = cpi->source->ts_end
+ - cpi->source->ts_start;
+
+ // don't want to do output stats with a stack variable!
+ memcpy(cpi->twopass.this_frame_stats,
+ &fps,
+ sizeof(FIRSTPASS_STATS));
+ output_stats(cpi, cpi->output_pkt_list, cpi->twopass.this_frame_stats);
+ accumulate_stats(cpi->twopass.total_stats, &fps);
+ }
+
+ // Copy the previous Last Frame back into gf and arf buffers if
+ // the prediction is good enough... but also don't allow it to lag too far.
+ if ((cpi->twopass.sr_update_lag > 3) ||
+ ((cm->current_video_frame > 0) &&
+ (cpi->twopass.this_frame_stats->pcnt_inter > 0.20) &&
+ ((cpi->twopass.this_frame_stats->intra_error /
+ cpi->twopass.this_frame_stats->coded_error) > 2.0))) {
+ vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
+ cpi->twopass.sr_update_lag = 1;
+ } else
+ cpi->twopass.sr_update_lag++;
- // Special case for the first frame. Copy into the GF buffer as a second reference.
- if (cm->current_video_frame == 0)
- {
- vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
- }
+ // swap frame pointers so last frame refers to the frame we just compressed
+ vp8_swap_yv12_buffer(lst_yv12, new_yv12);
+ vp8_yv12_extend_frame_borders(lst_yv12);
+ // Special case for the first frame. Copy into the GF buffer as a second reference.
+ if (cm->current_video_frame == 0) {
+ vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
+ }
- // use this to see what the first pass reconstruction looks like
- if (0)
- {
- char filename[512];
- FILE *recon_file;
- sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
- if (cm->current_video_frame == 0)
- recon_file = fopen(filename, "wb");
- else
- recon_file = fopen(filename, "ab");
+ // use this to see what the first pass reconstruction looks like
+ if (0) {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
- if(fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file));
- fclose(recon_file);
- }
+ if (cm->current_video_frame == 0)
+ recon_file = fopen(filename, "wb");
+ else
+ recon_file = fopen(filename, "ab");
+
+ // The empty statement deliberately discards the fwrite return value.
+ if (fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file));
+ fclose(recon_file);
+ }
- cm->current_video_frame++;
+ cm->current_video_frame++;
}
@@ -858,1840 +806,1723 @@ void vp8_first_pass(VP8_COMP *cpi)
//
-double bitcost( double prob )
-{
- return -(log( prob ) / log( 2.0 ));
+double bitcost(double prob) {
+ return -(log(prob) / log(2.0));
}
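bitcost() is the ideal code length -log2(prob). A self-contained check of two familiar points on that curve:

#include <assert.h>
#include <math.h>

static double bitcost_sketch(double prob) { return -(log(prob) / log(2.0)); }

int main(void) {
  /* -log2(p): a probability-1/2 symbol costs one bit, 1/4 costs two. */
  assert(fabs(bitcost_sketch(0.5)  - 1.0) < 1e-9);
  assert(fabs(bitcost_sketch(0.25) - 2.0) < 1e-9);
  return 0;
}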
static long long estimate_modemvcost(VP8_COMP *cpi,
- FIRSTPASS_STATS * fpstats)
-{
- int mv_cost;
- int mode_cost;
-
- double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
- double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
- double av_intra = (1.0 - av_pct_inter);
-
- double zz_cost;
- double motion_cost;
- double intra_cost;
-
- zz_cost = bitcost(av_pct_inter - av_pct_motion);
- motion_cost = bitcost(av_pct_motion);
- intra_cost = bitcost(av_intra);
-
- // Estimate of extra bits per mv overhead for mbs
- // << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
- mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
-
- // Crude estimate of overhead cost from modes
- // << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
- mode_cost =
- (int)( ( ((av_pct_inter - av_pct_motion) * zz_cost) +
- (av_pct_motion * motion_cost) +
- (av_intra * intra_cost) ) * cpi->common.MBs ) << 9;
-
- //return mv_cost + mode_cost;
- // TODO PGW Fix overhead costs for extended Q range
- return 0;
+ FIRSTPASS_STATS *fpstats) {
+ int mv_cost;
+ int mode_cost;
+
+ double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
+ double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
+ double av_intra = (1.0 - av_pct_inter);
+
+ double zz_cost;
+ double motion_cost;
+ double intra_cost;
+
+ zz_cost = bitcost(av_pct_inter - av_pct_motion);
+ motion_cost = bitcost(av_pct_motion);
+ intra_cost = bitcost(av_intra);
+
+ // Estimate of extra bits per mv overhead for mbs
+ // << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb
+ mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
+
+ // Crude estimate of overhead cost from modes
+ // << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
+ mode_cost =
+ (int)((((av_pct_inter - av_pct_motion) * zz_cost) +
+ (av_pct_motion * motion_cost) +
+ (av_intra * intra_cost)) * cpi->common.MBs) << 9;
+
+ // return mv_cost + mode_cost;
+ // TODO PGW Fix overhead costs for extended Q range
+ return 0;
}
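The `<< 9` in both costs converts whole bits into the bits*512 fixed point that vp8_bits_per_mb works in (note the function currently discards the result and returns 0, per the TODO). A tiny sketch of that conversion:

#include <stdio.h>

/* vp8_bits_per_mb-style fixed point: rates are carried as bits * 512,
 * so "<< 9" converts whole bits to the same scale. */
int main(void) {
  int whole_bits = 8;                 /* e.g. 8 bits of mv overhead per mb */
  int fixed_point = whole_bits << 9;  /* 8 * 512 = 4096 */
  printf("%d\n", fixed_point);
  return 0;
}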
-static double calc_correction_factor( double err_per_mb,
- double err_divisor,
- double pt_low,
- double pt_high,
- int Q )
-{
- double power_term;
- double error_term = err_per_mb / err_divisor;
- double correction_factor;
+static double calc_correction_factor(double err_per_mb,
+ double err_divisor,
+ double pt_low,
+ double pt_high,
+ int Q) {
+ double power_term;
+ double error_term = err_per_mb / err_divisor;
+ double correction_factor;
- // Adjustment based on actual quantizer to power term.
- power_term = (vp8_convert_qindex_to_q(Q) * 0.01) + pt_low;
- power_term = (power_term > pt_high) ? pt_high : power_term;
+ // Adjustment based on actual quantizer to power term.
+ power_term = (vp8_convert_qindex_to_q(Q) * 0.01) + pt_low;
+ power_term = (power_term > pt_high) ? pt_high : power_term;
- // Adjustments to error term
- // TBD
+ // Adjustments to error term
+ // TBD
- // Calculate correction factor
- correction_factor = pow(error_term, power_term);
+ // Calculate correction factor
+ correction_factor = pow(error_term, power_term);
- // Clip range
- correction_factor =
- (correction_factor < 0.05)
- ? 0.05 : (correction_factor > 2.0) ? 2.0 : correction_factor;
+ // Clip range
+ correction_factor =
+ (correction_factor < 0.05)
+ ? 0.05 : (correction_factor > 2.0) ? 2.0 : correction_factor;
- return correction_factor;
+ return correction_factor;
}
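The factor is (err_per_mb / err_divisor) raised to a quantizer-dependent power and clipped to [0.05, 2.0]. A standalone sketch with the power term passed in directly, since vp8_convert_qindex_to_q lives elsewhere:

#include <math.h>
#include <stdio.h>

/* Same shape as calc_correction_factor, with the quantizer-derived
 * power term supplied by the caller. */
static double correction_sketch(double err_per_mb, double err_divisor,
                                double power_term) {
  double f = pow(err_per_mb / err_divisor, power_term);
  if (f < 0.05) f = 0.05;
  if (f > 2.0)  f = 2.0;
  return f;
}

int main(void) {
  /* err at 2x the divisor with a 0.5 power term -> sqrt(2) ~ 1.414. */
  printf("%f\n", correction_sketch(300.0, 150.0, 0.5));
  return 0;
}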
// Given a current maxQ value sets a range for future values.
// PGW TODO..
// This code removes direct dependency on QIndex to determine the range
// (now uses the actual quantizer) but has not been tuned.
-static void adjust_maxq_qrange(VP8_COMP *cpi)
-{
- int i;
- double q;
-
- // Set the max corresponding to cpi->avg_q * 2.0
- q = cpi->avg_q * 2.0;
- cpi->twopass.maxq_max_limit = cpi->worst_quality;
- for ( i = cpi->best_quality; i <= cpi->worst_quality; i++ )
- {
- cpi->twopass.maxq_max_limit = i;
- if ( vp8_convert_qindex_to_q(i) >= q )
- break;
- }
-
- // Set the min corresponding to cpi->avg_q * 0.5
- q = cpi->avg_q * 0.5;
- cpi->twopass.maxq_min_limit = cpi->best_quality;
- for ( i = cpi->worst_quality; i >= cpi->best_quality; i-- )
- {
- cpi->twopass.maxq_min_limit = i;
- if ( vp8_convert_qindex_to_q(i) <= q )
- break;
- }
+static void adjust_maxq_qrange(VP8_COMP *cpi) {
+ int i;
+ double q;
+
+ // Set the max corresponding to cpi->avg_q * 2.0
+ q = cpi->avg_q * 2.0;
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ for (i = cpi->best_quality; i <= cpi->worst_quality; i++) {
+ cpi->twopass.maxq_max_limit = i;
+ if (vp8_convert_qindex_to_q(i) >= q)
+ break;
+ }
+
+ // Set the min corresponding to cpi->avg_q * 0.5
+ q = cpi->avg_q * 0.5;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+ for (i = cpi->worst_quality; i >= cpi->best_quality; i--) {
+ cpi->twopass.maxq_min_limit = i;
+ if (vp8_convert_qindex_to_q(i) <= q)
+ break;
+ }
}
static int estimate_max_q(VP8_COMP *cpi,
- FIRSTPASS_STATS * fpstats,
+ FIRSTPASS_STATS *fpstats,
int section_target_bandwitdh,
- int overhead_bits )
-{
- int Q;
- int num_mbs = cpi->common.MBs;
- int target_norm_bits_per_mb;
-
- double section_err = (fpstats->coded_error / fpstats->count);
- double sr_err_diff;
- double sr_correction;
- double err_per_mb = section_err / num_mbs;
- double err_correction_factor;
- double speed_correction = 1.0;
- int overhead_bits_per_mb;
-
- if (section_target_bandwitdh <= 0)
- return cpi->twopass.maxq_max_limit; // Highest value allowed
-
- target_norm_bits_per_mb =
- (section_target_bandwitdh < (1 << 20))
- ? (512 * section_target_bandwitdh) / num_mbs
- : 512 * (section_target_bandwitdh / num_mbs);
-
- // Look at the drop in prediction quality between the last frame
- // and the GF buffer (which contained an older frame).
- sr_err_diff =
- (fpstats->sr_coded_error - fpstats->coded_error) /
- (fpstats->count * cpi->common.MBs);
- sr_correction = (sr_err_diff / 32.0);
- sr_correction = pow( sr_correction, 0.25 );
- if ( sr_correction < 0.75 )
- sr_correction = 0.75;
- else if ( sr_correction > 1.25 )
- sr_correction = 1.25;
-
- // Calculate a corrective factor based on a rolling ratio of bits spent
- // vs target bits
- if ((cpi->rolling_target_bits > 0) &&
- (cpi->active_worst_quality < cpi->worst_quality))
- {
- double rolling_ratio;
-
- rolling_ratio = (double)cpi->rolling_actual_bits /
- (double)cpi->rolling_target_bits;
-
- if (rolling_ratio < 0.95)
- cpi->twopass.est_max_qcorrection_factor -= 0.005;
- else if (rolling_ratio > 1.05)
- cpi->twopass.est_max_qcorrection_factor += 0.005;
-
- cpi->twopass.est_max_qcorrection_factor =
- (cpi->twopass.est_max_qcorrection_factor < 0.1)
- ? 0.1
- : (cpi->twopass.est_max_qcorrection_factor > 10.0)
- ? 10.0 : cpi->twopass.est_max_qcorrection_factor;
- }
-
- // Corrections for higher compression speed settings
- // (reduced compression expected)
- if (cpi->compressor_speed == 1)
- {
- if (cpi->oxcf.cpu_used <= 5)
- speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
- else
- speed_correction = 1.25;
- }
-
- // Estimate of overhead bits per mb
- // Correction to overhead bits for min allowed Q.
+ int overhead_bits) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double sr_err_diff;
+ double sr_correction;
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+ int overhead_bits_per_mb;
+
+ if (section_target_bandwitdh <= 0)
+ return cpi->twopass.maxq_max_limit; // Highest value allowed
+
+ target_norm_bits_per_mb =
+ (section_target_bandwitdh < (1 << 20))
+ ? (512 * section_target_bandwitdh) / num_mbs
+ : 512 * (section_target_bandwitdh / num_mbs);
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ sr_err_diff =
+ (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = (sr_err_diff / 32.0);
+ sr_correction = pow(sr_correction, 0.25);
+ if (sr_correction < 0.75)
+ sr_correction = 0.75;
+ else if (sr_correction > 1.25)
+ sr_correction = 1.25;
+
+ // Calculate a corrective factor based on a rolling ratio of bits spent
+ // vs target bits
+ if ((cpi->rolling_target_bits > 0) &&
+ (cpi->active_worst_quality < cpi->worst_quality)) {
+ double rolling_ratio;
+
+ rolling_ratio = (double)cpi->rolling_actual_bits /
+ (double)cpi->rolling_target_bits;
+
+ if (rolling_ratio < 0.95)
+ cpi->twopass.est_max_qcorrection_factor -= 0.005;
+ else if (rolling_ratio > 1.05)
+ cpi->twopass.est_max_qcorrection_factor += 0.005;
+
+ cpi->twopass.est_max_qcorrection_factor =
+ (cpi->twopass.est_max_qcorrection_factor < 0.1)
+ ? 0.1
+ : (cpi->twopass.est_max_qcorrection_factor > 10.0)
+ ? 10.0 : cpi->twopass.est_max_qcorrection_factor;
+ }
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ if (cpi->compressor_speed == 1) {
+ if (cpi->oxcf.cpu_used <= 5)
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ else
+ speed_correction = 1.25;
+ }
+
+ // Estimate of overhead bits per mb
+ // Correction to overhead bits for min allowed Q.
+ // PGW TODO.. This code is broken for the extended Q range
+ // for now overhead set to 0.
+ overhead_bits_per_mb = overhead_bits / num_mbs;
+ overhead_bits_per_mb *= pow(0.98, (double)cpi->twopass.maxq_min_limit);
+
+ // Try and pick a max Q that will be high enough to encode the
+ // content at the given rate.
+ for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++) {
+ int bits_per_mb_at_this_q;
+
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, ERR_DIVISOR, 0.4, 0.90, Q) *
+ sr_correction * speed_correction *
+ cpi->twopass.est_max_qcorrection_factor;
+
+ if (err_correction_factor < 0.05)
+ err_correction_factor = 0.05;
+ else if (err_correction_factor > 5.0)
+ err_correction_factor = 5.0;
+
+ bits_per_mb_at_this_q =
+ vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
+
+ bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
+ (double)bits_per_mb_at_this_q);
+
+ // Mode and motion overhead
+ // As Q rises in real encode loop rd code will force overhead down
+ // We make a crude adjustment for this here as *.98 per Q step.
// PGW TODO.. This code is broken for the extended Q range
// for now overhead set to 0.
- overhead_bits_per_mb = overhead_bits / num_mbs;
- overhead_bits_per_mb *= pow( 0.98, (double)cpi->twopass.maxq_min_limit );
-
- // Try and pick a max Q that will be high enough to encode the
- // content at the given rate.
- for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++)
- {
- int bits_per_mb_at_this_q;
-
- err_correction_factor =
- calc_correction_factor(err_per_mb, ERR_DIVISOR, 0.4, 0.90, Q) *
- sr_correction * speed_correction *
- cpi->twopass.est_max_qcorrection_factor;
-
- if ( err_correction_factor < 0.05 )
- err_correction_factor = 0.05;
- else if ( err_correction_factor > 5.0 )
- err_correction_factor = 5.0;
-
- bits_per_mb_at_this_q =
- vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
-
- bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
- (double)bits_per_mb_at_this_q);
-
- // Mode and motion overhead
- // As Q rises in real encode loop rd code will force overhead down
- // We make a crude adjustment for this here as *.98 per Q step.
- // PGW TODO.. This code is broken for the extended Q range
- // for now overhead set to 0.
- //overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
-
- if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
- break;
- }
-
- // Restriction on active max q for constrained quality mode.
- if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
- (Q < cpi->cq_target_quality) )
- {
- Q = cpi->cq_target_quality;
- }
-
- // Adjust maxq_min_limit and maxq_max_limit limits based on
- // averaga q observed in clip for non kf/gf/arf frames
- // Give average a chance to settle though.
- // PGW TODO.. This code is broken for the extended Q range
- if ( (cpi->ni_frames >
- ((unsigned int)cpi->twopass.total_stats->count >> 8)) &&
- (cpi->ni_frames > 150) )
- {
- adjust_maxq_qrange( cpi );
- }
-
- return Q;
+ // overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
+
+ // Restriction on active max q for constrained quality mode.
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < cpi->cq_target_quality)) {
+ Q = cpi->cq_target_quality;
+ }
+
+ // Adjust the maxq_min_limit and maxq_max_limit limits based on the
+ // average q observed in the clip for non kf/gf/arf frames.
+ // Give the average a chance to settle though.
+ // PGW TODO.. This code is broken for the extended Q range
+ if ((cpi->ni_frames >
+ ((unsigned int)cpi->twopass.total_stats->count >> 8)) &&
+ (cpi->ni_frames > 150)) {
+ adjust_maxq_qrange(cpi);
+ }
+
+ return Q;
}
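Stripped of its correction factors, the central loop above is a linear search for the lowest Q whose corrected bits-per-mb estimate fits under the target. The search pattern in isolation, with a hypothetical monotone rate curve standing in for vp8_bits_per_mb plus corrections:

#include <stdio.h>

/* Linear search for the first Q whose per-mb rate fits the target,
 * assuming rate(q) falls monotonically as q rises (as in the real loop). */
static int pick_q(int q_min, int q_max, int target_bits_per_mb,
                  int (*rate)(int)) {
  int q;
  for (q = q_min; q < q_max; q++)
    if (rate(q) <= target_bits_per_mb)
      break;
  return q;
}

static int toy_rate(int q) { return 4096 / (q + 1); }  /* stand-in curve */

int main(void) {
  printf("%d\n", pick_q(0, 127, 64, toy_rate));  /* 63: first q fitting 64 */
  return 0;
}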
// For cq mode estimate a cq level that matches the observed
// complexity and data rate.
-static int estimate_cq( VP8_COMP *cpi,
- FIRSTPASS_STATS * fpstats,
- int section_target_bandwitdh,
- int overhead_bits )
-{
- int Q;
- int num_mbs = cpi->common.MBs;
- int target_norm_bits_per_mb;
-
- double section_err = (fpstats->coded_error / fpstats->count);
- double err_per_mb = section_err / num_mbs;
- double err_correction_factor;
- double sr_err_diff;
- double sr_correction;
- double speed_correction = 1.0;
- double clip_iiratio;
- double clip_iifactor;
- int overhead_bits_per_mb;
-
-
- target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
- ? (512 * section_target_bandwitdh) / num_mbs
- : 512 * (section_target_bandwitdh / num_mbs);
-
- // Estimate of overhead bits per mb
- overhead_bits_per_mb = overhead_bits / num_mbs;
-
- // Corrections for higher compression speed settings
- // (reduced compression expected)
- if (cpi->compressor_speed == 1)
- {
- if (cpi->oxcf.cpu_used <= 5)
- speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
- else
- speed_correction = 1.25;
- }
+static int estimate_cq(VP8_COMP *cpi,
+ FIRSTPASS_STATS *fpstats,
+ int section_target_bandwitdh,
+ int overhead_bits) {
+ int Q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double sr_err_diff;
+ double sr_correction;
+ double speed_correction = 1.0;
+ double clip_iiratio;
+ double clip_iifactor;
+ int overhead_bits_per_mb;
+
+
+ target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
+ ? (512 * section_target_bandwitdh) / num_mbs
+ : 512 * (section_target_bandwitdh / num_mbs);
+
+ // Estimate of overhead bits per mb
+ overhead_bits_per_mb = overhead_bits / num_mbs;
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ if (cpi->compressor_speed == 1) {
+ if (cpi->oxcf.cpu_used <= 5)
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ else
+ speed_correction = 1.25;
+ }
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ sr_err_diff =
+ (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = (sr_err_diff / 32.0);
+ sr_correction = pow(sr_correction, 0.25);
+ if (sr_correction < 0.75)
+ sr_correction = 0.75;
+ else if (sr_correction > 1.25)
+ sr_correction = 1.25;
+
+ // II ratio correction factor for clip as a whole
+ clip_iiratio = cpi->twopass.total_stats->intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats->coded_error);
+ clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
+ if (clip_iifactor < 0.80)
+ clip_iifactor = 0.80;
+
+ // Try and pick a Q that can encode the content at the given rate.
+ for (Q = 0; Q < MAXQ; Q++) {
+ int bits_per_mb_at_this_q;
+
+ // Error per MB based correction factor
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 100.0, 0.4, 0.90, Q) *
+ sr_correction * speed_correction * clip_iifactor;
+
+ if (err_correction_factor < 0.05)
+ err_correction_factor = 0.05;
+ else if (err_correction_factor > 5.0)
+ err_correction_factor = 5.0;
+
+ bits_per_mb_at_this_q =
+ vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
+
+ bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
+ (double)bits_per_mb_at_this_q);
+
+ // Mode and motion overhead
+ // As Q rises in real encode loop rd code will force overhead down
+ // We make a crude adjustment for this here as *.98 per Q step.
+ // PGW TODO.. This code is broken for the extended Q range
+ // for now overhead set to 0.
+ overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
- // Look at the drop in prediction quality between the last frame
- // and the GF buffer (which contained an older frame).
- sr_err_diff =
- (fpstats->sr_coded_error - fpstats->coded_error) /
- (fpstats->count * cpi->common.MBs);
- sr_correction = (sr_err_diff / 32.0);
- sr_correction = pow( sr_correction, 0.25 );
- if ( sr_correction < 0.75 )
- sr_correction = 0.75;
- else if ( sr_correction > 1.25 )
- sr_correction = 1.25;
-
- // II ratio correction factor for clip as a whole
- clip_iiratio = cpi->twopass.total_stats->intra_error /
- DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats->coded_error);
- clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
- if (clip_iifactor < 0.80)
- clip_iifactor = 0.80;
-
- // Try and pick a Q that can encode the content at the given rate.
- for (Q = 0; Q < MAXQ; Q++)
- {
- int bits_per_mb_at_this_q;
-
- // Error per MB based correction factor
- err_correction_factor =
- calc_correction_factor(err_per_mb, 100.0, 0.4, 0.90, Q) *
- sr_correction * speed_correction * clip_iifactor;
-
- if ( err_correction_factor < 0.05 )
- err_correction_factor = 0.05;
- else if ( err_correction_factor > 5.0 )
- err_correction_factor = 5.0;
-
- bits_per_mb_at_this_q =
- vp8_bits_per_mb(INTER_FRAME, Q) + overhead_bits_per_mb;
-
- bits_per_mb_at_this_q = (int)(.5 + err_correction_factor *
- (double)bits_per_mb_at_this_q);
-
- // Mode and motion overhead
- // As Q rises in real encode loop rd code will force overhead down
- // We make a crude adjustment for this here as *.98 per Q step.
- // PGW TODO.. This code is broken for the extended Q range
- // for now overhead set to 0.
- overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98);
-
- if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
- break;
- }
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
- // Clip value to range "best allowed to (worst allowed - 1)"
- Q = select_cq_level( Q );
- if ( Q >= cpi->worst_quality )
- Q = cpi->worst_quality - 1;
- if ( Q < cpi->best_quality )
- Q = cpi->best_quality;
+ // Clip value to range "best allowed to (worst allowed - 1)"
+ Q = select_cq_level(Q);
+ if (Q >= cpi->worst_quality)
+ Q = cpi->worst_quality - 1;
+ if (Q < cpi->best_quality)
+ Q = cpi->best_quality;
- return Q;
+ return Q;
}
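
The target_norm_bits_per_mb expression in estimate_cq() picks between a multiply-first and a divide-first form: multiplying a section budget of 2^20 or more by 512 would overflow a 32-bit int, while dividing first loses precision on small budgets. A standalone sketch with made-up numbers:

#include <stdio.h>

/* Sketch of the overflow/precision trade-off above. */
static int norm_bits_per_mb(int section_bits, int num_mbs) {
  return (section_bits < (1 << 20)) ? (512 * section_bits) / num_mbs
                                    : 512 * (section_bits / num_mbs);
}

int main(void) {
  printf("%d\n", norm_bits_per_mb(800000, 3600));  /* multiply-first path */
  printf("%d\n", norm_bits_per_mb(5000000, 3600)); /* divide-first path */
  return 0;
}
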
extern void vp8_new_frame_rate(VP8_COMP *cpi, double framerate);
-void vp8_init_second_pass(VP8_COMP *cpi)
-{
- FIRSTPASS_STATS this_frame;
- FIRSTPASS_STATS *start_pos;
+void vp8_init_second_pass(VP8_COMP *cpi) {
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS *start_pos;
- double lower_bounds_min_rate = FRAME_OVERHEAD_BITS*cpi->oxcf.frame_rate;
- double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
- * cpi->oxcf.two_pass_vbrmin_section / 100);
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.frame_rate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
- if (two_pass_min_rate < lower_bounds_min_rate)
- two_pass_min_rate = lower_bounds_min_rate;
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
- zero_stats(cpi->twopass.total_stats);
- zero_stats(cpi->twopass.total_left_stats);
+ zero_stats(cpi->twopass.total_stats);
+ zero_stats(cpi->twopass.total_left_stats);
- if (!cpi->twopass.stats_in_end)
- return;
+ if (!cpi->twopass.stats_in_end)
+ return;
- *cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
- *cpi->twopass.total_left_stats = *cpi->twopass.total_stats;
+ *cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
+ *cpi->twopass.total_left_stats = *cpi->twopass.total_stats;
- // each frame can have a different duration, as the frame rate in the source
- // isn't guaranteed to be constant. The frame rate prior to the first frame
- // encoded in the second pass is a guess. However the sum duration is not.
- // Its calculated based on the actual durations of all frames from the first
- // pass.
- vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
+  // Each frame can have a different duration, as the frame rate in the source
+  // isn't guaranteed to be constant. The frame rate prior to the first frame
+  // encoded in the second pass is a guess. However, the sum duration is not:
+  // it's calculated based on the actual durations of all frames from the first
+  // pass.
+ vp8_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats->count / cpi->twopass.total_stats->duration);
- cpi->output_frame_rate = cpi->oxcf.frame_rate;
- cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0) ;
- cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
-
- // Calculate a minimum intra value to be used in determining the IIratio
- // scores used in the second pass. We have this minimum to make sure
- // that clips that are static but "low complexity" in the intra domain
- // are still boosted appropriately for KF/GF/ARF
- cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
- cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
-
- // This variable monitors how far behind the second ref update is lagging
- cpi->twopass.sr_update_lag = 1;
+ cpi->output_frame_rate = cpi->oxcf.frame_rate;
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats->duration * cpi->oxcf.target_bandwidth / 10000000.0);
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats->duration * two_pass_min_rate / 10000000.0);
- // Scan the first pass file and calculate an average Intra / Inter error score ratio for the sequence
- {
- double sum_iiratio = 0.0;
- double IIRatio;
+ // Calculate a minimum intra value to be used in determining the IIratio
+ // scores used in the second pass. We have this minimum to make sure
+ // that clips that are static but "low complexity" in the intra domain
+ // are still boosted appropriately for KF/GF/ARF
+ cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
- start_pos = cpi->twopass.stats_in; // Note starting "file" position
+ // This variable monitors how far behind the second ref update is lagging
+ cpi->twopass.sr_update_lag = 1;
- while (input_stats(cpi, &this_frame) != EOF)
- {
- IIRatio = this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
- IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
- sum_iiratio += IIRatio;
- }
+ // Scan the first pass file and calculate an average Intra / Inter error score ratio for the sequence
+ {
+ double sum_iiratio = 0.0;
+ double IIRatio;
- cpi->twopass.avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats->count);
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
- // Reset file position
- reset_fpf_position(cpi, start_pos);
+ while (input_stats(cpi, &this_frame) != EOF) {
+ IIRatio = this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
+ IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
+ sum_iiratio += IIRatio;
}
- // Scan the first pass file and calculate a modified total error based upon the bias/power function
- // used to allocate bits
- {
- start_pos = cpi->twopass.stats_in; // Note starting "file" position
+ cpi->twopass.avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats->count);
- cpi->twopass.modified_error_total = 0.0;
- cpi->twopass.modified_error_used = 0.0;
+ // Reset file position
+ reset_fpf_position(cpi, start_pos);
+ }
- while (input_stats(cpi, &this_frame) != EOF)
- {
- cpi->twopass.modified_error_total += calculate_modified_err(cpi, &this_frame);
- }
- cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
+ // Scan the first pass file and calculate a modified total error based upon the bias/power function
+ // used to allocate bits
+ {
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
- reset_fpf_position(cpi, start_pos); // Reset file position
+ cpi->twopass.modified_error_total = 0.0;
+ cpi->twopass.modified_error_used = 0.0;
+ while (input_stats(cpi, &this_frame) != EOF) {
+ cpi->twopass.modified_error_total += calculate_modified_err(cpi, &this_frame);
}
+ cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
+
+ reset_fpf_position(cpi, start_pos); // Reset file position
+
+ }
}
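
The II-ratio scan in vp8_init_second_pass() clamps each frame's intra/inter error ratio to [1.0, 20.0] before averaging, so a few pathological frames cannot dominate the sequence average. A minimal sketch of the per-frame term, with a small added epsilon standing in for the DOUBLE_DIVIDE_CHECK() guard against a near-zero coded error:

/* Sketch only. */
static double clamped_iiratio(double intra_error, double coded_error) {
  double r = intra_error / (coded_error + 0.000001);
  if (r < 1.0) r = 1.0;
  else if (r > 20.0) r = 20.0;
  return r;
}
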
-void vp8_end_second_pass(VP8_COMP *cpi)
-{
+void vp8_end_second_pass(VP8_COMP *cpi) {
}
// This function gives an estimate of how badly we believe
// the prediction quality is decaying from frame to frame.
-static double get_prediction_decay_rate( VP8_COMP *cpi,
- FIRSTPASS_STATS *next_frame)
-{
- double prediction_decay_rate;
- double second_ref_decay;
- double mb_sr_err_diff;
-
- // Initial basis is the % mbs inter coded
- prediction_decay_rate = next_frame->pcnt_inter;
-
- // Look at the observed drop in prediction quality between the last frame
- // and the GF buffer (which contains an older frame).
- mb_sr_err_diff =
- (next_frame->sr_coded_error - next_frame->coded_error) /
- (cpi->common.MBs);
- second_ref_decay = 1.0 - (mb_sr_err_diff / 512.0);
- second_ref_decay = pow( second_ref_decay, 0.5 );
- if ( second_ref_decay < 0.85 )
- second_ref_decay = 0.85;
- else if ( second_ref_decay > 1.0 )
- second_ref_decay = 1.0;
-
- if ( second_ref_decay < prediction_decay_rate )
- prediction_decay_rate = second_ref_decay;
-
- return prediction_decay_rate;
+static double get_prediction_decay_rate(VP8_COMP *cpi,
+ FIRSTPASS_STATS *next_frame) {
+ double prediction_decay_rate;
+ double second_ref_decay;
+ double mb_sr_err_diff;
+
+ // Initial basis is the % mbs inter coded
+ prediction_decay_rate = next_frame->pcnt_inter;
+
+ // Look at the observed drop in prediction quality between the last frame
+ // and the GF buffer (which contains an older frame).
+ mb_sr_err_diff =
+ (next_frame->sr_coded_error - next_frame->coded_error) /
+ (cpi->common.MBs);
+ second_ref_decay = 1.0 - (mb_sr_err_diff / 512.0);
+ second_ref_decay = pow(second_ref_decay, 0.5);
+ if (second_ref_decay < 0.85)
+ second_ref_decay = 0.85;
+ else if (second_ref_decay > 1.0)
+ second_ref_decay = 1.0;
+
+ if (second_ref_decay < prediction_decay_rate)
+ prediction_decay_rate = second_ref_decay;
+
+ return prediction_decay_rate;
}
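
Plugging hypothetical numbers into the second-reference term above: a per-MB error excess of 200 gives sqrt(1 - 200/512) ~= 0.78, which the clamp raises to its 0.85 floor; an excess of 0 gives 1.0 and pcnt_inter decides instead. As a standalone sketch:

#include <math.h>
#include <stdio.h>

/* Hypothetical inputs only, mirroring the clamp above. */
static double second_ref_decay(double mb_sr_err_diff) {
  double d = pow(1.0 - (mb_sr_err_diff / 512.0), 0.5);
  if (d < 0.85) d = 0.85;
  else if (d > 1.0) d = 1.0;
  return d;
}

int main(void) {
  printf("%.3f\n", second_ref_decay(0.0));   /* 1.000: no extra error */
  printf("%.3f\n", second_ref_decay(200.0)); /* 0.850: hits the floor */
  return 0;
}
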
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
static int detect_transition_to_still(
- VP8_COMP *cpi,
- int frame_interval,
- int still_interval,
- double loop_decay_rate,
- double last_decay_rate )
-{
- BOOL trans_to_still = FALSE;
-
- // Break clause to detect very still sections after motion
- // For example a static image after a fade or other transition
- // instead of a clean scene cut.
- if ( (frame_interval > MIN_GF_INTERVAL) &&
- (loop_decay_rate >= 0.999) &&
- (last_decay_rate < 0.9) )
- {
- int j;
- FIRSTPASS_STATS * position = cpi->twopass.stats_in;
- FIRSTPASS_STATS tmp_next_frame;
- double zz_inter;
-
- // Look ahead a few frames to see if static condition
- // persists...
- for ( j = 0; j < still_interval; j++ )
- {
- if (EOF == input_stats(cpi, &tmp_next_frame))
- break;
-
- zz_inter =
- (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion);
- if ( zz_inter < 0.999 )
- break;
- }
- // Reset file position
- reset_fpf_position(cpi, position);
+ VP8_COMP *cpi,
+ int frame_interval,
+ int still_interval,
+ double loop_decay_rate,
+ double last_decay_rate) {
+ BOOL trans_to_still = FALSE;
+
+ // Break clause to detect very still sections after motion
+ // For example a static image after a fade or other transition
+ // instead of a clean scene cut.
+ if ((frame_interval > MIN_GF_INTERVAL) &&
+ (loop_decay_rate >= 0.999) &&
+ (last_decay_rate < 0.9)) {
+ int j;
+ FIRSTPASS_STATS *position = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_next_frame;
+ double zz_inter;
+
+ // Look ahead a few frames to see if static condition
+ // persists...
+ for (j = 0; j < still_interval; j++) {
+ if (EOF == input_stats(cpi, &tmp_next_frame))
+ break;
- // Only if it does do we signal a transition to still
- if ( j == still_interval )
- trans_to_still = TRUE;
+ zz_inter =
+ (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion);
+ if (zz_inter < 0.999)
+ break;
}
+ // Reset file position
+ reset_fpf_position(cpi, position);
- return trans_to_still;
+ // Only if it does do we signal a transition to still
+ if (j == still_interval)
+ trans_to_still = TRUE;
+ }
+
+ return trans_to_still;
}
// This function detects a flash through the high relative pcnt_second_ref
// score in the frame following a flash frame. The offset passed in should
// reflect this.
-static BOOL detect_flash( VP8_COMP *cpi, int offset )
-{
- FIRSTPASS_STATS next_frame;
-
- BOOL flash_detected = FALSE;
-
- // Read the frame data.
- // The return is FALSE (no flash detected) if not a valid frame
- if ( read_frame_stats(cpi, &next_frame, offset) != EOF )
- {
- // What we are looking for here is a situation where there is a
- // brief break in prediction (such as a flash) but subsequent frames
- // are reasonably well predicted by an earlier (pre flash) frame.
- // The recovery after a flash is indicated by a high pcnt_second_ref
- // comapred to pcnt_inter.
- if ( (next_frame.pcnt_second_ref > next_frame.pcnt_inter) &&
- (next_frame.pcnt_second_ref >= 0.5 ) )
- {
- flash_detected = TRUE;
- }
+static BOOL detect_flash(VP8_COMP *cpi, int offset) {
+ FIRSTPASS_STATS next_frame;
+
+ BOOL flash_detected = FALSE;
+
+ // Read the frame data.
+ // The return is FALSE (no flash detected) if not a valid frame
+ if (read_frame_stats(cpi, &next_frame, offset) != EOF) {
+ // What we are looking for here is a situation where there is a
+ // brief break in prediction (such as a flash) but subsequent frames
+ // are reasonably well predicted by an earlier (pre flash) frame.
+ // The recovery after a flash is indicated by a high pcnt_second_ref
+    // compared to pcnt_inter.
+ if ((next_frame.pcnt_second_ref > next_frame.pcnt_inter) &&
+ (next_frame.pcnt_second_ref >= 0.5)) {
+ flash_detected = TRUE;
}
+ }
- return flash_detected;
+ return flash_detected;
}
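
Stripped of the stats I/O, the flash test above reduces to a single predicate on the frame that follows the suspected flash; a hypothetical standalone form:

/* Sketch: recovery frames after a flash predict better from the older
 * (pre-flash) second reference than from the flash frame itself. */
static int looks_like_flash_recovery(double pcnt_second_ref,
                                     double pcnt_inter) {
  return (pcnt_second_ref > pcnt_inter) && (pcnt_second_ref >= 0.5);
}
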
// Update the motion-related elements of the GF/arf boost calculation
static void accumulate_frame_motion_stats(
- VP8_COMP *cpi,
- FIRSTPASS_STATS * this_frame,
- double * this_frame_mv_in_out,
- double * mv_in_out_accumulator,
- double * abs_mv_in_out_accumulator,
- double * mv_ratio_accumulator )
-{
- //double this_frame_mv_in_out;
- double this_frame_mvr_ratio;
- double this_frame_mvc_ratio;
- double motion_pct;
-
- // Accumulate motion stats.
- motion_pct = this_frame->pcnt_motion;
-
- // Accumulate Motion In/Out of frame stats
- *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
- *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
- *abs_mv_in_out_accumulator +=
- fabs(this_frame->mv_in_out_count * motion_pct);
-
- // Accumulate a measure of how uniform (or conversely how random)
- // the motion field is. (A ratio of absmv / mv)
- if (motion_pct > 0.05)
- {
- this_frame_mvr_ratio = fabs(this_frame->mvr_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr));
-
- this_frame_mvc_ratio = fabs(this_frame->mvc_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc));
-
- *mv_ratio_accumulator +=
- (this_frame_mvr_ratio < this_frame->mvr_abs)
- ? (this_frame_mvr_ratio * motion_pct)
- : this_frame->mvr_abs * motion_pct;
-
- *mv_ratio_accumulator +=
- (this_frame_mvc_ratio < this_frame->mvc_abs)
- ? (this_frame_mvc_ratio * motion_pct)
- : this_frame->mvc_abs * motion_pct;
-
- }
+ VP8_COMP *cpi,
+ FIRSTPASS_STATS *this_frame,
+ double *this_frame_mv_in_out,
+ double *mv_in_out_accumulator,
+ double *abs_mv_in_out_accumulator,
+ double *mv_ratio_accumulator) {
+ // double this_frame_mv_in_out;
+ double this_frame_mvr_ratio;
+ double this_frame_mvc_ratio;
+ double motion_pct;
+
+ // Accumulate motion stats.
+ motion_pct = this_frame->pcnt_motion;
+
+ // Accumulate Motion In/Out of frame stats
+ *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
+ *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
+ *abs_mv_in_out_accumulator +=
+ fabs(this_frame->mv_in_out_count * motion_pct);
+
+ // Accumulate a measure of how uniform (or conversely how random)
+ // the motion field is. (A ratio of absmv / mv)
+ if (motion_pct > 0.05) {
+ this_frame_mvr_ratio = fabs(this_frame->mvr_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr));
+
+ this_frame_mvc_ratio = fabs(this_frame->mvc_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc));
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvr_ratio < this_frame->mvr_abs)
+ ? (this_frame_mvr_ratio * motion_pct)
+ : this_frame->mvr_abs * motion_pct;
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvc_ratio < this_frame->mvc_abs)
+ ? (this_frame_mvc_ratio * motion_pct)
+ : this_frame->mvc_abs * motion_pct;
+
+ }
}
// Calculate a baseline boost number for the current frame.
static double calc_frame_boost(
- VP8_COMP *cpi,
- FIRSTPASS_STATS * this_frame,
- double this_frame_mv_in_out )
-{
- double frame_boost;
-
- // Underlying boost factor is based on inter intra error ratio
- if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
- frame_boost = (IIFACTOR * this_frame->intra_error /
- DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
- else
- frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
- DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
-
- // Increase boost for frames where new data coming into frame
- // (eg zoom out). Slightly reduce boost if there is a net balance
- // of motion out of the frame (zoom in).
- // The range for this_frame_mv_in_out is -1.0 to +1.0
- if (this_frame_mv_in_out > 0.0)
- frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
- // In extreme case boost is halved
- else
- frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
-
- // Clip to maximum
- if (frame_boost > GF_RMAX)
- frame_boost = GF_RMAX;
-
- return frame_boost;
+ VP8_COMP *cpi,
+ FIRSTPASS_STATS *this_frame,
+ double this_frame_mv_in_out) {
+ double frame_boost;
+
+ // Underlying boost factor is based on inter intra error ratio
+ if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
+ frame_boost = (IIFACTOR * this_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+ else
+ frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+
+ // Increase boost for frames where new data coming into frame
+ // (eg zoom out). Slightly reduce boost if there is a net balance
+ // of motion out of the frame (zoom in).
+ // The range for this_frame_mv_in_out is -1.0 to +1.0
+ if (this_frame_mv_in_out > 0.0)
+ frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
+ // In extreme case boost is halved
+ else
+ frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
+
+ // Clip to maximum
+ if (frame_boost > GF_RMAX)
+ frame_boost = GF_RMAX;
+
+ return frame_boost;
}
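
The motion adjustment in calc_frame_boost() is asymmetric: with this_frame_mv_in_out at +1.0 (everything entering the frame) the boost triples, while at -1.0 it is halved, matching the comment above. A sketch of just that step:

/* Sketch only: mv_in_out ranges over [-1.0, +1.0]. */
static double adjust_boost_for_motion(double boost, double mv_in_out) {
  if (mv_in_out > 0.0)
    boost += boost * (mv_in_out * 2.0); /* +1.0 -> boost triples */
  else
    boost += boost * (mv_in_out / 2.0); /* -1.0 -> boost halves  */
  return boost;
}
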
static int calc_arf_boost(
- VP8_COMP *cpi,
- int offset,
- int f_frames,
- int b_frames,
- int *f_boost,
- int *b_boost )
-{
- FIRSTPASS_STATS this_frame;
-
- int i;
- double boost_score = 0.0;
- double mv_ratio_accumulator = 0.0;
- double decay_accumulator = 1.0;
- double this_frame_mv_in_out = 0.0;
- double mv_in_out_accumulator = 0.0;
- double abs_mv_in_out_accumulator = 0.0;
- int arf_boost;
- BOOL flash_detected = FALSE;
-
- // Search forward from the proposed arf/next gf position
- for ( i = 0; i < f_frames; i++ )
- {
- if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF )
- break;
-
- // Update the motion related elements to the boost calculation
- accumulate_frame_motion_stats( cpi, &this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator, &mv_ratio_accumulator );
-
- // We want to discount the the flash frame itself and the recovery
- // frame that follows as both will have poor scores.
- flash_detected = detect_flash(cpi, (i+offset)) ||
- detect_flash(cpi, (i+offset+1));
-
- // Cumulative effect of prediction quality decay
- if ( !flash_detected )
- {
- decay_accumulator =
- decay_accumulator *
- get_prediction_decay_rate(cpi, &this_frame);
- decay_accumulator =
- decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
- }
-
- boost_score += (decay_accumulator *
- calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out ));
+ VP8_COMP *cpi,
+ int offset,
+ int f_frames,
+ int b_frames,
+ int *f_boost,
+ int *b_boost) {
+ FIRSTPASS_STATS this_frame;
+
+ int i;
+ double boost_score = 0.0;
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ int arf_boost;
+ BOOL flash_detected = FALSE;
+
+ // Search forward from the proposed arf/next gf position
+ for (i = 0; i < f_frames; i++) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator =
+ decay_accumulator *
+ get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator =
+ decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
}
- *f_boost = boost_score;
-
- // Reset for backward looking loop
- boost_score = 0.0;
- mv_ratio_accumulator = 0.0;
- decay_accumulator = 1.0;
- this_frame_mv_in_out = 0.0;
- mv_in_out_accumulator = 0.0;
- abs_mv_in_out_accumulator = 0.0;
-
- // Search backward towards last gf position
- for ( i = -1; i >= -b_frames; i-- )
- {
- if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF )
- break;
-
- // Update the motion related elements to the boost calculation
- accumulate_frame_motion_stats( cpi, &this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator, &mv_ratio_accumulator );
-
- // We want to discount the the flash frame itself and the recovery
- // frame that follows as both will have poor scores.
- flash_detected = detect_flash(cpi, (i+offset)) ||
- detect_flash(cpi, (i+offset+1));
-
- // Cumulative effect of prediction quality decay
- if ( !flash_detected )
- {
- decay_accumulator =
- decay_accumulator *
- get_prediction_decay_rate(cpi, &this_frame);
- decay_accumulator =
- decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
- }
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
+ }
+
+ *f_boost = boost_score;
+
+ // Reset for backward looking loop
+ boost_score = 0.0;
+ mv_ratio_accumulator = 0.0;
+ decay_accumulator = 1.0;
+ this_frame_mv_in_out = 0.0;
+ mv_in_out_accumulator = 0.0;
+ abs_mv_in_out_accumulator = 0.0;
+
+ // Search backward towards last gf position
+ for (i = -1; i >= -b_frames; i--) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator =
+ decay_accumulator *
+ get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator =
+ decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
+ }
- boost_score += (decay_accumulator *
- calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out ));
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
- }
- *b_boost = boost_score;
+ }
+ *b_boost = boost_score;
- arf_boost = (*f_boost + *b_boost);
- if ( arf_boost < ((b_frames + f_frames) * 20) )
- arf_boost = ((b_frames + f_frames) * 20);
+ arf_boost = (*f_boost + *b_boost);
+ if (arf_boost < ((b_frames + f_frames) * 20))
+ arf_boost = ((b_frames + f_frames) * 20);
- return arf_boost;
+ return arf_boost;
}
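
The clamp at the end of calc_arf_boost() floors the combined score at 20 points per frame searched, so an ARF looking, say, 7 frames forward and 7 back never scores below 280 (hypothetical numbers). Restated as a sketch:

/* Sketch of the floor applied above. */
static int floored_arf_boost(int f_boost, int b_boost,
                             int f_frames, int b_frames) {
  int boost = f_boost + b_boost;
  int min_boost = (b_frames + f_frames) * 20;
  return (boost < min_boost) ? min_boost : boost;
}
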
-static void configure_arnr_filter( VP8_COMP *cpi, FIRSTPASS_STATS *this_frame )
-{
- int half_gf_int;
- int frames_after_arf;
- int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
- int frames_fwd = cpi->oxcf.arnr_max_frames - 1;
-
- // Define the arnr filter width for this group of frames:
- // We only filter frames that lie within a distance of half
- // the GF interval from the ARF frame. We also have to trap
- // cases where the filter extends beyond the end of clip.
- // Note: this_frame->frame has been updated in the loop
- // so it now points at the ARF frame.
- half_gf_int = cpi->baseline_gf_interval >> 1;
- frames_after_arf = cpi->twopass.total_stats->count -
- this_frame->frame - 1;
-
- switch (cpi->oxcf.arnr_type)
- {
+static void configure_arnr_filter(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int half_gf_int;
+ int frames_after_arf;
+ int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
+ int frames_fwd = cpi->oxcf.arnr_max_frames - 1;
+
+ // Define the arnr filter width for this group of frames:
+ // We only filter frames that lie within a distance of half
+ // the GF interval from the ARF frame. We also have to trap
+ // cases where the filter extends beyond the end of clip.
+ // Note: this_frame->frame has been updated in the loop
+ // so it now points at the ARF frame.
+ half_gf_int = cpi->baseline_gf_interval >> 1;
+ frames_after_arf = cpi->twopass.total_stats->count -
+ this_frame->frame - 1;
+
+ switch (cpi->oxcf.arnr_type) {
case 1: // Backward filter
- frames_fwd = 0;
- if (frames_bwd > half_gf_int)
- frames_bwd = half_gf_int;
- break;
+ frames_fwd = 0;
+ if (frames_bwd > half_gf_int)
+ frames_bwd = half_gf_int;
+ break;
case 2: // Forward filter
- if (frames_fwd > half_gf_int)
- frames_fwd = half_gf_int;
- if (frames_fwd > frames_after_arf)
- frames_fwd = frames_after_arf;
- frames_bwd = 0;
- break;
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ frames_bwd = 0;
+ break;
case 3: // Centered filter
default:
- frames_fwd >>= 1;
- if (frames_fwd > frames_after_arf)
- frames_fwd = frames_after_arf;
- if (frames_fwd > half_gf_int)
- frames_fwd = half_gf_int;
-
- frames_bwd = frames_fwd;
-
- // For even length filter there is one more frame backward
- // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
- if (frames_bwd < half_gf_int)
- frames_bwd += (cpi->oxcf.arnr_max_frames+1) & 0x1;
- break;
- }
-
- cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;
+ frames_fwd >>= 1;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+
+ frames_bwd = frames_fwd;
+
+ // For even length filter there is one more frame backward
+ // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ if (frames_bwd < half_gf_int)
+ frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;
+ break;
+ }
+
+ cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;
}
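
Working the centered (type 3) case through by hand, ignoring the half-GF-interval and end-of-clip clamps, reproduces the bbbAff / bbbAfff examples from the comment above:

#include <stdio.h>

/* Sketch of the centered width selection, clamps omitted. */
static void centered_arnr_widths(int arnr_max_frames, int *bwd, int *fwd) {
  *fwd = (arnr_max_frames - 1) >> 1;
  *bwd = *fwd + ((arnr_max_frames + 1) & 0x1); /* extra back frame if even */
}

int main(void) {
  int b, f;
  centered_arnr_widths(6, &b, &f);
  printf("len=6: %d back, ARF, %d fwd (bbbAff)\n", b, f);  /* 3 and 2 */
  centered_arnr_widths(7, &b, &f);
  printf("len=7: %d back, ARF, %d fwd (bbbAfff)\n", b, f); /* 3 and 3 */
  return 0;
}
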
// Analyse and define a gf/arf group.
-static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
-{
- FIRSTPASS_STATS next_frame;
- FIRSTPASS_STATS *start_pos;
- int i;
- double boost_score = 0.0;
- double old_boost_score = 0.0;
- double gf_group_err = 0.0;
- double gf_first_frame_err = 0.0;
- double mod_frame_err = 0.0;
+static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_pos;
+ int i;
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double gf_group_err = 0.0;
+ double gf_first_frame_err = 0.0;
+ double mod_frame_err = 0.0;
- double mv_ratio_accumulator = 0.0;
- double decay_accumulator = 1.0;
- double zero_motion_accumulator = 1.0;
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
- double loop_decay_rate = 1.00; // Starting decay rate
- double last_loop_decay_rate = 1.00;
+ double loop_decay_rate = 1.00; // Starting decay rate
+ double last_loop_decay_rate = 1.00;
- double this_frame_mv_in_out = 0.0;
- double mv_in_out_accumulator = 0.0;
- double abs_mv_in_out_accumulator = 0.0;
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
- int max_bits = frame_max_bits(cpi); // Max for a single frame
+ int max_bits = frame_max_bits(cpi); // Max for a single frame
- unsigned int allow_alt_ref =
- cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
+ unsigned int allow_alt_ref =
+ cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
- int f_boost = 0;
- int b_boost = 0;
- BOOL flash_detected;
+ int f_boost = 0;
+ int b_boost = 0;
+ BOOL flash_detected;
- cpi->twopass.gf_group_bits = 0;
+ cpi->twopass.gf_group_bits = 0;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state(); // __asm emms;
- start_pos = cpi->twopass.stats_in;
-
- vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+ start_pos = cpi->twopass.stats_in;
- // Load stats for the current frame.
- mod_frame_err = calculate_modified_err(cpi, this_frame);
+ vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
- // Note the error of the frame at the start of the group (this will be
- // the GF frame error if we code a normal gf
- gf_first_frame_err = mod_frame_err;
+ // Load stats for the current frame.
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
- // Special treatment if the current frame is a key frame (which is also
- // a gf). If it is then its error score (and hence bit allocation) need
- // to be subtracted out from the calculation for the GF group
- if (cpi->common.frame_type == KEY_FRAME)
- gf_group_err -= gf_first_frame_err;
+ // Note the error of the frame at the start of the group (this will be
+  // the GF frame error if we code a normal gf)
+ gf_first_frame_err = mod_frame_err;
- // Scan forward to try and work out how many frames the next gf group
- // should contain and what level of boost is appropriate for the GF
- // or ARF that will be coded with the group
- i = 0;
+ // Special treatment if the current frame is a key frame (which is also
+ // a gf). If it is then its error score (and hence bit allocation) need
+ // to be subtracted out from the calculation for the GF group
+ if (cpi->common.frame_type == KEY_FRAME)
+ gf_group_err -= gf_first_frame_err;
- while (((i < cpi->twopass.static_scene_max_gf_interval) ||
- ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
- (i < cpi->twopass.frames_to_key))
- {
- i++; // Increment the loop counter
+ // Scan forward to try and work out how many frames the next gf group
+ // should contain and what level of boost is appropriate for the GF
+ // or ARF that will be coded with the group
+ i = 0;
- // Accumulate error score of frames in this gf group
- mod_frame_err = calculate_modified_err(cpi, this_frame);
- gf_group_err += mod_frame_err;
-
- if (EOF == input_stats(cpi, &next_frame))
- break;
-
- // Test for the case where there is a brief flash but the prediction
- // quality back to an earlier frame is then restored.
- flash_detected = detect_flash(cpi, 0);
-
- // Update the motion related elements to the boost calculation
- accumulate_frame_motion_stats( cpi, &next_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator, &mv_ratio_accumulator );
-
- // Cumulative effect of prediction quality decay
- if ( !flash_detected )
- {
- last_loop_decay_rate = loop_decay_rate;
- loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
- decay_accumulator = decay_accumulator * loop_decay_rate;
-
- // Monitor for static sections.
- if ( (next_frame.pcnt_inter - next_frame.pcnt_motion) <
- zero_motion_accumulator )
- {
- zero_motion_accumulator =
- (next_frame.pcnt_inter - next_frame.pcnt_motion);
- }
+ while (((i < cpi->twopass.static_scene_max_gf_interval) ||
+ ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+ (i < cpi->twopass.frames_to_key)) {
+ i++; // Increment the loop counter
- // Break clause to detect very still sections after motion
- // (for example a staic image after a fade or other transition).
- if ( detect_transition_to_still( cpi, i, 5, loop_decay_rate,
- last_loop_decay_rate ) )
- {
- allow_alt_ref = FALSE;
- break;
- }
- }
+ // Accumulate error score of frames in this gf group
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ // Test for the case where there is a brief flash but the prediction
+ // quality back to an earlier frame is then restored.
+ flash_detected = detect_flash(cpi, 0);
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &next_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ last_loop_decay_rate = loop_decay_rate;
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
+ }
+
+ // Break clause to detect very still sections after motion
+      // (for example a static image after a fade or other transition).
+ if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
+ last_loop_decay_rate)) {
+ allow_alt_ref = FALSE;
+ break;
+ }
+ }
- // Calculate a boost number for this frame
- boost_score +=
- ( decay_accumulator *
- calc_frame_boost( cpi, &next_frame, this_frame_mv_in_out ) );
-
- // Break out conditions.
- if (
- // Break at cpi->max_gf_interval unless almost totally static
- (i >= cpi->max_gf_interval && (zero_motion_accumulator < 0.995)) ||
- (
- // Dont break out with a very short interval
- (i > MIN_GF_INTERVAL) &&
- // Dont break out very close to a key frame
- ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
- ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
- (!flash_detected) &&
- ((mv_ratio_accumulator > 100.0) ||
- (abs_mv_in_out_accumulator > 3.0) ||
- (mv_in_out_accumulator < -2.0) ||
- ((boost_score - old_boost_score) < 12.5))
- ) )
- {
- boost_score = old_boost_score;
- break;
- }
+ // Calculate a boost number for this frame
+ boost_score +=
+ (decay_accumulator *
+ calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out));
+
+ // Break out conditions.
+ if (
+ // Break at cpi->max_gf_interval unless almost totally static
+ (i >= cpi->max_gf_interval && (zero_motion_accumulator < 0.995)) ||
+ (
+        // Don't break out with a very short interval
+        (i > MIN_GF_INTERVAL) &&
+        // Don't break out very close to a key frame
+ ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
+ ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > 100.0) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < 12.5))
+ )) {
+ boost_score = old_boost_score;
+ break;
+ }
- vpx_memcpy(this_frame, &next_frame, sizeof(*this_frame));
+ vpx_memcpy(this_frame, &next_frame, sizeof(*this_frame));
- old_boost_score = boost_score;
- }
+ old_boost_score = boost_score;
+ }
- // Dont allow a gf too near the next kf
- if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)
- {
- while (i < cpi->twopass.frames_to_key)
- {
- i++;
+  // Don't allow a gf too near the next kf
+ if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
+ while (i < cpi->twopass.frames_to_key) {
+ i++;
- if (EOF == input_stats(cpi, this_frame))
- break;
+ if (EOF == input_stats(cpi, this_frame))
+ break;
- if (i < cpi->twopass.frames_to_key)
- {
- mod_frame_err = calculate_modified_err(cpi, this_frame);
- gf_group_err += mod_frame_err;
- }
- }
+ if (i < cpi->twopass.frames_to_key) {
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+ }
}
+ }
+
+ // Set the interval till the next gf or arf.
+ cpi->baseline_gf_interval = i;
+
+  // Should we use the alternate reference frame?
+ if (allow_alt_ref &&
+ (i < cpi->oxcf.lag_in_frames) &&
+ (i >= MIN_GF_INTERVAL) &&
+      // don't use ARF very near next kf
+ (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
+ ((next_frame.pcnt_inter > 0.75) ||
+ (next_frame.pcnt_second_ref > 0.5)) &&
+ ((mv_in_out_accumulator / (double)i > -0.2) ||
+ (mv_in_out_accumulator > -2.0)) &&
+ (boost_score > 100)) {
+    // Alternative boost calculation for alt ref
+ cpi->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
+ cpi->source_alt_ref_pending = TRUE;
+
+ configure_arnr_filter(cpi, this_frame);
+ } else {
+ cpi->gfu_boost = (int)boost_score;
+ cpi->source_alt_ref_pending = FALSE;
+ }
+
+ // Now decide how many bits should be allocated to the GF group as a
+ // proportion of those remaining in the kf group.
+ // The final key frame group in the clip is treated as a special case
+ // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
+ // This is also important for short clips where there may only be one
+ // key frame.
+ if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats->count -
+ cpi->common.current_video_frame)) {
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
+ }
+
+ // Calculate the bits to be allocated to the group as a whole
+ if ((cpi->twopass.kf_group_bits > 0) &&
+ (cpi->twopass.kf_group_error_left > 0)) {
+ cpi->twopass.gf_group_bits =
+ (int)((double)cpi->twopass.kf_group_bits *
+ (gf_group_err / (double)cpi->twopass.kf_group_error_left));
+ } else
+ cpi->twopass.gf_group_bits = 0;
- // Set the interval till the next gf or arf.
- cpi->baseline_gf_interval = i;
-
- // Should we use the alternate refernce frame
- if (allow_alt_ref &&
- (i < cpi->oxcf.lag_in_frames ) &&
- (i >= MIN_GF_INTERVAL) &&
- // dont use ARF very near next kf
- (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
- ((next_frame.pcnt_inter > 0.75) ||
- (next_frame.pcnt_second_ref > 0.5)) &&
- ((mv_in_out_accumulator / (double)i > -0.2) ||
- (mv_in_out_accumulator > -2.0)) &&
- (boost_score > 100))
- {
- // Alterrnative boost calculation for alt ref
- cpi->gfu_boost = calc_arf_boost( cpi, 0, (i-1), (i-1), &f_boost, &b_boost );
- cpi->source_alt_ref_pending = TRUE;
-
- configure_arnr_filter( cpi, this_frame );
- }
+ cpi->twopass.gf_group_bits =
+ (cpi->twopass.gf_group_bits < 0)
+ ? 0
+ : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
+ ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
+
+ // Clip cpi->twopass.gf_group_bits based on user supplied data rate
+ // variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
+
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
+
+ // Update the record of error used so far (only done once per gf group)
+ cpi->twopass.modified_error_used += gf_group_err;
+
+ // Assign bits to the arf or gf.
+ for (i = 0; i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); i++) {
+ int boost;
+ int allocation_chunks;
+ int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+ int gf_bits;
+
+ boost = (cpi->gfu_boost * vp8_gfboost_qadjust(Q)) / 100;
+
+ // Set max and minimum boost and hence minimum allocation
+ if (boost > ((cpi->baseline_gf_interval + 1) * 200))
+ boost = ((cpi->baseline_gf_interval + 1) * 200);
+ else if (boost < 125)
+ boost = 125;
+
+ if (cpi->source_alt_ref_pending && i == 0)
+ allocation_chunks =
+ ((cpi->baseline_gf_interval + 1) * 100) + boost;
else
- {
- cpi->gfu_boost = (int)boost_score;
- cpi->source_alt_ref_pending = FALSE;
+ allocation_chunks =
+ (cpi->baseline_gf_interval * 100) + (boost - 100);
+
+ // Prevent overflow
+ if (boost > 1028) {
+ int divisor = boost >> 10;
+ boost /= divisor;
+ allocation_chunks /= divisor;
}
- // Now decide how many bits should be allocated to the GF group as a
- // proportion of those remaining in the kf group.
- // The final key frame group in the clip is treated as a special case
- // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
- // This is also important for short clips where there may only be one
- // key frame.
- if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats->count -
- cpi->common.current_video_frame))
- {
- cpi->twopass.kf_group_bits =
- (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
+ // Calculate the number of bits to be spent on the gf or arf based on
+ // the boost number
+ gf_bits = (int)((double)boost *
+ (cpi->twopass.gf_group_bits /
+ (double)allocation_chunks));
+
+ // If the frame that is to be boosted is simpler than the average for
+ // the gf/arf group then use an alternative calculation
+ // based on the error score of the frame itself
+ if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) {
+ double alt_gf_grp_bits;
+ int alt_gf_bits;
+
+ alt_gf_grp_bits =
+ (double)cpi->twopass.kf_group_bits *
+ (mod_frame_err * (double)cpi->baseline_gf_interval) /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left);
+
+ alt_gf_bits = (int)((double)boost * (alt_gf_grp_bits /
+ (double)allocation_chunks));
+
+ if (gf_bits > alt_gf_bits) {
+ gf_bits = alt_gf_bits;
+ }
}
-
- // Calculate the bits to be allocated to the group as a whole
- if ((cpi->twopass.kf_group_bits > 0) &&
- (cpi->twopass.kf_group_error_left > 0))
- {
- cpi->twopass.gf_group_bits =
- (int)((double)cpi->twopass.kf_group_bits *
- (gf_group_err / (double)cpi->twopass.kf_group_error_left));
+ // Else if it is harder than other frames in the group make sure it at
+ // least receives an allocation in keeping with its relative error
+ // score, otherwise it may be worse off than an "un-boosted" frame
+ else {
+ int alt_gf_bits =
+ (int)((double)cpi->twopass.kf_group_bits *
+ mod_frame_err /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left));
+
+ if (alt_gf_bits > gf_bits) {
+ gf_bits = alt_gf_bits;
+ }
}
- else
- cpi->twopass.gf_group_bits = 0;
-
- cpi->twopass.gf_group_bits =
- (cpi->twopass.gf_group_bits < 0)
- ? 0
- : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
- ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
-
- // Clip cpi->twopass.gf_group_bits based on user supplied data rate
- // variability limit (cpi->oxcf.two_pass_vbrmax_section)
- if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
- cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
-
- // Reset the file position
- reset_fpf_position(cpi, start_pos);
-
- // Update the record of error used so far (only done once per gf group)
- cpi->twopass.modified_error_used += gf_group_err;
-
- // Assign bits to the arf or gf.
- for (i = 0; i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); i++)
- {
- int boost;
- int allocation_chunks;
- int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
- int gf_bits;
-
- boost = (cpi->gfu_boost * vp8_gfboost_qadjust(Q)) / 100;
-
- // Set max and minimum boost and hence minimum allocation
- if (boost > ((cpi->baseline_gf_interval + 1) * 200))
- boost = ((cpi->baseline_gf_interval + 1) * 200);
- else if (boost < 125)
- boost = 125;
-
- if ( cpi->source_alt_ref_pending && i == 0 )
- allocation_chunks =
- ((cpi->baseline_gf_interval + 1) * 100) + boost;
- else
- allocation_chunks =
- (cpi->baseline_gf_interval * 100) + (boost - 100);
-
- // Prevent overflow
- if ( boost > 1028 )
- {
- int divisor = boost >> 10;
- boost/= divisor;
- allocation_chunks /= divisor;
- }
-
- // Calculate the number of bits to be spent on the gf or arf based on
- // the boost number
- gf_bits = (int)((double)boost *
- (cpi->twopass.gf_group_bits /
- (double)allocation_chunks));
-
- // If the frame that is to be boosted is simpler than the average for
- // the gf/arf group then use an alternative calculation
- // based on the error score of the frame itself
- if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval)
- {
- double alt_gf_grp_bits;
- int alt_gf_bits;
-
- alt_gf_grp_bits =
- (double)cpi->twopass.kf_group_bits *
- (mod_frame_err * (double)cpi->baseline_gf_interval) /
- DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left);
-
- alt_gf_bits = (int)((double)boost * (alt_gf_grp_bits /
- (double)allocation_chunks));
-
- if (gf_bits > alt_gf_bits)
- {
- gf_bits = alt_gf_bits;
- }
- }
- // Else if it is harder than other frames in the group make sure it at
- // least receives an allocation in keeping with its relative error
- // score, otherwise it may be worse off than an "un-boosted" frame
- else
- {
- int alt_gf_bits =
- (int)((double)cpi->twopass.kf_group_bits *
- mod_frame_err /
- DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left));
-
- if (alt_gf_bits > gf_bits)
- {
- gf_bits = alt_gf_bits;
- }
- }
- // Dont allow a negative value for gf_bits
- if (gf_bits < 0)
- gf_bits = 0;
+    // Don't allow a negative value for gf_bits
+ if (gf_bits < 0)
+ gf_bits = 0;
- gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
+ gf_bits += cpi->min_frame_bandwidth; // Add in minimum for a frame
- if (i == 0)
- {
- cpi->twopass.gf_bits = gf_bits;
- }
- if (i == 1 || (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)))
- {
- cpi->per_frame_bandwidth = gf_bits; // Per frame bit target for this frame
- }
+ if (i == 0) {
+ cpi->twopass.gf_bits = gf_bits;
}
-
- {
- // Adjust KF group bits and error remainin
- cpi->twopass.kf_group_error_left -= gf_group_err;
- cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
-
- if (cpi->twopass.kf_group_bits < 0)
- cpi->twopass.kf_group_bits = 0;
-
- // Note the error score left in the remaining frames of the group.
- // For normal GFs we want to remove the error score for the first frame
- // of the group (except in Key frame case where this has already
- // happened)
- if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
- cpi->twopass.gf_group_error_left = gf_group_err - gf_first_frame_err;
- else
- cpi->twopass.gf_group_error_left = gf_group_err;
-
- cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits - cpi->min_frame_bandwidth;
-
- if (cpi->twopass.gf_group_bits < 0)
- cpi->twopass.gf_group_bits = 0;
-
- // This condition could fail if there are two kfs very close together
- // despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the
- // calculation of cpi->twopass.alt_extra_bits.
- if ( cpi->baseline_gf_interval >= 3 )
- {
- int boost = (cpi->source_alt_ref_pending)
- ? b_boost : cpi->gfu_boost;
-
- if ( boost >= 150 )
- {
- int pct_extra;
-
- pct_extra = (boost - 100) / 50;
- pct_extra = (pct_extra > 20) ? 20 : pct_extra;
-
- cpi->twopass.alt_extra_bits =
- (cpi->twopass.gf_group_bits * pct_extra) / 100;
- cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits;
- cpi->twopass.alt_extra_bits /=
- ((cpi->baseline_gf_interval-1)>>1);
- }
- else
- cpi->twopass.alt_extra_bits = 0;
- }
- else
- cpi->twopass.alt_extra_bits = 0;
+ if (i == 1 || (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME))) {
+ cpi->per_frame_bandwidth = gf_bits; // Per frame bit target for this frame
}
+ }
+
+ {
+    // Adjust KF group bits and error remaining
+ cpi->twopass.kf_group_error_left -= gf_group_err;
+ cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
+
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0;
+
+ // Note the error score left in the remaining frames of the group.
+ // For normal GFs we want to remove the error score for the first frame
+ // of the group (except in Key frame case where this has already
+ // happened)
+ if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
+ cpi->twopass.gf_group_error_left = gf_group_err - gf_first_frame_err;
+ else
+ cpi->twopass.gf_group_error_left = gf_group_err;
- if (cpi->common.frame_type != KEY_FRAME)
- {
- FIRSTPASS_STATS sectionstats;
+ cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits - cpi->min_frame_bandwidth;
- zero_stats(&sectionstats);
- reset_fpf_position(cpi, start_pos);
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+
+ // This condition could fail if there are two kfs very close together
+    // despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
+ // calculation of cpi->twopass.alt_extra_bits.
+ if (cpi->baseline_gf_interval >= 3) {
+ int boost = (cpi->source_alt_ref_pending)
+ ? b_boost : cpi->gfu_boost;
+
+ if (boost >= 150) {
+ int pct_extra;
+
+ pct_extra = (boost - 100) / 50;
+ pct_extra = (pct_extra > 20) ? 20 : pct_extra;
+
+ cpi->twopass.alt_extra_bits =
+ (cpi->twopass.gf_group_bits * pct_extra) / 100;
+ cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits;
+ cpi->twopass.alt_extra_bits /=
+ ((cpi->baseline_gf_interval - 1) >> 1);
+ } else
+ cpi->twopass.alt_extra_bits = 0;
+ } else
+ cpi->twopass.alt_extra_bits = 0;
+ }
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ FIRSTPASS_STATS sectionstats;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_pos);
- for (i = 0 ; i < cpi->baseline_gf_interval ; i++)
- {
- input_stats(cpi, &next_frame);
- accumulate_stats(&sectionstats, &next_frame);
- }
+ for (i = 0; i < cpi->baseline_gf_interval; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
- avg_stats(&sectionstats);
+ avg_stats(&sectionstats);
- cpi->twopass.section_intra_rating =
- sectionstats.intra_error /
- DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ cpi->twopass.section_intra_rating =
+ sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
- reset_fpf_position(cpi, start_pos);
- }
+ reset_fpf_position(cpi, start_pos);
+ }
}
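
One hypothetical walk-through of the gf/arf share computed in define_gf_group(): with baseline_gf_interval = 7, boost = 300 and an ARF pending (the i == 0 branch), allocation_chunks = (7 + 1) * 100 + 300 = 1100, so the ARF receives 300/1100, roughly 27%, of the group budget. The core division as a sketch:

/* Sketch: gf_share_bits(50000, 300, 1100) -> 13636 of a hypothetical
 * 50000-bit group budget. */
static int gf_share_bits(int group_bits, int boost, int allocation_chunks) {
  return (int)((double)boost * (group_bits / (double)allocation_chunks));
}
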
// Allocate bits to a normal frame that is neither a gf, an arf, nor a key frame.
-static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
-{
- int target_frame_size; // gf_group_error_left
+static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int target_frame_size; // gf_group_error_left
- double modified_err;
- double err_fraction; // What portion of the remaining GF group error is used by this frame
+ double modified_err;
+ double err_fraction; // What portion of the remaining GF group error is used by this frame
- int max_bits = frame_max_bits(cpi); // Max for a single frame
+ int max_bits = frame_max_bits(cpi); // Max for a single frame
- // Calculate modified prediction error used in bit allocation
- modified_err = calculate_modified_err(cpi, this_frame);
+ // Calculate modified prediction error used in bit allocation
+ modified_err = calculate_modified_err(cpi, this_frame);
- if (cpi->twopass.gf_group_error_left > 0)
- err_fraction = modified_err / cpi->twopass.gf_group_error_left; // What portion of the remaining GF group error is used by this frame
- else
- err_fraction = 0.0;
+ if (cpi->twopass.gf_group_error_left > 0)
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left; // What portion of the remaining GF group error is used by this frame
+ else
+ err_fraction = 0.0;
- target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); // How many of those bits available for allocation should we give it?
- // Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at the top end.
- if (target_frame_size < 0)
- target_frame_size = 0;
- else
- {
- if (target_frame_size > max_bits)
- target_frame_size = max_bits;
+  // Clip the target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at the top end.
+ if (target_frame_size < 0)
+ target_frame_size = 0;
+ else {
+ if (target_frame_size > max_bits)
+ target_frame_size = max_bits;
- if (target_frame_size > cpi->twopass.gf_group_bits)
- target_frame_size = cpi->twopass.gf_group_bits;
- }
+ if (target_frame_size > cpi->twopass.gf_group_bits)
+ target_frame_size = cpi->twopass.gf_group_bits;
+ }
- cpi->twopass.gf_group_error_left -= modified_err; // Adjust error remaining
- cpi->twopass.gf_group_bits -= target_frame_size; // Adjust bits remaining
+ cpi->twopass.gf_group_error_left -= modified_err; // Adjust error remaining
+ cpi->twopass.gf_group_bits -= target_frame_size; // Adjust bits remaining
- if (cpi->twopass.gf_group_bits < 0)
- cpi->twopass.gf_group_bits = 0;
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
- target_frame_size += cpi->min_frame_bandwidth; // Add in the minimum number of bits that is set aside for every frame.
+ target_frame_size += cpi->min_frame_bandwidth; // Add in the minimum number of bits that is set aside for every frame.
- cpi->per_frame_bandwidth = target_frame_size; // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = target_frame_size; // Per frame bit target for this frame
}
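
assign_std_frame_bits() gives each ordinary frame a share of the GF group budget proportional to its modified error. A sketch of the split with the same zero guard, using made-up numbers in the comment:

/* Sketch: 2.0 error units out of 10.0 still unspent against a 50000-bit
 * group -> 10000 bits, before clipping and the per-frame minimum. */
static int std_frame_target(int gf_group_bits, double modified_err,
                            double gf_group_error_left) {
  double err_fraction =
      (gf_group_error_left > 0) ? modified_err / gf_group_error_left : 0.0;
  return (int)((double)gf_group_bits * err_fraction);
}
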
// Make a damped adjustment to the active max q.
-int adjust_active_maxq( int old_maxqi, int new_maxqi )
-{
- int i;
- int ret_val = new_maxqi;
- double old_q;
- double new_q;
- double target_q;
-
- old_q = vp8_convert_qindex_to_q( old_maxqi );
- new_q = vp8_convert_qindex_to_q( new_maxqi );
-
- target_q = ((old_q * 7.0) + new_q) / 8.0;
-
- if ( target_q > old_q )
- {
- for ( i = old_maxqi; i <= new_maxqi; i++ )
- {
- if ( vp8_convert_qindex_to_q( i ) >= target_q )
- {
- ret_val = i;
- break;
- }
- }
+int adjust_active_maxq(int old_maxqi, int new_maxqi) {
+ int i;
+ int ret_val = new_maxqi;
+ double old_q;
+ double new_q;
+ double target_q;
+
+ old_q = vp8_convert_qindex_to_q(old_maxqi);
+ new_q = vp8_convert_qindex_to_q(new_maxqi);
+
+ target_q = ((old_q * 7.0) + new_q) / 8.0;
+
+ if (target_q > old_q) {
+ for (i = old_maxqi; i <= new_maxqi; i++) {
+ if (vp8_convert_qindex_to_q(i) >= target_q) {
+ ret_val = i;
+ break;
+ }
}
- else
- {
- for ( i = old_maxqi; i >= new_maxqi; i-- )
- {
- if ( vp8_convert_qindex_to_q( i ) <= target_q )
- {
- ret_val = i;
- break;
- }
- }
+ } else {
+ for (i = old_maxqi; i >= new_maxqi; i--) {
+ if (vp8_convert_qindex_to_q(i) <= target_q) {
+ ret_val = i;
+ break;
+ }
}
+ }
- return ret_val;
+ return ret_val;
}
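
The 7:8 blend in adjust_active_maxq() means each update closes only an eighth of the remaining gap, so the active max Q drifts toward a new value rather than jumping. A standalone sketch in plain q units (the real function walks q indices and compares vp8_convert_qindex_to_q() values against this target):

#include <stdio.h>

/* Sketch of the damping step above. */
static double damped_q(double old_q, double new_q) {
  return ((old_q * 7.0) + new_q) / 8.0;
}

int main(void) {
  double q = 16.0;
  int i;
  for (i = 0; i < 4; i++) {
    q = damped_q(q, 32.0); /* 18.00, 19.75, 21.28, 22.62, ... */
    printf("step %d: %.2f\n", i + 1, q);
  }
  return 0;
}
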
-void vp8_second_pass(VP8_COMP *cpi)
-{
- int tmp_q;
- int frames_left = (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame);
+void vp8_second_pass(VP8_COMP *cpi) {
+ int tmp_q;
+ int frames_left = (int)(cpi->twopass.total_stats->count - cpi->common.current_video_frame);
- FIRSTPASS_STATS this_frame;
- FIRSTPASS_STATS this_frame_copy;
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS this_frame_copy;
- double this_frame_error;
- double this_frame_intra_error;
- double this_frame_coded_error;
+ double this_frame_error;
+ double this_frame_intra_error;
+ double this_frame_coded_error;
- FIRSTPASS_STATS *start_pos;
+ FIRSTPASS_STATS *start_pos;
- int overhead_bits;
+ int overhead_bits;
- if (!cpi->twopass.stats_in)
- {
- return ;
- }
+ if (!cpi->twopass.stats_in) {
+ return;
+ }
- vp8_clear_system_state();
+ vp8_clear_system_state();
- if (EOF == input_stats(cpi, &this_frame))
- return;
+ if (EOF == input_stats(cpi, &this_frame))
+ return;
- this_frame_error = this_frame.ssim_weighted_pred_err;
- this_frame_intra_error = this_frame.intra_error;
- this_frame_coded_error = this_frame.coded_error;
+ this_frame_error = this_frame.ssim_weighted_pred_err;
+ this_frame_intra_error = this_frame.intra_error;
+ this_frame_coded_error = this_frame.coded_error;
- start_pos = cpi->twopass.stats_in;
+ start_pos = cpi->twopass.stats_in;
- // keyframe and section processing !
- if (cpi->twopass.frames_to_key == 0)
- {
- // Define next KF group and assign bits to it
- vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- find_next_key_frame(cpi, &this_frame_copy);
- }
+ // Key frame and section processing.
+ if (cpi->twopass.frames_to_key == 0) {
+ // Define next KF group and assign bits to it
+ vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ find_next_key_frame(cpi, &this_frame_copy);
+ }
- // Is this a GF / ARF (Note that a KF is always also a GF)
- if (cpi->frames_till_gf_update_due == 0)
- {
- // Define next gf group and assign bits to it
- vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- define_gf_group(cpi, &this_frame_copy);
-
- // If we are going to code an altref frame at the end of the group and the current frame is not a key frame....
- // If the previous group used an arf this frame has already benefited from that arf boost and it should not be given extra bits
- // If the previous group was NOT coded using arf we may want to apply some boost to this GF as well
- if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME))
- {
- // Assign a standard frames worth of bits from those allocated to the GF group
- int bak = cpi->per_frame_bandwidth;
- vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- assign_std_frame_bits(cpi, &this_frame_copy);
- cpi->per_frame_bandwidth = bak;
- }
- }
+ // Is this a GF / ARF (Note that a KF is always also a GF)
+ if (cpi->frames_till_gf_update_due == 0) {
+ // Define next gf group and assign bits to it
+ vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ define_gf_group(cpi, &this_frame_copy);
- // Otherwise this is an ordinary frame
- else
- {
- // Assign bits from those allocated to the GF group
- vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
- assign_std_frame_bits(cpi, &this_frame_copy);
+ // If we are going to code an altref frame at the end of the group and the current frame is not a key frame:
+ // If the previous group used an arf, this frame has already benefited from that arf boost and should not be given extra bits.
+ // If the previous group was NOT coded using an arf, we may want to apply some boost to this GF as well.
+ if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) {
+ // Assign a standard frame's worth of bits from those allocated to the GF group
+ int bak = cpi->per_frame_bandwidth;
+ vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ cpi->per_frame_bandwidth = bak;
}
-
- // Keep a globally available copy of this and the next frame's iiratio.
- cpi->twopass.this_iiratio = this_frame_intra_error /
- DOUBLE_DIVIDE_CHECK(this_frame_coded_error);
- {
- FIRSTPASS_STATS next_frame;
- if ( lookup_next_frame_stats(cpi, &next_frame) != EOF )
- {
- cpi->twopass.next_iiratio = next_frame.intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame.coded_error);
- }
+ }
+
+ // Otherwise this is an ordinary frame
+ else {
+ // Assign bits from those allocated to the GF group
+ vpx_memcpy(&this_frame_copy, &this_frame, sizeof(this_frame));
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ }
+
+ // Keep a globally available copy of this and the next frame's iiratio.
+ cpi->twopass.this_iiratio = this_frame_intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame_coded_error);
+ {
+ FIRSTPASS_STATS next_frame;
+ if (lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ cpi->twopass.next_iiratio = next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error);
}
+ }
- // Set nominal per second bandwidth for this frame
- cpi->target_bandwidth = cpi->per_frame_bandwidth * cpi->output_frame_rate;
- if (cpi->target_bandwidth < 0)
- cpi->target_bandwidth = 0;
-
+ // Set nominal per second bandwidth for this frame
+ cpi->target_bandwidth = cpi->per_frame_bandwidth * cpi->output_frame_rate;
+ if (cpi->target_bandwidth < 0)
+ cpi->target_bandwidth = 0;
- // Account for mv, mode and other overheads.
- overhead_bits = estimate_modemvcost(
- cpi, cpi->twopass.total_left_stats );
- // Special case code for first frame.
- if (cpi->common.current_video_frame == 0)
- {
- cpi->twopass.est_max_qcorrection_factor = 1.0;
+ // Account for mv, mode and other overheads.
+ overhead_bits = estimate_modemvcost(
+ cpi, cpi->twopass.total_left_stats);
- // Set a cq_level in constrained quality mode.
- if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
- {
- int est_cq;
+ // Special case code for first frame.
+ if (cpi->common.current_video_frame == 0) {
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
- est_cq =
- estimate_cq( cpi,
- cpi->twopass.total_left_stats,
- (int)(cpi->twopass.bits_left / frames_left),
- overhead_bits );
-
- cpi->cq_target_quality = cpi->oxcf.cq_level;
- if ( est_cq > cpi->cq_target_quality )
- cpi->cq_target_quality = est_cq;
- }
+ // Set a cq_level in constrained quality mode.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ int est_cq;
- // guess at maxq needed in 2nd pass
- cpi->twopass.maxq_max_limit = cpi->worst_quality;
- cpi->twopass.maxq_min_limit = cpi->best_quality;
-
- tmp_q = estimate_max_q(
- cpi,
+ est_cq =
+ estimate_cq(cpi,
cpi->twopass.total_left_stats,
(int)(cpi->twopass.bits_left / frames_left),
- overhead_bits );
-
- cpi->active_worst_quality = tmp_q;
- cpi->ni_av_qi = tmp_q;
- cpi->avg_q = vp8_convert_qindex_to_q( tmp_q );
-
- // Limit the maxq value returned subsequently.
- // This increases the risk of overspend or underspend if the initial
- // estimate for the clip is bad, but helps prevent excessive
- // variation in Q, especially near the end of a clip
- // where for example a small overspend may cause Q to crash
- adjust_maxq_qrange(cpi);
- }
+ overhead_bits);
- // The last few frames of a clip almost always have to few or too many
- // bits and for the sake of over exact rate control we dont want to make
- // radical adjustments to the allowed quantizer range just to use up a
- // few surplus bits or get beneath the target rate.
- else if ( (cpi->common.current_video_frame <
- (((unsigned int)cpi->twopass.total_stats->count * 255)>>8)) &&
- ((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
- (unsigned int)cpi->twopass.total_stats->count) )
- {
- if (frames_left < 1)
- frames_left = 1;
-
- tmp_q = estimate_max_q(
- cpi,
- cpi->twopass.total_left_stats,
- (int)(cpi->twopass.bits_left / frames_left),
- overhead_bits );
-
- // Make a damped adjustment to active max Q
- cpi->active_worst_quality =
- adjust_active_maxq( cpi->active_worst_quality, tmp_q );
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+ if (est_cq > cpi->cq_target_quality)
+ cpi->cq_target_quality = est_cq;
}
- cpi->twopass.frames_to_key --;
+ // guess at maxq needed in 2nd pass
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
- // Update the total stats remaining sturcture
- subtract_stats(cpi->twopass.total_left_stats, &this_frame );
+ tmp_q = estimate_max_q(
+ cpi,
+ cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left),
+ overhead_bits);
+
+ cpi->active_worst_quality = tmp_q;
+ cpi->ni_av_qi = tmp_q;
+ cpi->avg_q = vp8_convert_qindex_to_q(tmp_q);
+
+ // Limit the maxq value returned subsequently.
+ // This increases the risk of overspend or underspend if the initial
+ // estimate for the clip is bad, but helps prevent excessive
+ // variation in Q, especially near the end of a clip,
+ // where, for example, a small overspend may cause Q to crash.
+ adjust_maxq_qrange(cpi);
+ }
+
+ // The last few frames of a clip almost always have too few or too many
+ // bits, and for the sake of overly exact rate control we don't want to make
+ // radical adjustments to the allowed quantizer range just to use up a
+ // few surplus bits or get beneath the target rate.
+ else if ((cpi->common.current_video_frame <
+ (((unsigned int)cpi->twopass.total_stats->count * 255) >> 8)) &&
+ ((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
+ (unsigned int)cpi->twopass.total_stats->count)) {
+ if (frames_left < 1)
+ frames_left = 1;
+
+ tmp_q = estimate_max_q(
+ cpi,
+ cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left),
+ overhead_bits);
+
+ // Make a damped adjustment to active max Q
+ cpi->active_worst_quality =
+ adjust_active_maxq(cpi->active_worst_quality, tmp_q);
+ }
+
+ cpi->twopass.frames_to_key--;
+
+ // Update the total stats remaining structure
+ subtract_stats(cpi->twopass.total_left_stats, &this_frame);
}
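/*
 * Sketch of the divide guard behind the iiratio computations in
 * vp8_second_pass() above. The macro body shown is an assumption about
 * DOUBLE_DIVIDE_CHECK's definition (nudge the divisor slightly away
 * from zero); the point is that a perfectly predicted frame
 * (coded_error == 0) still yields a finite, very large ratio.
 */
#include <stdio.h>

#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)

int main(void) {
  double intra_error = 1500.0;
  double coded_error = 0.0;   /* hypothetical perfectly predicted frame */
  double iiratio = intra_error / DOUBLE_DIVIDE_CHECK(coded_error);
  printf("iiratio = %g\n", iiratio);  /* finite, about 1.5e9 */
  return 0;
}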
-static BOOL test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTPASS_STATS *this_frame, FIRSTPASS_STATS *next_frame)
-{
- BOOL is_viable_kf = FALSE;
-
- // Does the frame satisfy the primary criteria of a key frame
- // If so, then examine how well it predicts subsequent frames
- if ((this_frame->pcnt_second_ref < 0.10) &&
- (next_frame->pcnt_second_ref < 0.10) &&
- ((this_frame->pcnt_inter < 0.05) ||
- (
- ((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .35) &&
- ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) &&
- ((fabs(last_frame->coded_error - this_frame->coded_error) / DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > .40) ||
- (fabs(last_frame->intra_error - this_frame->intra_error) / DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > .40) ||
- ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5)
- )
+static BOOL test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTPASS_STATS *this_frame, FIRSTPASS_STATS *next_frame) {
+ BOOL is_viable_kf = FALSE;
+
+ // Does the frame satisfy the primary criteria of a key frame?
+ // If so, then examine how well it predicts subsequent frames.
+ if ((this_frame->pcnt_second_ref < 0.10) &&
+ (next_frame->pcnt_second_ref < 0.10) &&
+ ((this_frame->pcnt_inter < 0.05) ||
+ (
+ ((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .35) &&
+ ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) &&
+ ((fabs(last_frame->coded_error - this_frame->coded_error) / DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > .40) ||
+ (fabs(last_frame->intra_error - this_frame->intra_error) / DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > .40) ||
+ ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5)
)
- )
)
- {
- int i;
- FIRSTPASS_STATS *start_pos;
-
- FIRSTPASS_STATS local_next_frame;
-
- double boost_score = 0.0;
- double old_boost_score = 0.0;
- double decay_accumulator = 1.0;
- double next_iiratio;
-
- vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
-
- // Note the starting file position so we can reset to it
- start_pos = cpi->twopass.stats_in;
-
- // Examine how well the key frame predicts subsequent frames
- for (i = 0 ; i < 16; i++)
- {
- next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)) ;
-
- if (next_iiratio > RMAX)
- next_iiratio = RMAX;
-
- // Cumulative effect of decay in prediction quality
- if (local_next_frame.pcnt_inter > 0.85)
- decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
- else
- decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
-
- //decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
-
- // Keep a running total
- boost_score += (decay_accumulator * next_iiratio);
-
- // Test various breakout clauses
- if ((local_next_frame.pcnt_inter < 0.05) ||
- (next_iiratio < 1.5) ||
- (((local_next_frame.pcnt_inter -
- local_next_frame.pcnt_neutral) < 0.20) &&
- (next_iiratio < 3.0)) ||
- ((boost_score - old_boost_score) < 3.0) ||
- (local_next_frame.intra_error < 200)
- )
- {
- break;
- }
+ )
+ ) {
+ int i;
+ FIRSTPASS_STATS *start_pos;
- old_boost_score = boost_score;
+ FIRSTPASS_STATS local_next_frame;
- // Get the next frame details
- if (EOF == input_stats(cpi, &local_next_frame))
- break;
- }
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double decay_accumulator = 1.0;
+ double next_iiratio;
- // If there is tolerable prediction for at least the next 3 frames then break out else discard this pottential key frame and move on
- if (boost_score > 30.0 && (i > 3))
- is_viable_kf = TRUE;
- else
- {
- // Reset the file position
- reset_fpf_position(cpi, start_pos);
+ vpx_memcpy(&local_next_frame, next_frame, sizeof(*next_frame));
- is_viable_kf = FALSE;
- }
- }
+ // Note the starting file position so we can reset to it
+ start_pos = cpi->twopass.stats_in;
- return is_viable_kf;
-}
-static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
-{
- int i,j;
- FIRSTPASS_STATS last_frame;
- FIRSTPASS_STATS first_frame;
- FIRSTPASS_STATS next_frame;
- FIRSTPASS_STATS *start_position;
+ // Examine how well the key frame predicts subsequent frames
+ for (i = 0; i < 16; i++) {
+ next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
+
+ if (next_iiratio > RMAX)
+ next_iiratio = RMAX;
+
+ // Cumulative effect of decay in prediction quality
+ if (local_next_frame.pcnt_inter > 0.85)
+ decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+ else
+ decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
+
+ // decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+
+ // Keep a running total
+ boost_score += (decay_accumulator * next_iiratio);
+
+ // Test various breakout clauses
+ if ((local_next_frame.pcnt_inter < 0.05) ||
+ (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter -
+ local_next_frame.pcnt_neutral) < 0.20) &&
+ (next_iiratio < 3.0)) ||
+ ((boost_score - old_boost_score) < 3.0) ||
+ (local_next_frame.intra_error < 200)
+ ) {
+ break;
+ }
- double decay_accumulator = 1.0;
- double zero_motion_accumulator = 1.0;
- double boost_score = 0;
- double old_boost_score = 0.0;
- double loop_decay_rate;
+ old_boost_score = boost_score;
- double kf_mod_err = 0.0;
- double kf_group_err = 0.0;
- double kf_group_intra_err = 0.0;
- double kf_group_coded_err = 0.0;
- double recent_loop_decay[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
+ // Get the next frame details
+ if (EOF == input_stats(cpi, &local_next_frame))
+ break;
+ }
- vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+ // If there is tolerable prediction for at least the next 3 frames then break out; else discard this potential key frame and move on
+ if (boost_score > 30.0 && (i > 3))
+ is_viable_kf = TRUE;
+ else {
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
- vp8_clear_system_state(); //__asm emms;
- start_position = cpi->twopass.stats_in;
+ is_viable_kf = FALSE;
+ }
+ }
- cpi->common.frame_type = KEY_FRAME;
+ return is_viable_kf;
+}
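/*
 * Toy standalone run of the boost accumulation in test_candidate_kf()
 * above, keeping the same decay rule and only the incremental-boost
 * breakout clause. All per-frame stats below are made up for
 * illustration; the candidate passes when boost > 30.0 after more
 * than 3 frames.
 */
#include <stdio.h>

int main(void) {
  double pcnt_inter[16] = { 0.95, 0.93, 0.90, 0.88, 0.86, 0.84, 0.80, 0.75,
                            0.70, 0.65, 0.60, 0.55, 0.50, 0.45, 0.40, 0.35 };
  double iiratio[16]    = { 8.0, 7.5, 7.0, 6.5, 6.0, 5.5, 5.0, 4.5,
                            4.0, 3.5, 3.0, 2.5, 2.0, 1.8, 1.6, 1.4 };
  double decay = 1.0, boost = 0.0, old_boost = 0.0;
  int i;
  for (i = 0; i < 16; i++) {
    /* Decay tracks how well inter prediction holds up frame to frame. */
    decay *= (pcnt_inter[i] > 0.85) ? pcnt_inter[i]
                                    : (0.85 + pcnt_inter[i]) / 2.0;
    boost += decay * iiratio[i];
    if ((boost - old_boost) < 3.0)  /* incremental gain too small: stop */
      break;
    old_boost = boost;
  }
  printf("frames examined = %d, boost = %.2f, viable = %s\n",
         i, boost, (boost > 30.0 && i > 3) ? "yes" : "no");
  return 0;
}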
+static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int i, j;
+ FIRSTPASS_STATS last_frame;
+ FIRSTPASS_STATS first_frame;
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_position;
- // is this a forced key frame by interval
- cpi->this_key_frame_forced = cpi->next_key_frame_forced;
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
+ double boost_score = 0;
+ double old_boost_score = 0.0;
+ double loop_decay_rate;
- // Clear the alt ref active flag as this can never be active on a key frame
- cpi->source_alt_ref_active = FALSE;
+ double kf_mod_err = 0.0;
+ double kf_group_err = 0.0;
+ double kf_group_intra_err = 0.0;
+ double kf_group_coded_err = 0.0;
+ double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
- // Kf is always a gf so clear frames till next gf counter
- cpi->frames_till_gf_update_due = 0;
+ vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
- cpi->twopass.frames_to_key = 1;
+ vp8_clear_system_state(); // __asm emms;
+ start_position = cpi->twopass.stats_in;
- // Take a copy of the initial frame details
- vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
+ cpi->common.frame_type = KEY_FRAME;
- cpi->twopass.kf_group_bits = 0; // Total bits avaialable to kf group
- cpi->twopass.kf_group_error_left = 0; // Group modified error score.
+ // Is this a key frame forced by the interval?
+ cpi->this_key_frame_forced = cpi->next_key_frame_forced;
- kf_mod_err = calculate_modified_err(cpi, this_frame);
+ // Clear the alt ref active flag as this can never be active on a key frame
+ cpi->source_alt_ref_active = FALSE;
- // find the next keyframe
- i = 0;
- while (cpi->twopass.stats_in < cpi->twopass.stats_in_end)
- {
- // Accumulate kf group error
- kf_group_err += calculate_modified_err(cpi, this_frame);
+ // Kf is always a gf so clear frames till next gf counter
+ cpi->frames_till_gf_update_due = 0;
- // These figures keep intra and coded error counts for all frames including key frames in the group.
- // The effect of the key frame itself can be subtracted out using the first_frame data collected above
- kf_group_intra_err += this_frame->intra_error;
- kf_group_coded_err += this_frame->coded_error;
+ cpi->twopass.frames_to_key = 1;
- // load a the next frame's stats
- vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
- input_stats(cpi, this_frame);
+ // Take a copy of the initial frame details
+ vpx_memcpy(&first_frame, this_frame, sizeof(*this_frame));
- // Provided that we are not at the end of the file...
- if (cpi->oxcf.auto_key
- && lookup_next_frame_stats(cpi, &next_frame) != EOF)
- {
- // Normal scene cut check
- if ( test_candidate_kf(cpi, &last_frame, this_frame, &next_frame) )
- {
- break;
- }
+ cpi->twopass.kf_group_bits = 0; // Total bits available to kf group
+ cpi->twopass.kf_group_error_left = 0; // Group modified error score.
- // How fast is prediction quality decaying
- loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
-
- // We want to know something about the recent past... rather than
- // as used elsewhere where we are concened with decay in prediction
- // quality since the last GF or KF.
- recent_loop_decay[i%8] = loop_decay_rate;
- decay_accumulator = 1.0;
- for (j = 0; j < 8; j++)
- {
- decay_accumulator = decay_accumulator * recent_loop_decay[j];
- }
+ kf_mod_err = calculate_modified_err(cpi, this_frame);
- // Special check for transition or high motion followed by a
- // to a static scene.
- if ( detect_transition_to_still( cpi, i,
- (cpi->key_frame_frequency-i),
- loop_decay_rate,
- decay_accumulator ) )
- {
- break;
- }
+ // find the next keyframe
+ i = 0;
+ while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+ // These figures keep intra and coded error counts for all frames including key frames in the group.
+ // The effect of the key frame itself can be subtracted out using the first_frame data collected above
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
- // Step on to the next frame
- cpi->twopass.frames_to_key ++;
+ // load the next frame's stats
+ vpx_memcpy(&last_frame, this_frame, sizeof(*this_frame));
+ input_stats(cpi, this_frame);
- // If we don't have a real key frame within the next two
- // forcekeyframeevery intervals then break out of the loop.
- if (cpi->twopass.frames_to_key >= 2 *(int)cpi->key_frame_frequency)
- break;
- } else
- cpi->twopass.frames_to_key ++;
+ // Provided that we are not at the end of the file...
+ if (cpi->oxcf.auto_key
+ && lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ // Normal scene cut check
+ if (test_candidate_kf(cpi, &last_frame, this_frame, &next_frame)) {
+ break;
+ }
+
+ // How fast is prediction quality decaying
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+
+ // We want to know something about the recent past... rather than
+ // considering, as elsewhere, the decay in prediction
+ // quality since the last GF or KF.
+ recent_loop_decay[i % 8] = loop_decay_rate;
+ decay_accumulator = 1.0;
+ for (j = 0; j < 8; j++) {
+ decay_accumulator = decay_accumulator * recent_loop_decay[j];
+ }
+
+ // Special check for a transition or high motion followed
+ // by a static scene.
+ if (detect_transition_to_still(cpi, i,
+ (cpi->key_frame_frequency - i),
+ loop_decay_rate,
+ decay_accumulator)) {
+ break;
+ }
- i++;
- }
- // If there is a max kf interval set by the user we must obey it.
- // We already breakout of the loop above at 2x max.
- // This code centers the extra kf if the actual natural
- // interval is between 1x and 2x
- if (cpi->oxcf.auto_key
- && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency )
- {
- FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
- FIRSTPASS_STATS tmp_frame;
+ // Step on to the next frame
+ cpi->twopass.frames_to_key++;
- cpi->twopass.frames_to_key /= 2;
+ // If we don't have a real key frame within the next two
+ // forcekeyframeevery intervals then break out of the loop.
+ if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
+ break;
+ } else
+ cpi->twopass.frames_to_key++;
- // Copy first frame details
- vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
+ i++;
+ }
- // Reset to the start of the group
- reset_fpf_position(cpi, start_position);
+ // If there is a max kf interval set by the user we must obey it.
+ // We already break out of the loop above at 2x max.
+ // This code centers the extra kf if the actual natural
+ // interval is between 1x and 2x.
+ if (cpi->oxcf.auto_key
+ && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) {
+ FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_frame;
- kf_group_err = 0;
- kf_group_intra_err = 0;
- kf_group_coded_err = 0;
+ cpi->twopass.frames_to_key /= 2;
- // Rescan to get the correct error data for the forced kf group
- for( i = 0; i < cpi->twopass.frames_to_key; i++ )
- {
- // Accumulate kf group errors
- kf_group_err += calculate_modified_err(cpi, &tmp_frame);
- kf_group_intra_err += tmp_frame.intra_error;
- kf_group_coded_err += tmp_frame.coded_error;
+ // Copy first frame details
+ vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
- // Load a the next frame's stats
- input_stats(cpi, &tmp_frame);
- }
+ // Reset to the start of the group
+ reset_fpf_position(cpi, start_position);
+
+ kf_group_err = 0;
+ kf_group_intra_err = 0;
+ kf_group_coded_err = 0;
- // Reset to the start of the group
- reset_fpf_position(cpi, current_pos);
+ // Rescan to get the correct error data for the forced kf group
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ // Accumulate kf group errors
+ kf_group_err += calculate_modified_err(cpi, &tmp_frame);
+ kf_group_intra_err += tmp_frame.intra_error;
+ kf_group_coded_err += tmp_frame.coded_error;
- cpi->next_key_frame_forced = TRUE;
+ // Load the next frame's stats
+ input_stats(cpi, &tmp_frame);
}
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, current_pos);
+
+ cpi->next_key_frame_forced = TRUE;
+ } else
+ cpi->next_key_frame_forced = FALSE;
+
+ // Special case for the last frame of the file
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ // These figures keep intra and coded error counts for all frames including key frames in the group.
+ // The effect of the key frame itself can be subtracted out using the first_frame data collected above
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+ }
+
+ // Calculate the number of bits that should be assigned to the kf group.
+ if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0)) {
+ // Max for a single normal frame (not key frame)
+ int max_bits = frame_max_bits(cpi);
+
+ // Maximum bits for the kf group
+ int64_t max_grp_bits;
+
+ // Default allocation based on bits left and relative
+ // complexity of the section
+ cpi->twopass.kf_group_bits = (int64_t)(cpi->twopass.bits_left *
+ (kf_group_err /
+ cpi->twopass.modified_error_left));
+
+ // Clip based on maximum per frame rate defined by the user.
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
+ if (cpi->twopass.kf_group_bits > max_grp_bits)
+ cpi->twopass.kf_group_bits = max_grp_bits;
+ } else
+ cpi->twopass.kf_group_bits = 0;
+
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
+
+ // determine how big to make this keyframe based on how well the subsequent frames use inter blocks
+ decay_accumulator = 1.0;
+ boost_score = 0.0;
+ loop_decay_rate = 1.00; // Starting decay rate
+
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ double r;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
+ r = (IIKFACTOR2 * next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
else
- cpi->next_key_frame_forced = FALSE;
-
- // Special case for the last frame of the file
- if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
- {
- // Accumulate kf group error
- kf_group_err += calculate_modified_err(cpi, this_frame);
-
- // These figures keep intra and coded error counts for all frames including key frames in the group.
- // The effect of the key frame itself can be subtracted out using the first_frame data collected above
- kf_group_intra_err += this_frame->intra_error;
- kf_group_coded_err += this_frame->coded_error;
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+
+ if (r > RMAX)
+ r = RMAX;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
}
- // Calculate the number of bits that should be assigned to the kf group.
- if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0))
- {
- // Max for a single normal frame (not key frame)
- int max_bits = frame_max_bits(cpi);
-
- // Maximum bits for the kf group
- int64_t max_grp_bits;
-
- // Default allocation based on bits left and relative
- // complexity of the section
- cpi->twopass.kf_group_bits = (int64_t)( cpi->twopass.bits_left *
- ( kf_group_err /
- cpi->twopass.modified_error_left ));
-
- // Clip based on maximum per frame rate defined by the user.
- max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
- if (cpi->twopass.kf_group_bits > max_grp_bits)
- cpi->twopass.kf_group_bits = max_grp_bits;
+ // How fast is prediction quality decaying
+ if (!detect_flash(cpi, 0)) {
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
}
- else
- cpi->twopass.kf_group_bits = 0;
- // Reset the first pass file position
- reset_fpf_position(cpi, start_position);
+ boost_score += (decay_accumulator * r);
- // determine how big to make this keyframe based on how well the subsequent frames use inter blocks
- decay_accumulator = 1.0;
- boost_score = 0.0;
- loop_decay_rate = 1.00; // Starting decay rate
-
- for (i = 0 ; i < cpi->twopass.frames_to_key ; i++)
- {
- double r;
-
- if (EOF == input_stats(cpi, &next_frame))
- break;
-
- if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
- r = (IIKFACTOR2 * next_frame.intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
- else
- r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
- DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
-
- if (r > RMAX)
- r = RMAX;
-
- // Monitor for static sections.
- if ( (next_frame.pcnt_inter - next_frame.pcnt_motion) <
- zero_motion_accumulator )
- {
- zero_motion_accumulator =
- (next_frame.pcnt_inter - next_frame.pcnt_motion);
- }
+ if ((i > MIN_GF_INTERVAL) &&
+ ((boost_score - old_boost_score) < 6.25)) {
+ break;
+ }
- // How fast is prediction quality decaying
- if ( !detect_flash(cpi, 0) )
- {
- loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
- decay_accumulator = decay_accumulator * loop_decay_rate;
- decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator;
- }
+ old_boost_score = boost_score;
+ }
- boost_score += (decay_accumulator * r);
+ {
+ FIRSTPASS_STATS sectionstats;
- if ((i > MIN_GF_INTERVAL) &&
- ((boost_score - old_boost_score) < 6.25))
- {
- break;
- }
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_position);
- old_boost_score = boost_score;
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
}
- {
- FIRSTPASS_STATS sectionstats;
+ avg_stats(&sectionstats);
- zero_stats(&sectionstats);
- reset_fpf_position(cpi, start_position);
+ cpi->twopass.section_intra_rating =
+ sectionstats.intra_error
+ / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ }
- for (i = 0 ; i < cpi->twopass.frames_to_key ; i++)
- {
- input_stats(cpi, &next_frame);
- accumulate_stats(&sectionstats, &next_frame);
- }
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
- avg_stats(&sectionstats);
+ // Work out how many bits to allocate for the key frame itself
+ if (1) {
+ int kf_boost = boost_score;
+ int allocation_chunks;
+ int alt_kf_bits;
- cpi->twopass.section_intra_rating =
- sectionstats.intra_error
- / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
+ if (kf_boost < 300) {
+ kf_boost += (cpi->twopass.frames_to_key * 3);
+ if (kf_boost > 300)
+ kf_boost = 300;
}
- // Reset the first pass file position
- reset_fpf_position(cpi, start_position);
-
- // Work out how many bits to allocate for the key frame itself
- if (1)
- {
- int kf_boost = boost_score;
- int allocation_chunks;
- int alt_kf_bits;
-
- if ( kf_boost < 300 )
- {
- kf_boost += (cpi->twopass.frames_to_key * 3);
- if ( kf_boost > 300 )
- kf_boost = 300;
- }
-
- if (kf_boost < 250) // Min KF boost
- kf_boost = 250;
-
- // Make a note of baseline boost and the zero motion
- // accumulator value for use elsewhere.
- cpi->kf_boost = kf_boost;
- cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
-
- // We do three calculations for kf size.
- // The first is based on the error score for the whole kf group.
- // The second (optionaly) on the key frames own error if this is
- // smaller than the average for the group.
- // The final one insures that the frame receives at least the
- // allocation it would have received based on its own error score vs
- // the error score remaining
- // Special case if the sequence appears almost totaly static
- // In this case we want to spend almost all of the bits on the
- // key frame.
- // cpi->twopass.frames_to_key-1 because key frame itself is taken
- // care of by kf_boost.
- if ( zero_motion_accumulator >= 0.99 )
- {
- allocation_chunks =
- ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
- }
- else
- {
- allocation_chunks =
- ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
- }
+ if (kf_boost < 250) // Min KF boost
+ kf_boost = 250;
+
+ // Make a note of baseline boost and the zero motion
+ // accumulator value for use elsewhere.
+ cpi->kf_boost = kf_boost;
+ cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+
+ // We do three calculations for kf size.
+ // The first is based on the error score for the whole kf group.
+ // The second (optionally) on the key frame's own error if this is
+ // smaller than the average for the group.
+ // The final one ensures that the frame receives at least the
+ // allocation it would have received based on its own error score vs
+ // the error score remaining.
+ // Special case if the sequence appears almost totally static
+ // In this case we want to spend almost all of the bits on the
+ // key frame.
+ // cpi->twopass.frames_to_key-1 because key frame itself is taken
+ // care of by kf_boost.
+ if (zero_motion_accumulator >= 0.99) {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+ } else {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+ }
- // Prevent overflow
- if ( kf_boost > 1028 )
- {
- int divisor = kf_boost >> 10;
- kf_boost /= divisor;
- allocation_chunks /= divisor;
- }
+ // Prevent overflow
+ if (kf_boost > 1028) {
+ int divisor = kf_boost >> 10;
+ kf_boost /= divisor;
+ allocation_chunks /= divisor;
+ }
- cpi->twopass.kf_group_bits = (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
+ cpi->twopass.kf_group_bits = (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
- // Calculate the number of bits to be spent on the key frame
- cpi->twopass.kf_bits = (int)((double)kf_boost * ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
+ // Calculate the number of bits to be spent on the key frame
+ cpi->twopass.kf_bits = (int)((double)kf_boost * ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
- // If the key frame is actually easier than the average for the
- // kf group (which does sometimes happen... eg a blank intro frame)
- // Then use an alternate calculation based on the kf error score
- // which should give a smaller key frame.
- if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key)
- {
- double alt_kf_grp_bits =
- ((double)cpi->twopass.bits_left *
- (kf_mod_err * (double)cpi->twopass.frames_to_key) /
- DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
+ // If the key frame is actually easier than the average for the
+ // kf group (which does sometimes happen, e.g. a blank intro frame),
+ // then use an alternate calculation based on the kf error score,
+ // which should give a smaller key frame.
+ if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
+ double alt_kf_grp_bits =
+ ((double)cpi->twopass.bits_left *
+ (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
- alt_kf_bits = (int)((double)kf_boost *
- (alt_kf_grp_bits / (double)allocation_chunks));
+ alt_kf_bits = (int)((double)kf_boost *
+ (alt_kf_grp_bits / (double)allocation_chunks));
- if (cpi->twopass.kf_bits > alt_kf_bits)
- {
- cpi->twopass.kf_bits = alt_kf_bits;
- }
- }
- // Else if it is much harder than other frames in the group make sure
- // it at least receives an allocation in keeping with its relative
- // error score
- else
- {
- alt_kf_bits =
- (int)((double)cpi->twopass.bits_left *
- (kf_mod_err /
- DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)));
-
- if (alt_kf_bits > cpi->twopass.kf_bits)
- {
- cpi->twopass.kf_bits = alt_kf_bits;
- }
- }
+ if (cpi->twopass.kf_bits > alt_kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+ // Else if it is much harder than other frames in the group make sure
+ // it at least receives an allocation in keeping with its relative
+ // error score
+ else {
+ alt_kf_bits =
+ (int)((double)cpi->twopass.bits_left *
+ (kf_mod_err /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)));
+
+ if (alt_kf_bits > cpi->twopass.kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
- cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
- cpi->twopass.kf_bits += cpi->min_frame_bandwidth; // Add in the minimum frame allowance
+ cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth; // Add in the minimum frame allowance
- cpi->per_frame_bandwidth = cpi->twopass.kf_bits; // Peer frame bit target for this frame
- cpi->target_bandwidth = cpi->twopass.kf_bits * cpi->output_frame_rate; // Convert to a per second bitrate
- }
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits; // Per frame bit target for this frame
+ cpi->target_bandwidth = cpi->twopass.kf_bits * cpi->output_frame_rate; // Convert to a per second bitrate
+ }
- // Note the total error score of the kf group minus the key frame itself
- cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+ // Note the total error score of the kf group minus the key frame itself
+ cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
- // Adjust the count of total modified error left.
- // The count of bits left is adjusted elsewhere based on real coded frame sizes
- cpi->twopass.modified_error_left -= kf_group_err;
+ // Adjust the count of total modified error left.
+ // The count of bits left is adjusted elsewhere based on real coded frame sizes
+ cpi->twopass.modified_error_left -= kf_group_err;
}
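/*
 * Worked example of the key-frame allocation arithmetic in
 * find_next_key_frame() above: each ordinary frame in the group weighs
 * 100 "chunks" (10 for a near-static group) and the key frame weighs
 * kf_boost. The group budget and boost values below are invented.
 */
#include <stdio.h>
#include <stdint.h>

int main(void) {
  int64_t kf_group_bits = 2000000;  /* hypothetical group budget */
  int frames_to_key = 50;
  int kf_boost = 300;
  /* Ordinary frames weigh 100 chunks each; the key frame weighs kf_boost. */
  int allocation_chunks = (frames_to_key - 1) * 100 + kf_boost;
  int kf_bits = (int)((double)kf_boost *
                      ((double)kf_group_bits / (double)allocation_chunks));
  /* 5200 chunks; the key frame gets 300/5200 of 2 Mbit, about 115384 bits. */
  printf("chunks = %d, kf_bits = %d\n", allocation_chunks, kf_bits);
  return 0;
}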
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 59b30a762..695e9c69b 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -20,95 +20,94 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi);
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
-void vp8_cmachine_specific_config(VP8_COMP *cpi)
-{
+void vp8_cmachine_specific_config(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
- cpi->rtcd.common = &cpi->common.rtcd;
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
-
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
-
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
-
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
+ cpi->rtcd.common = &cpi->common.rtcd;
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
+ cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
+ cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
+ cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
+ cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
+
+ cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
+ cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
+ cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
+ cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
+ cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
+
+ cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
+ cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
+ cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
+ cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
+ cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
+
+ cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
+ cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
+ cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
+ cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
+ cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
#if ARCH_X86 || ARCH_X86_64
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
+ cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
#endif
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
-
- cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
- cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-
- cpi->rtcd.encodemb.berr = vp8_block_error_c;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;
-
- cpi->rtcd.search.full_search = vp8_full_search_sad;
- cpi->rtcd.search.refining_search = vp8_refining_search_sad;
- cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
- cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
+ cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
+ cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
+ cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
+ cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
+ cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
+
+ cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
+ cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
+ cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
+ cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
+ cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
+ cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
+ cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
+
+ cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
+ cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
+
+ cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
+ cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
+
+ cpi->rtcd.encodemb.berr = vp8_block_error_c;
+ cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
+ cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
+ cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
+ cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
+ cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;
+
+ cpi->rtcd.search.full_search = vp8_full_search_sad;
+ cpi->rtcd.search.refining_search = vp8_refining_search_sad;
+ cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
+ cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
#if CONFIG_INTERNAL_STATS
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
+ cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
+ cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
#endif
#endif
- cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
- vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
+ cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
+ vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64
- vp8_arch_x86_encoder_init(cpi);
+ vp8_arch_x86_encoder_init(cpi);
#endif
#if ARCH_ARM
- vp8_arch_arm_encoder_init(cpi);
+ vp8_arch_arm_encoder_init(cpi);
#endif
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
}
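/*
 * Minimal sketch of the RTCD pattern used by
 * vp8_cmachine_specific_config() above: fill every slot of a
 * function-pointer table with a portable C version, then let the
 * arch-specific init override what the CPU supports. All types and
 * names here are invented for illustration.
 */
#include <stdio.h>

typedef int (*sad_fn)(const unsigned char *a, const unsigned char *b, int n);

static int sad_c(const unsigned char *a, const unsigned char *b, int n) {
  int i, sum = 0;
  for (i = 0; i < n; i++)
    sum += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
  return sum;
}

struct rtcd_sketch { sad_fn sad16x16; };

static void arch_init(struct rtcd_sketch *rtcd, int has_simd) {
  if (has_simd) {
    /* A real port would point at an assembly/intrinsics version here. */
  }
}

int main(void) {
  struct rtcd_sketch rtcd = { sad_c };  /* C fallback is always valid */
  arch_init(&rtcd, 0);                  /* no SIMD detected: fallback stays */
  unsigned char a[4] = {1, 2, 3, 4}, b[4] = {4, 3, 2, 1};
  printf("sad = %d\n", rtcd.sad16x16(a, b, 4));  /* 3+1+1+3 = 8 */
  return 0;
}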
diff --git a/vp8/encoder/lookahead.c b/vp8/encoder/lookahead.c
index b800d0136..ae21f9729 100644
--- a/vp8/encoder/lookahead.c
+++ b/vp8/encoder/lookahead.c
@@ -15,85 +15,78 @@
#define MAX_LAG_BUFFERS 25
-struct lookahead_ctx
-{
- unsigned int max_sz; /* Absolute size of the queue */
- unsigned int sz; /* Number of buffers currently in the queue */
- unsigned int read_idx; /* Read index */
- unsigned int write_idx; /* Write index */
- struct lookahead_entry *buf; /* Buffer list */
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
};
/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *
pop(struct lookahead_ctx *ctx,
- unsigned int *idx)
-{
- unsigned int index = *idx;
- struct lookahead_entry *buf = ctx->buf + index;
-
- assert(index < ctx->max_sz);
- if(++index >= ctx->max_sz)
- index -= ctx->max_sz;
- *idx = index;
- return buf;
+ unsigned int *idx) {
+ unsigned int index = *idx;
+ struct lookahead_entry *buf = ctx->buf + index;
+
+ assert(index < ctx->max_sz);
+ if (++index >= ctx->max_sz)
+ index -= ctx->max_sz;
+ *idx = index;
+ return buf;
}
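/*
 * Standalone illustration of the circular-index advance used by pop()
 * above: increment, then subtract max_sz on wrap, instead of taking a
 * modulo. Queue depth 3 is an arbitrary example value.
 */
#include <stdio.h>

int main(void) {
  unsigned int max_sz = 3, idx = 0, step;
  for (step = 0; step < 7; step++) {
    unsigned int index = idx;
    if (++index >= max_sz)
      index -= max_sz;      /* after slot 2 comes slot 0 again */
    printf("%u -> %u\n", idx, index);
    idx = index;
  }
  return 0;
}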
void
-vp8_lookahead_destroy(struct lookahead_ctx *ctx)
-{
- if(ctx)
- {
- if(ctx->buf)
- {
- int i;
-
- for(i = 0; i < ctx->max_sz; i++)
- vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
- free(ctx->buf);
- }
- free(ctx);
+vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
+ if (ctx) {
+ if (ctx->buf) {
+ int i;
+
+ for (i = 0; i < ctx->max_sz; i++)
+ vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
+ free(ctx->buf);
}
+ free(ctx);
+ }
}
-struct lookahead_ctx*
+struct lookahead_ctx *
vp8_lookahead_init(unsigned int width,
unsigned int height,
- unsigned int depth)
-{
- struct lookahead_ctx *ctx = NULL;
- int i;
-
- /* Clamp the lookahead queue depth */
- if(depth < 1)
- depth = 1;
- else if(depth > MAX_LAG_BUFFERS)
- depth = MAX_LAG_BUFFERS;
-
- /* Align the buffer dimensions */
- width = (width + 15) & ~15;
- height = (height + 15) & ~15;
-
- /* Allocate the lookahead structures */
- ctx = calloc(1, sizeof(*ctx));
- if(ctx)
- {
- ctx->max_sz = depth;
- ctx->buf = calloc(depth, sizeof(*ctx->buf));
- if(!ctx->buf)
- goto bail;
- for(i=0; i<depth; i++)
- if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img,
- width, height, VP8BORDERINPIXELS))
- goto bail;
- }
- return ctx;
+ unsigned int depth) {
+ struct lookahead_ctx *ctx = NULL;
+ int i;
+
+ /* Clamp the lookahead queue depth */
+ if (depth < 1)
+ depth = 1;
+ else if (depth > MAX_LAG_BUFFERS)
+ depth = MAX_LAG_BUFFERS;
+
+ /* Align the buffer dimensions */
+ width = (width + 15) & ~15;
+ height = (height + 15) & ~15;
+
+ /* Allocate the lookahead structures */
+ ctx = calloc(1, sizeof(*ctx));
+ if (ctx) {
+ ctx->max_sz = depth;
+ ctx->buf = calloc(depth, sizeof(*ctx->buf));
+ if (!ctx->buf)
+ goto bail;
+ for (i = 0; i < depth; i++)
+ if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img,
+ width, height, VP8BORDERINPIXELS))
+ goto bail;
+ }
+ return ctx;
bail:
- vp8_lookahead_destroy(ctx);
- return NULL;
+ vp8_lookahead_destroy(ctx);
+ return NULL;
}
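/*
 * The (x + 15) & ~15 idiom in vp8_lookahead_init() above rounds a
 * dimension up to the next multiple of 16 (one macroblock). A quick
 * standalone check with sample widths:
 */
#include <stdio.h>

int main(void) {
  unsigned int widths[4] = {160, 161, 175, 176};
  int i;
  for (i = 0; i < 4; i++)
    printf("%u -> %u\n", widths[i], (widths[i] + 15) & ~15u);
  /* 160 -> 160, 161 -> 176, 175 -> 176, 176 -> 176 */
  return 0;
}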
@@ -103,109 +96,96 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
int64_t ts_start,
int64_t ts_end,
unsigned int flags,
- unsigned char *active_map)
-{
- struct lookahead_entry* buf;
- int row, col, active_end;
- int mb_rows = (src->y_height + 15) >> 4;
- int mb_cols = (src->y_width + 15) >> 4;
-
- if(ctx->sz + 1 > ctx->max_sz)
- return 1;
- ctx->sz++;
- buf = pop(ctx, &ctx->write_idx);
-
- // Only do this partial copy if the following conditions are all met:
- // 1. Lookahead queue has has size of 1.
- // 2. Active map is provided.
- // 3. This is not a key frame, golden nor altref frame.
- if (ctx->max_sz == 1 && active_map && !flags)
- {
- for (row = 0; row < mb_rows; ++row)
- {
- col = 0;
-
- while (1)
- {
- // Find the first active macroblock in this row.
- for (; col < mb_cols; ++col)
- {
- if (active_map[col])
- break;
- }
-
- // No more active macroblock in this row.
- if (col == mb_cols)
- break;
-
- // Find the end of active region in this row.
- active_end = col;
-
- for (; active_end < mb_cols; ++active_end)
- {
- if (!active_map[active_end])
- break;
- }
-
- // Only copy this active region.
- vp8_copy_and_extend_frame_with_rect(src, &buf->img,
- row << 4,
- col << 4, 16,
- (active_end - col) << 4);
-
- // Start again from the end of this active region.
- col = active_end;
- }
-
- active_map += mb_cols;
+ unsigned char *active_map) {
+ struct lookahead_entry *buf;
+ int row, col, active_end;
+ int mb_rows = (src->y_height + 15) >> 4;
+ int mb_cols = (src->y_width + 15) >> 4;
+
+ if (ctx->sz + 1 > ctx->max_sz)
+ return 1;
+ ctx->sz++;
+ buf = pop(ctx, &ctx->write_idx);
+
+ // Only do this partial copy if the following conditions are all met:
+ // 1. Lookahead queue has a size of 1.
+ // 2. Active map is provided.
+ // 3. This is not a key frame, golden frame, or altref frame.
+ if (ctx->max_sz == 1 && active_map && !flags) {
+ for (row = 0; row < mb_rows; ++row) {
+ col = 0;
+
+ while (1) {
+ // Find the first active macroblock in this row.
+ for (; col < mb_cols; ++col) {
+ if (active_map[col])
+ break;
}
+
+ // No more active macroblocks in this row.
+ if (col == mb_cols)
+ break;
+
+ // Find the end of the active region in this row.
+ active_end = col;
+
+ for (; active_end < mb_cols; ++active_end) {
+ if (!active_map[active_end])
+ break;
+ }
+
+ // Only copy this active region.
+ vp8_copy_and_extend_frame_with_rect(src, &buf->img,
+ row << 4,
+ col << 4, 16,
+ (active_end - col) << 4);
+
+ // Start again from the end of this active region.
+ col = active_end;
+ }
+
+ active_map += mb_cols;
}
- else
- {
- vp8_copy_and_extend_frame(src, &buf->img);
- }
- buf->ts_start = ts_start;
- buf->ts_end = ts_end;
- buf->flags = flags;
- return 0;
+ } else {
+ vp8_copy_and_extend_frame(src, &buf->img);
+ }
+ buf->ts_start = ts_start;
+ buf->ts_end = ts_end;
+ buf->flags = flags;
+ return 0;
}
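/*
 * Sketch of the active-map row scan in vp8_lookahead_push() above:
 * find each contiguous run of active macroblocks in a row and report
 * it as one copy region. The map contents are invented; the real code
 * passes each run to vp8_copy_and_extend_frame_with_rect().
 */
#include <stdio.h>

int main(void) {
  unsigned char active_map[10] = {0, 1, 1, 0, 0, 1, 1, 1, 0, 0};
  int mb_cols = 10, col = 0, active_end;
  while (1) {
    for (; col < mb_cols; ++col)        /* first active MB in the row */
      if (active_map[col]) break;
    if (col == mb_cols) break;          /* row exhausted */
    for (active_end = col; active_end < mb_cols; ++active_end)
      if (!active_map[active_end]) break;
    printf("copy cols [%d, %d)\n", col, active_end);  /* [1,3) and [5,8) */
    col = active_end;                   /* resume after the run */
  }
  return 0;
}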
-struct lookahead_entry*
+struct lookahead_entry *
vp8_lookahead_pop(struct lookahead_ctx *ctx,
- int drain)
-{
- struct lookahead_entry* buf = NULL;
-
- if(ctx->sz && (drain || ctx->sz == ctx->max_sz))
- {
- buf = pop(ctx, &ctx->read_idx);
- ctx->sz--;
- }
- return buf;
+ int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
+ buf = pop(ctx, &ctx->read_idx);
+ ctx->sz--;
+ }
+ return buf;
}
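/*
 * Illustration of the pop gating in vp8_lookahead_pop() above: frames
 * only come out once the queue is full, unless the caller is draining
 * at end of stream. Depth 3 is an example value.
 */
#include <stdio.h>

static int can_pop(unsigned int sz, unsigned int max_sz, int drain) {
  return sz && (drain || sz == max_sz);
}

int main(void) {
  /* depth-3 queue: nothing pops until 3 frames are buffered... */
  printf("%d %d %d\n", can_pop(1, 3, 0), can_pop(2, 3, 0), can_pop(3, 3, 0));
  /* ...but when draining, anything still buffered pops. */
  printf("%d %d\n", can_pop(1, 3, 1), can_pop(0, 3, 1));
  return 0;
}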
-struct lookahead_entry*
+struct lookahead_entry *
vp8_lookahead_peek(struct lookahead_ctx *ctx,
- int index)
-{
- struct lookahead_entry* buf = NULL;
-
- assert(index < ctx->max_sz);
- if(index < ctx->sz)
- {
- index += ctx->read_idx;
- if(index >= ctx->max_sz)
- index -= ctx->max_sz;
- buf = ctx->buf + index;
- }
- return buf;
+ int index) {
+ struct lookahead_entry *buf = NULL;
+
+ assert(index < ctx->max_sz);
+ if (index < ctx->sz) {
+ index += ctx->read_idx;
+ if (index >= ctx->max_sz)
+ index -= ctx->max_sz;
+ buf = ctx->buf + index;
+ }
+ return buf;
}
unsigned int
-vp8_lookahead_depth(struct lookahead_ctx *ctx)
-{
- return ctx->sz;
+vp8_lookahead_depth(struct lookahead_ctx *ctx) {
+ return ctx->sz;
}
diff --git a/vp8/encoder/lookahead.h b/vp8/encoder/lookahead.h
index afb3fd4a9..3c7abf901 100644
--- a/vp8/encoder/lookahead.h
+++ b/vp8/encoder/lookahead.h
@@ -12,12 +12,11 @@
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
-struct lookahead_entry
-{
- YV12_BUFFER_CONFIG img;
- int64_t ts_start;
- int64_t ts_end;
- unsigned int flags;
+struct lookahead_entry {
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
};
@@ -30,10 +29,10 @@ struct lookahead_ctx;
*
*
*/
-struct lookahead_ctx* vp8_lookahead_init(unsigned int width,
+struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
unsigned int height,
unsigned int depth
- );
+ );
/**\brief Destroys the lookahead stage
@@ -77,7 +76,7 @@ vp8_lookahead_push(struct lookahead_ctx *ctx,
* \retval NULL, if drain not set and queue not of the configured depth
*
*/
-struct lookahead_entry*
+struct lookahead_entry *
vp8_lookahead_pop(struct lookahead_ctx *ctx,
int drain);
@@ -90,7 +89,7 @@ vp8_lookahead_pop(struct lookahead_ctx *ctx,
* \retval NULL, if no buffer exists at the specified index
*
*/
-struct lookahead_entry*
+struct lookahead_entry *
vp8_lookahead_peek(struct lookahead_ctx *ctx,
int index);
diff --git a/vp8/encoder/mbgraph.c b/vp8/encoder/mbgraph.c
index cad27500a..4cd3ea233 100644
--- a/vp8/encoder/mbgraph.c
+++ b/vp8/encoder/mbgraph.c
@@ -20,549 +20,509 @@
static unsigned int do_16x16_motion_iteration
(
- VP8_COMP *cpi,
- int_mv *ref_mv,
- int_mv *dst_mv
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
- BLOCK *b = &x->block[0];
- BLOCKD *d = &xd->block[0];
- vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
- unsigned int best_err;
- int step_param, further_steps;
- static int dummy_cost[2*mv_max+1];
- int *mvcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
- int *mvsadcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
+ VP8_COMP *cpi,
+ int_mv *ref_mv,
+ int_mv *dst_mv
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &xd->block[0];
+ vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ unsigned int best_err;
+ int step_param, further_steps;
+ static int dummy_cost[2 * mv_max + 1];
+ int *mvcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
+ int *mvsadcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
#if CONFIG_HIGH_PRECISION_MV
- static int dummy_cost_hp[2*mv_max_hp+1];
- int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost_hp[mv_max_hp+1] };
- int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost_hp[mv_max_hp+1] };
+ static int dummy_cost_hp[2 * mv_max_hp + 1];
+ int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
+ int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
#endif
- int col_min = (ref_mv->as_mv.col>>3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.col & 7)?1:0);
- int row_min = (ref_mv->as_mv.row>>3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.row & 7)?1:0);
- int col_max = (ref_mv->as_mv.col>>3) + MAX_FULL_PEL_VAL;
- int row_max = (ref_mv->as_mv.row>>3) + MAX_FULL_PEL_VAL;
- int tmp_col_min = x->mv_col_min;
- int tmp_col_max = x->mv_col_max;
- int tmp_row_min = x->mv_row_min;
- int tmp_row_max = x->mv_row_max;
- int_mv ref_full;
-
- // Further step/diamond searches as necessary
- if (cpi->Speed < 8)
- {
- step_param = cpi->sf.first_step + ((cpi->Speed > 5) ? 1 : 0);
- further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
- }
- else
- {
- step_param = cpi->sf.first_step + 2;
- further_steps = 0;
- }
-
- /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
- if (x->mv_col_min < col_min )
- x->mv_col_min = col_min;
- if (x->mv_col_max > col_max )
- x->mv_col_max = col_max;
- if (x->mv_row_min < row_min )
- x->mv_row_min = row_min;
- if (x->mv_row_max > row_max )
- x->mv_row_max = row_max;
-
- ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
- ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
-
- /*cpi->sf.search_method == HEX*/
- best_err = vp8_hex_search(x, b, d,
- &ref_full, dst_mv,
- step_param,
- x->errorperbit,
- &v_fn_ptr,
+ int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.col & 7) ? 1 : 0);
+ int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.row & 7) ? 1 : 0);
+ int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+ int_mv ref_full;
+
+ // Further step/diamond searches as necessary
+ if (cpi->Speed < 8) {
+ step_param = cpi->sf.first_step + ((cpi->Speed > 5) ? 1 : 0);
+ further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+ } else {
+ step_param = cpi->sf.first_step + 2;
+ further_steps = 0;
+ }
+
+ /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
+ if (x->mv_col_min < col_min)
+ x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max)
+ x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min)
+ x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max)
+ x->mv_row_max = row_max;
+
+ ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
+
+ /*cpi->sf.search_method == HEX*/
+ best_err = vp8_hex_search(x, b, d,
+ &ref_full, dst_mv,
+ step_param,
+ x->errorperbit,
+ &v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
- xd->allow_high_precision_mv?mvsadcost_hp:mvsadcost, xd->allow_high_precision_mv?mvcost_hp:mvcost,
+ xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost, xd->allow_high_precision_mv ? mvcost_hp : mvcost,
#else
- mvsadcost, mvcost,
+ mvsadcost, mvcost,
#endif
- ref_mv);
-
- // Try sub-pixel MC
- //if (bestsme > error_thresh && bestsme < INT_MAX)
- {
- int distortion;
- unsigned int sse;
- best_err = cpi->find_fractional_mv_step(x, b, d,
- dst_mv, ref_mv,
- x->errorperbit, &v_fn_ptr,
+ ref_mv);
+
+ // Try sub-pixel MC
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ best_err = cpi->find_fractional_mv_step(x, b, d,
+ dst_mv, ref_mv,
+ x->errorperbit, &v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
- xd->allow_high_precision_mv?mvcost_hp:mvcost,
+ xd->allow_high_precision_mv ? mvcost_hp : mvcost,
#else
- mvcost,
+ mvcost,
#endif
- &distortion, &sse);
- }
+ &distortion, &sse);
+ }
#if CONFIG_PRED_FILTER
- // Disable the prediction filter
- xd->mode_info_context->mbmi.pred_filter_enabled = 0;
+ // Disable the prediction filter
+ xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
- vp8_set_mbmode_and_mvs(x, NEWMV, dst_mv);
- vp8_build_inter16x16_predictors_mby(xd);
- //VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- best_err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (xd->dst.y_buffer, xd->dst.y_stride,
- xd->predictor, 16, INT_MAX);
+ vp8_set_mbmode_and_mvs(x, NEWMV, dst_mv);
+ vp8_build_inter16x16_predictors_mby(xd);
+ // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
+ best_err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
+ (xd->dst.y_buffer, xd->dst.y_stride,
+ xd->predictor, 16, INT_MAX);
- /* restore UMV window */
- x->mv_col_min = tmp_col_min;
- x->mv_col_max = tmp_col_max;
- x->mv_row_min = tmp_row_min;
- x->mv_row_max = tmp_row_max;
+ /* restore UMV window */
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
- return best_err;
+ return best_err;
}
static int do_16x16_motion_search
(
- VP8_COMP *cpi,
- int_mv *ref_mv,
- int_mv *dst_mv,
- YV12_BUFFER_CONFIG *buf,
- int buf_mb_y_offset,
- YV12_BUFFER_CONFIG *ref,
- int mb_y_offset
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
- unsigned int err, tmp_err;
- int_mv tmp_mv;
- int n;
-
- for (n = 0; n < 16; n++) {
- BLOCKD *d = &xd->block[n];
- BLOCK *b = &x->block[n];
-
- b->base_src = &buf->y_buffer;
- b->src_stride = buf->y_stride;
- b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
-
- d->base_pre = &ref->y_buffer;
- d->pre_stride = ref->y_stride;
- d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
- }
-
- // Try zero MV first
- // FIXME should really use something like near/nearest MV and/or MV prediction
- xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
- xd->pre.y_stride = ref->y_stride;
- //VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (ref->y_buffer + mb_y_offset,
- ref->y_stride, xd->dst.y_buffer,
- xd->dst.y_stride, INT_MAX);
- dst_mv->as_int = 0;
-
- // Test last reference frame using the previous best mv as the
- // starting point (best reference) for the search
- tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv);
- if (tmp_err < err)
- {
- err = tmp_err;
- dst_mv->as_int = tmp_mv.as_int;
- }
-
- // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
- if (ref_mv->as_int)
- {
- int tmp_err;
- int_mv zero_ref_mv, tmp_mv;
-
- zero_ref_mv.as_int = 0;
- tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv);
- if (tmp_err < err)
- {
- dst_mv->as_int = tmp_mv.as_int;
- err = tmp_err;
- }
+ VP8_COMP *cpi,
+ int_mv *ref_mv,
+ int_mv *dst_mv,
+ YV12_BUFFER_CONFIG *buf,
+ int buf_mb_y_offset,
+ YV12_BUFFER_CONFIG *ref,
+ int mb_y_offset
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err, tmp_err;
+ int_mv tmp_mv;
+ int n;
+
+ for (n = 0; n < 16; n++) {
+ BLOCKD *d = &xd->block[n];
+ BLOCK *b = &x->block[n];
+
+ b->base_src = &buf->y_buffer;
+ b->src_stride = buf->y_stride;
+ b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
+
+ d->base_pre = &ref->y_buffer;
+ d->pre_stride = ref->y_stride;
+ d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
+ }
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
+ xd->pre.y_stride = ref->y_stride;
+ // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
+ err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
+ (ref->y_buffer + mb_y_offset,
+ ref->y_stride, xd->dst.y_buffer,
+ xd->dst.y_stride, INT_MAX);
+ dst_mv->as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv);
+ if (tmp_err < err) {
+ err = tmp_err;
+ dst_mv->as_int = tmp_mv.as_int;
+ }
+
+ // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
+ if (ref_mv->as_int) {
+ int tmp_err;
+ int_mv zero_ref_mv, tmp_mv;
+
+ zero_ref_mv.as_int = 0;
+ tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv);
+ if (tmp_err < err) {
+ dst_mv->as_int = tmp_mv.as_int;
+ err = tmp_err;
}
+ }
- return err;
+ return err;
}
static int do_16x16_zerozero_search
(
- VP8_COMP *cpi,
- int_mv *dst_mv,
- YV12_BUFFER_CONFIG *buf,
- int buf_mb_y_offset,
- YV12_BUFFER_CONFIG *ref,
- int mb_y_offset
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
- unsigned int err;
- int n;
-
- for (n = 0; n < 16; n++) {
- BLOCKD *d = &xd->block[n];
- BLOCK *b = &x->block[n];
-
- b->base_src = &buf->y_buffer;
- b->src_stride = buf->y_stride;
- b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
-
- d->base_pre = &ref->y_buffer;
- d->pre_stride = ref->y_stride;
- d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
- }
-
- // Try zero MV first
- // FIXME should really use something like near/nearest MV and/or MV prediction
- xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
- xd->pre.y_stride = ref->y_stride;
- //VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (ref->y_buffer + mb_y_offset,
- ref->y_stride, xd->dst.y_buffer,
- xd->dst.y_stride, INT_MAX);
-
- dst_mv->as_int = 0;
-
- return err;
+ VP8_COMP *cpi,
+ int_mv *dst_mv,
+ YV12_BUFFER_CONFIG *buf,
+ int buf_mb_y_offset,
+ YV12_BUFFER_CONFIG *ref,
+ int mb_y_offset
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err;
+ int n;
+
+ for (n = 0; n < 16; n++) {
+ BLOCKD *d = &xd->block[n];
+ BLOCK *b = &x->block[n];
+
+ b->base_src = &buf->y_buffer;
+ b->src_stride = buf->y_stride;
+ b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
+
+ d->base_pre = &ref->y_buffer;
+ d->pre_stride = ref->y_stride;
+ d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
+ }
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
+ xd->pre.y_stride = ref->y_stride;
+ // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
+ err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
+ (ref->y_buffer + mb_y_offset,
+ ref->y_stride, xd->dst.y_buffer,
+ xd->dst.y_stride, INT_MAX);
+
+ dst_mv->as_int = 0;
+
+ return err;
}
static int find_best_16x16_intra
(
- VP8_COMP *cpi,
- YV12_BUFFER_CONFIG *buf,
- int mb_y_offset,
- MB_PREDICTION_MODE *pbest_mode
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
- MB_PREDICTION_MODE best_mode = -1, mode;
- int best_err = INT_MAX;
-
- // calculate SATD for each intra prediction mode;
- // we're intentionally not doing 4x4, we just want a rough estimate
- for (mode = DC_PRED; mode <= TM_PRED; mode++)
- {
- unsigned int err;
-
- xd->mode_info_context->mbmi.mode = mode;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
- //VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (xd->predictor, 16,
- buf->y_buffer + mb_y_offset,
- buf->y_stride, best_err);
- // find best
- if (err < best_err)
- {
- best_err = err;
- best_mode = mode;
- }
+ VP8_COMP *cpi,
+ YV12_BUFFER_CONFIG *buf,
+ int mb_y_offset,
+ MB_PREDICTION_MODE *pbest_mode
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_PREDICTION_MODE best_mode = -1, mode;
+ int best_err = INT_MAX;
+
+ // calculate SATD for each intra prediction mode;
+ // we're intentionally not doing 4x4; we just want a rough estimate
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ unsigned int err;
+
+ xd->mode_info_context->mbmi.mode = mode;
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
+ // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
+ err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
+ (xd->predictor, 16,
+ buf->y_buffer + mb_y_offset,
+ buf->y_stride, best_err);
+ // find best
+ if (err < best_err) {
+ best_err = err;
+ best_mode = mode;
}
+ }
- if (pbest_mode)
- *pbest_mode = best_mode;
+ if (pbest_mode)
+ *pbest_mode = best_mode;
- return best_err;
+ return best_err;
}
static void update_mbgraph_mb_stats
(
- VP8_COMP *cpi,
- MBGRAPH_MB_STATS *stats,
- YV12_BUFFER_CONFIG *buf,
- int mb_y_offset,
- YV12_BUFFER_CONFIG *golden_ref,
- int_mv *prev_golden_ref_mv,
- int gld_y_offset,
- YV12_BUFFER_CONFIG *alt_ref,
- int_mv *prev_alt_ref_mv,
- int arf_y_offset
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
- int intra_error;
-
- // FIXME in practice we're completely ignoring chroma here
- xd->dst.y_buffer = buf->y_buffer + mb_y_offset;
-
- // do intra 16x16 prediction
- intra_error = find_best_16x16_intra(cpi, buf, mb_y_offset, &stats->ref[INTRA_FRAME].m.mode);
- if (intra_error <= 0)
- intra_error = 1;
- stats->ref[INTRA_FRAME].err = intra_error;
-
- // Golden frame MV search, if it exists and is different than last frame
- if (golden_ref)
- {
- int g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
- &stats->ref[GOLDEN_FRAME].m.mv,
- buf, mb_y_offset,
- golden_ref, gld_y_offset);
- stats->ref[GOLDEN_FRAME].err = g_motion_error;
- }
- else
- {
- stats->ref[GOLDEN_FRAME].err = INT_MAX;
- stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
- }
-
- // Alt-ref frame MV search, if it exists and is different than last/golden frame
- if (alt_ref)
- {
- //int a_motion_error = do_16x16_motion_search(cpi, prev_alt_ref_mv,
- // &stats->ref[ALTREF_FRAME].m.mv,
- // buf, mb_y_offset,
- // alt_ref, arf_y_offset);
-
- int a_motion_error =
- do_16x16_zerozero_search( cpi,
- &stats->ref[ALTREF_FRAME].m.mv,
- buf, mb_y_offset,
- alt_ref, arf_y_offset);
-
- stats->ref[ALTREF_FRAME].err = a_motion_error;
- }
- else
- {
- stats->ref[ALTREF_FRAME].err = INT_MAX;
- stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
- }
+ VP8_COMP *cpi,
+ MBGRAPH_MB_STATS *stats,
+ YV12_BUFFER_CONFIG *buf,
+ int mb_y_offset,
+ YV12_BUFFER_CONFIG *golden_ref,
+ int_mv *prev_golden_ref_mv,
+ int gld_y_offset,
+ YV12_BUFFER_CONFIG *alt_ref,
+ int_mv *prev_alt_ref_mv,
+ int arf_y_offset
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int intra_error;
+
+ // FIXME in practice we're completely ignoring chroma here
+ xd->dst.y_buffer = buf->y_buffer + mb_y_offset;
+
+ // do intra 16x16 prediction
+ intra_error = find_best_16x16_intra(cpi, buf, mb_y_offset, &stats->ref[INTRA_FRAME].m.mode);
+ if (intra_error <= 0)
+ intra_error = 1;
+ stats->ref[INTRA_FRAME].err = intra_error;
+
+ // Golden frame MV search, if it exists and is different than last frame
+ if (golden_ref) {
+ int g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
+ &stats->ref[GOLDEN_FRAME].m.mv,
+ buf, mb_y_offset,
+ golden_ref, gld_y_offset);
+ stats->ref[GOLDEN_FRAME].err = g_motion_error;
+ } else {
+ stats->ref[GOLDEN_FRAME].err = INT_MAX;
+ stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
+ }
+
+ // Alt-ref frame MV search, if it exists and is different than last/golden frame
+ if (alt_ref) {
+ // int a_motion_error = do_16x16_motion_search(cpi, prev_alt_ref_mv,
+ // &stats->ref[ALTREF_FRAME].m.mv,
+ // buf, mb_y_offset,
+ // alt_ref, arf_y_offset);
+
+ int a_motion_error =
+ do_16x16_zerozero_search(cpi,
+ &stats->ref[ALTREF_FRAME].m.mv,
+ buf, mb_y_offset,
+ alt_ref, arf_y_offset);
+
+ stats->ref[ALTREF_FRAME].err = a_motion_error;
+ } else {
+ stats->ref[ALTREF_FRAME].err = INT_MAX;
+ stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
+ }
}
static void update_mbgraph_frame_stats
(
- VP8_COMP *cpi,
- MBGRAPH_FRAME_STATS *stats,
- YV12_BUFFER_CONFIG *buf,
- YV12_BUFFER_CONFIG *golden_ref,
- YV12_BUFFER_CONFIG *alt_ref
-)
-{
- MACROBLOCK * const x = &cpi->mb;
- VP8_COMMON * const cm = &cpi->common;
- MACROBLOCKD * const xd = &x->e_mbd;
- int mb_col, mb_row, offset = 0;
- int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
- int_mv arf_top_mv, gld_top_mv;
- MODE_INFO mi_local;
+ VP8_COMP *cpi,
+ MBGRAPH_FRAME_STATS *stats,
+ YV12_BUFFER_CONFIG *buf,
+ YV12_BUFFER_CONFIG *golden_ref,
+ YV12_BUFFER_CONFIG *alt_ref
+) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int mb_col, mb_row, offset = 0;
+ int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
+ int_mv arf_top_mv, gld_top_mv;
+ MODE_INFO mi_local;
+
+ // Set up limit values for motion vectors to prevent them extending outside the UMV borders
+ arf_top_mv.as_int = 0;
+ gld_top_mv.as_int = 0;
+ x->mv_row_min = -(VP8BORDERINPIXELS - 16 - INTERP_EXTEND);
+ x->mv_row_max = (cm->mb_rows - 1) * 16 + VP8BORDERINPIXELS - 16 - INTERP_EXTEND;
+ xd->up_available = 0;
+ xd->dst.y_stride = buf->y_stride;
+ xd->pre.y_stride = buf->y_stride;
+ xd->dst.uv_stride = buf->uv_stride;
+ xd->mode_info_context = &mi_local;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv arf_left_mv, gld_left_mv;
+ int mb_y_in_offset = mb_y_offset;
+ int arf_y_in_offset = arf_y_offset;
+ int gld_y_in_offset = gld_y_offset;
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
- arf_top_mv.as_int = 0;
- gld_top_mv.as_int = 0;
- x->mv_row_min = -(VP8BORDERINPIXELS - 16 - INTERP_EXTEND);
- x->mv_row_max = (cm->mb_rows - 1) * 16 + VP8BORDERINPIXELS - 16 - INTERP_EXTEND;
- xd->up_available = 0;
- xd->dst.y_stride = buf->y_stride;
- xd->pre.y_stride = buf->y_stride;
- xd->dst.uv_stride = buf->uv_stride;
- xd->mode_info_context = &mi_local;
-
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
- int_mv arf_left_mv, gld_left_mv;
- int mb_y_in_offset = mb_y_offset;
- int arf_y_in_offset = arf_y_offset;
- int gld_y_in_offset = gld_y_offset;
-
- // Set up limit values for motion vectors to prevent them extending outside the UMV borders
- arf_left_mv.as_int = arf_top_mv.as_int;
- gld_left_mv.as_int = gld_top_mv.as_int;
- x->mv_col_min = -(VP8BORDERINPIXELS - 16 - INTERP_EXTEND);
- x->mv_col_max = (cm->mb_cols - 1) * 16 + VP8BORDERINPIXELS - 16 - INTERP_EXTEND;
- xd->left_available = 0;
-
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
-
- update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
- golden_ref, &gld_left_mv, gld_y_in_offset,
- alt_ref, &arf_left_mv, arf_y_in_offset);
- arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
- gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
- if (mb_col == 0)
- {
- arf_top_mv.as_int = arf_left_mv.as_int;
- gld_top_mv.as_int = gld_left_mv.as_int;
- }
- xd->left_available = 1;
- mb_y_in_offset += 16;
- gld_y_in_offset += 16;
- arf_y_in_offset += 16;
- x->mv_col_min -= 16;
- x->mv_col_max -= 16;
- }
- xd->up_available = 1;
- mb_y_offset += buf->y_stride * 16;
- gld_y_offset += golden_ref->y_stride * 16;
- if (alt_ref)
- arf_y_offset += alt_ref->y_stride * 16;
- x->mv_row_min -= 16;
- x->mv_row_max -= 16;
- offset += cm->mb_cols;
+ arf_left_mv.as_int = arf_top_mv.as_int;
+ gld_left_mv.as_int = gld_top_mv.as_int;
+ x->mv_col_min = -(VP8BORDERINPIXELS - 16 - INTERP_EXTEND);
+ x->mv_col_max = (cm->mb_cols - 1) * 16 + VP8BORDERINPIXELS - 16 - INTERP_EXTEND;
+ xd->left_available = 0;
+
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
+
+ update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
+ golden_ref, &gld_left_mv, gld_y_in_offset,
+ alt_ref, &arf_left_mv, arf_y_in_offset);
+ arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
+ gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
+ if (mb_col == 0) {
+ arf_top_mv.as_int = arf_left_mv.as_int;
+ gld_top_mv.as_int = gld_left_mv.as_int;
+ }
+ xd->left_available = 1;
+ mb_y_in_offset += 16;
+ gld_y_in_offset += 16;
+ arf_y_in_offset += 16;
+ x->mv_col_min -= 16;
+ x->mv_col_max -= 16;
}
+ xd->up_available = 1;
+ mb_y_offset += buf->y_stride * 16;
+ gld_y_offset += golden_ref->y_stride * 16;
+ if (alt_ref)
+ arf_y_offset += alt_ref->y_stride * 16;
+ x->mv_row_min -= 16;
+ x->mv_row_max -= 16;
+ offset += cm->mb_cols;
+ }
}
// Test for small magnitude (<= 1 pel mvs)
-int small_mv( MV mv )
-{
- if ( (abs( (int)mv.col ) > 2) || (abs( (int)mv.row ) > 2) )
- return FALSE;
- else
- return TRUE;
+int small_mv(MV mv) {
+ if ((abs((int)mv.col) > 2) || (abs((int)mv.row) > 2))
+ return FALSE;
+ else
+ return TRUE;
}
-//void separate_arf_mbs_byzz
+// void separate_arf_mbs_byzz
void separate_arf_mbs
(
- VP8_COMP *cpi
-)
-{
- VP8_COMMON * const cm = &cpi->common;
- int mb_col, mb_row, offset, i;
- int ncnt[4];
- int n_frames = cpi->mbgraph_n_frames;
-
- int * arf_not_zz;
-
- CHECK_MEM_ERROR(arf_not_zz,
- vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
-
- vpx_memset(arf_not_zz, 0, sizeof(arf_not_zz));
-
- // We are not interested in results beyond the alt ref itself.
- if ( n_frames > cpi->frames_till_gf_update_due )
- n_frames = cpi->frames_till_gf_update_due;
-
- // defer cost to reference frames
- for (i = n_frames - 1; i >= 0; i--)
- {
- MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
-
- for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
- offset += cm->mb_cols, mb_row++)
- {
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- MBGRAPH_MB_STATS *mb_stats =
- &frame_stats->mb_stats[offset + mb_col];
-
- int altref_err = mb_stats->ref[ALTREF_FRAME].err;
- int intra_err = mb_stats->ref[INTRA_FRAME ].err;
- int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
-
- // Test for altref vs intra and gf and that its mv was 0,0.
- if ( (altref_err > 1000) ||
- (altref_err > intra_err) ||
- (altref_err > golden_err) )
- {
- arf_not_zz[offset + mb_col]++;
- }
- }
- }
- }
+ VP8_COMP *cpi
+) {
+ VP8_COMMON *const cm = &cpi->common;
+ int mb_col, mb_row, offset, i;
+ int ncnt[4];
+ int n_frames = cpi->mbgraph_n_frames;
+
+ int *arf_not_zz;
+
+ CHECK_MEM_ERROR(arf_not_zz,
+ vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
+
+ vpx_memset(arf_not_zz, 0, cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz));
+
+ // We are not interested in results beyond the alt ref itself.
+ if (n_frames > cpi->frames_till_gf_update_due)
+ n_frames = cpi->frames_till_gf_update_due;
+
+ // defer cost to reference frames
+ for (i = n_frames - 1; i >= 0; i--) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- vpx_memset(ncnt, 0, sizeof(ncnt));
for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
- offset += cm->mb_cols, mb_row++)
- {
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- // If any of the blocks in the sequence failed then the MB
- // goes in segment 0
- if ( arf_not_zz[offset + mb_col] )
- {
- ncnt[0]++;
- cpi->segmentation_map[offset + mb_col] = 0;
- }
- else
- {
- ncnt[1]++;
- cpi->segmentation_map[offset + mb_col] = 1;
- }
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats =
+ &frame_stats->mb_stats[offset + mb_col];
+
+ int altref_err = mb_stats->ref[ALTREF_FRAME].err;
+ int intra_err = mb_stats->ref[INTRA_FRAME ].err;
+ int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
+
+ // Test for altref vs intra and gf and that its mv was 0,0.
+ if ((altref_err > 1000) ||
+ (altref_err > intra_err) ||
+ (altref_err > golden_err)) {
+ arf_not_zz[offset + mb_col]++;
}
+ }
}
-
- // Only bother with segmentation if over 10% of the MBs in static segment
- //if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
- if ( 1 )
- {
- // Note % of blocks that are marked as static
- if ( cm->MBs )
- cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;
-
- // This error case should not be reachable as this function should
- // never be called with the common data structure unititialized.
- else
- cpi->static_mb_pct = 0;
-
- cpi->seg0_cnt = ncnt[0];
- vp8_enable_segmentation((VP8_PTR) cpi);
+ }
+
+ vpx_memset(ncnt, 0, sizeof(ncnt));
+ for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // If any of the blocks in the sequence failed then the MB
+ // goes in segment 0
+ if (arf_not_zz[offset + mb_col]) {
+ ncnt[0]++;
+ cpi->segmentation_map[offset + mb_col] = 0;
+ } else {
+ ncnt[1]++;
+ cpi->segmentation_map[offset + mb_col] = 1;
+ }
}
+ }
+
+ // Only bother with segmentation if over 10% of the MBs in static segment
+ // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
+ if (1) {
+ // Note % of blocks that are marked as static
+ if (cm->MBs)
+ cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;
+
+ // This error case should not be reachable as this function should
+ // never be called with the common data structure uninitialized.
else
- {
- cpi->static_mb_pct = 0;
- vp8_disable_segmentation((VP8_PTR) cpi);
- }
+ cpi->static_mb_pct = 0;
+
+ cpi->seg0_cnt = ncnt[0];
+ vp8_enable_segmentation((VP8_PTR) cpi);
+ } else {
+ cpi->static_mb_pct = 0;
+ vp8_disable_segmentation((VP8_PTR) cpi);
+ }
- // Free localy allocated storage
- vpx_free(arf_not_zz);
+ // Free locally allocated storage
+ vpx_free(arf_not_zz);
}
void vp8_update_mbgraph_stats
(
- VP8_COMP *cpi
-)
-{
- VP8_COMMON * const cm = &cpi->common;
- int i, n_frames = vp8_lookahead_depth(cpi->lookahead);
- YV12_BUFFER_CONFIG *golden_ref = &cm->yv12_fb[cm->gld_fb_idx];
-
- // we need to look ahead beyond where the ARF transitions into
- // being a GF - so exit if we don't look ahead beyond that
- if (n_frames <= cpi->frames_till_gf_update_due)
- return;
- if( n_frames > cpi->common.frames_till_alt_ref_frame)
- n_frames = cpi->common.frames_till_alt_ref_frame;
- if (n_frames > MAX_LAG_BUFFERS)
- n_frames = MAX_LAG_BUFFERS;
-
- cpi->mbgraph_n_frames = n_frames;
- for (i = 0; i < n_frames; i++)
- {
- MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- vpx_memset(frame_stats->mb_stats, 0,
- cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
- }
-
- // do motion search to find contribution of each reference to data
- // later on in this GF group
- // FIXME really, the GF/last MC search should be done forward, and
- // the ARF MC search backwards, to get optimal results for MV caching
- for (i = 0; i < n_frames; i++)
- {
- MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
- struct lookahead_entry *q_cur =
- vp8_lookahead_peek(cpi->lookahead, i);
-
- assert(q_cur != NULL);
-
- update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
- golden_ref, cpi->Source);
- }
-
- vp8_clear_system_state(); //__asm emms;
-
- separate_arf_mbs(cpi);
+ VP8_COMP *cpi
+) {
+ VP8_COMMON *const cm = &cpi->common;
+ int i, n_frames = vp8_lookahead_depth(cpi->lookahead);
+ YV12_BUFFER_CONFIG *golden_ref = &cm->yv12_fb[cm->gld_fb_idx];
+
+ // we need to look ahead beyond where the ARF transitions into
+ // being a GF - so exit if we don't look ahead beyond that
+ if (n_frames <= cpi->frames_till_gf_update_due)
+ return;
+ if (n_frames > cpi->common.frames_till_alt_ref_frame)
+ n_frames = cpi->common.frames_till_alt_ref_frame;
+ if (n_frames > MAX_LAG_BUFFERS)
+ n_frames = MAX_LAG_BUFFERS;
+
+ cpi->mbgraph_n_frames = n_frames;
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ vpx_memset(frame_stats->mb_stats, 0,
+ cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
+ }
+
+ // do motion search to find contribution of each reference to data
+ // later on in this GF group
+ // FIXME really, the GF/last MC search should be done forward, and
+ // the ARF MC search backwards, to get optimal results for MV caching
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ struct lookahead_entry *q_cur =
+ vp8_lookahead_peek(cpi->lookahead, i);
+
+ assert(q_cur != NULL);
+
+ update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
+ golden_ref, cpi->Source);
+ }
+
+ vp8_clear_system_state(); // __asm emms;
+
+ separate_arf_mbs(cpi);
}
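
For readers skimming the reformatted mbgraph.c above: separate_arf_mbs() marks a macroblock as non-static whenever its zero-MV alt-ref error exceeds 1000 or loses to the intra or golden error in any frame of the group. Below is a minimal standalone sketch of that classification rule, not part of the diff; the struct layout and the sample error values are invented for illustration.

#include <stdio.h>

typedef struct {
  int altref_err;
  int intra_err;
  int golden_err;
} mb_errs;

/* Returns 1 if this frame's stats disqualify the MB from the static segment. */
static int arf_not_zz_hit(const mb_errs *e) {
  return (e->altref_err > 1000) ||
         (e->altref_err > e->intra_err) ||
         (e->altref_err > e->golden_err);
}

int main(void) {
  /* Hypothetical per-frame errors for one macroblock across a 3-frame group. */
  mb_errs frames[3] = {
    { 120, 900, 400 },   /* zero-MV alt-ref clearly best: no hit       */
    { 950, 990, 960 },   /* still best, and under the 1000 cap: no hit */
    { 300, 250, 800 },   /* loses to intra: hit -> segment 0           */
  };
  int hits = 0, i;
  for (i = 0; i < 3; i++)
    hits += arf_not_zz_hit(&frames[i]);
  printf("segment = %d\n", hits ? 0 : 1); /* prints segment = 0 */
  return 0;
}

An MB survives into segment 1 only if it passes in every frame of the group, which is why the counters are accumulated over all frames before the segmentation map is written.
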
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 698528d95..0b08ed3a9 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -23,178 +23,169 @@ static int mv_mode_cts [4] [2];
#endif
#if CONFIG_HIGH_PRECISION_MV
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp)
-{
- // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
- // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
- // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
- // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp==0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp==0)]) * Weight) >> 7;
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp) {
+ // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
+ // overstate the cost of vectors. In addition, coding a new vector can have a knock-on effect on the
+ // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
+ // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)]) * Weight) >> 7;
}
#else
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
-{
- // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
- // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
- // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
- // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
+ // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
+ // overstate the cost of vectors. In addition, coding a new vector can have a knock-on effect on the
+ // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
+ // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
#endif
#if CONFIG_HIGH_PRECISION_MV
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit, int ishp)
-{
- // Ignore costing if mvcost is NULL
- if (mvcost)
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp==0)] +
- mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp==0)])
- * error_per_bit + 128) >> 8;
- return 0;
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit, int ishp) {
+ // Ignore costing if mvcost is NULL
+ if (mvcost)
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
+ mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)])
+ * error_per_bit + 128) >> 8;
+ return 0;
}
#else
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
-{
- // Ignore costing if mvcost is NULL
- if (mvcost)
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
- mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
- * error_per_bit + 128) >> 8;
- return 0;
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit) {
+ // Ignore costing if mvcost is NULL
+ if (mvcost)
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
+ mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
+ * error_per_bit + 128) >> 8;
+ return 0;
}
#endif
-static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
-{
- // Calculate sad error cost on full pixel basis.
- // Ignore costing if mvcost is NULL
- if (mvsadcost)
- return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
- mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
- * error_per_bit + 128) >> 8;
- return 0;
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit) {
+ // Calculate sad error cost on full pixel basis.
+ // Ignore costing if mvcost is NULL
+ if (mvsadcost)
+ return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
+ mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
+ * error_per_bit + 128) >> 8;
+ return 0;
}
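
The cost helpers above share one fixed-point shape: sum the per-component table costs, scale by error_per_bit, and round back down by eight bits (the + 128 is the rounding offset); vp8_mv_bit_cost() uses the same pattern with a Weight factor and a >> 7 instead. A small sketch of that arithmetic, with invented table values:

#include <stdio.h>

/* Fixed-point scaling as in mv_err_cost()/mvsad_err_cost() above. */
static int scaled_cost(int row_cost, int col_cost, int error_per_bit) {
  return ((row_cost + col_cost) * error_per_bit + 128) >> 8;
}

int main(void) {
  /* e.g. 40 + 55 table units at error_per_bit == 110:
   * (95 * 110 + 128) >> 8 == 10578 >> 8 == 41 */
  printf("%d\n", scaled_cost(40, 55, 110));
  return 0;
}
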
-void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
-{
- int Len;
- int search_site_count = 0;
+void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
+ int Len;
+ int search_site_count = 0;
- // Generate offsets for 4 search sites per step.
- Len = MAX_FIRST_STEP;
+ // Generate offsets for 4 search sites per step.
+ Len = MAX_FIRST_STEP;
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ while (Len > 0) {
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = 0;
+ x->ss[search_site_count].offset = -Len;
search_site_count++;
- while (Len > 0)
- {
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = 0;
- x->ss[search_site_count].mv.row = -Len;
- x->ss[search_site_count].offset = -Len * stride;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = 0;
- x->ss[search_site_count].mv.row = Len;
- x->ss[search_site_count].offset = Len * stride;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = -Len;
- x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = -Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = Len;
- x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = Len;
- search_site_count++;
-
- // Contract.
- Len /= 2;
- }
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = Len;
+ search_site_count++;
- x->ss_count = search_site_count;
- x->searches_per_step = 4;
+ // Contract.
+ Len /= 2;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 4;
}
-void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
-{
- int Len;
- int search_site_count = 0;
+void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
+ int Len;
+ int search_site_count = 0;
+
+ // Generate offsets for 8 search sites per step.
+ Len = MAX_FIRST_STEP;
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ while (Len > 0) {
- // Generate offsets for 8 search sites per step.
- Len = MAX_FIRST_STEP;
+ // Compute offsets for search sites.
x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -Len;
x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = 0;
+ x->ss[search_site_count].offset = -Len;
search_site_count++;
- while (Len > 0)
- {
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = 0;
- x->ss[search_site_count].mv.row = -Len;
- x->ss[search_site_count].offset = -Len * stride;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = 0;
- x->ss[search_site_count].mv.row = Len;
- x->ss[search_site_count].offset = Len * stride;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = -Len;
- x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = -Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = Len;
- x->ss[search_site_count].mv.row = 0;
- x->ss[search_site_count].offset = Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = -Len;
- x->ss[search_site_count].mv.row = -Len;
- x->ss[search_site_count].offset = -Len * stride - Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = Len;
- x->ss[search_site_count].mv.row = -Len;
- x->ss[search_site_count].offset = -Len * stride + Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = -Len;
- x->ss[search_site_count].mv.row = Len;
- x->ss[search_site_count].offset = Len * stride - Len;
- search_site_count++;
-
- // Compute offsets for search sites.
- x->ss[search_site_count].mv.col = Len;
- x->ss[search_site_count].mv.row = Len;
- x->ss[search_site_count].offset = Len * stride + Len;
- search_site_count++;
-
-
- // Contract.
- Len /= 2;
- }
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = Len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride - Len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = -Len;
+ x->ss[search_site_count].offset = -Len * stride + Len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -Len;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride - Len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = Len;
+ x->ss[search_site_count].mv.row = Len;
+ x->ss[search_site_count].offset = Len * stride + Len;
+ search_site_count++;
- x->ss_count = search_site_count;
- x->searches_per_step = 8;
+
+ // Contract.
+ Len /= 2;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 8;
}
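
Before the next hunk, a standalone sketch of the table that vp8_init_dsmotion_compensation() above fills in: one centre entry, then four cross offsets (up/down/left/right) per step, with the step length halving each time; vp8_init3smotion_compensation() additionally emits the four diagonals for eight sites per step. MAX_FIRST_STEP and the stride below are placeholders, not the encoder's values.

#include <stdio.h>

#define MAX_FIRST_STEP 8   /* placeholder; the encoder defines its own */

typedef struct { int row, col, offset; } search_site;

static int build_sites(search_site *ss, int stride) {
  int len = MAX_FIRST_STEP, n = 0;
  ss[n].row = 0; ss[n].col = 0; ss[n].offset = 0; n++;
  while (len > 0) {
    ss[n].row = -len; ss[n].col = 0;    ss[n].offset = -len * stride; n++;
    ss[n].row = len;  ss[n].col = 0;    ss[n].offset = len * stride;  n++;
    ss[n].row = 0;    ss[n].col = -len; ss[n].offset = -len;          n++;
    ss[n].row = 0;    ss[n].col = len;  ss[n].offset = len;           n++;
    len /= 2;   /* contract, as in the loop above */
  }
  return n;     /* site count, as stored in x->ss_count */
}

int main(void) {
  search_site ss[64];
  int i, n = build_sites(ss, 32);  /* stride 32 matches the temp buffer used later */
  for (i = 0; i < n; i++)
    printf("site %2d: mv=(%3d,%3d) offset=%5d\n", i, ss[i].row, ss[i].col, ss[i].offset);
  return 0;
}
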
/*
@@ -240,217 +231,209 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
int *mvcost[2], int *distortion,
- unsigned int *sse1)
-{
- unsigned char *z = (*(b->base_src) + b->src);
- MACROBLOCKD *xd = &x->e_mbd;
-
- int rr, rc, br, bc, hstep;
- int tr, tc;
- unsigned int besterr = INT_MAX;
- unsigned int left, right, up, down, diag;
- unsigned int sse;
- unsigned int whichdir;
- unsigned int halfiters = 4;
- unsigned int quarteriters = 4;
+ unsigned int *sse1) {
+ unsigned char *z = (*(b->base_src) + b->src);
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int rr, rc, br, bc, hstep;
+ int tr, tc;
+ unsigned int besterr = INT_MAX;
+ unsigned int left, right, up, down, diag;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = 4;
+ unsigned int quarteriters = 4;
#if CONFIG_HIGH_PRECISION_MV
- unsigned int eighthiters = 4;
+ unsigned int eighthiters = 4;
#endif
- int thismse;
- int maxc, minc, maxr, minr;
- int y_stride;
- int offset;
+ int thismse;
+ int maxc, minc, maxr, minr;
+ int y_stride;
+ int offset;
#if ARCH_X86 || ARCH_X86_64
- unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- unsigned char *y;
- int buf_r1, buf_r2, buf_c1, buf_c2;
-
- // Clamping to avoid out-of-range data access
- buf_r1 = ((bestmv->as_mv.row - INTERP_EXTEND) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):INTERP_EXTEND;
- buf_r2 = ((bestmv->as_mv.row + INTERP_EXTEND) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):INTERP_EXTEND;
- buf_c1 = ((bestmv->as_mv.col - INTERP_EXTEND) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):INTERP_EXTEND;
- buf_c2 = ((bestmv->as_mv.col + INTERP_EXTEND) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):INTERP_EXTEND;
- y_stride = 32;
-
- /* Copy to intermediate buffer before searching. */
- vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
- y = xd->y_buf + y_stride*buf_r1 +buf_c1;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
+ int buf_r1, buf_r2, buf_c1, buf_c2;
+
+ // Clamping to avoid out-of-range data access
+ buf_r1 = ((bestmv->as_mv.row - INTERP_EXTEND) < x->mv_row_min) ? (bestmv->as_mv.row - x->mv_row_min) : INTERP_EXTEND;
+ buf_r2 = ((bestmv->as_mv.row + INTERP_EXTEND) > x->mv_row_max) ? (x->mv_row_max - bestmv->as_mv.row) : INTERP_EXTEND;
+ buf_c1 = ((bestmv->as_mv.col - INTERP_EXTEND) < x->mv_col_min) ? (bestmv->as_mv.col - x->mv_col_min) : INTERP_EXTEND;
+ buf_c2 = ((bestmv->as_mv.col + INTERP_EXTEND) > x->mv_col_max) ? (x->mv_col_max - bestmv->as_mv.col) : INTERP_EXTEND;
+ y_stride = 32;
+
+ /* Copy to intermediate buffer before searching. */
+ vfp->copymem(y0 - buf_c1 - d->pre_stride * buf_r1, d->pre_stride, xd->y_buf, y_stride, 16 + buf_r1 + buf_r2);
+ y = xd->y_buf + y_stride * buf_r1 + buf_c1;
#else
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- y_stride = d->pre_stride;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
#endif
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- rr = ref_mv->as_mv.row; rc = ref_mv->as_mv.col;
- br = bestmv->as_mv.row << 3; bc = bestmv->as_mv.col << 3;
- hstep = 4;
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
- }
- else
+ if (xd->allow_high_precision_mv) {
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
+ } else
+#endif
+ {
+ rr = ref_mv->as_mv.row >> 1;
+ rc = ref_mv->as_mv.col >> 1;
+ br = bestmv->as_mv.row << 2;
+ bc = bestmv->as_mv.col << 2;
+ hstep = 2;
+ minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
+ maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
+ minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
+ maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
+ }
+
+ tr = br;
+ tc = bc;
+
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+ besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = besterr;
+#if CONFIG_HIGH_PRECISION_MV
+ besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+#else
+ besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
#endif
- {
- rr = ref_mv->as_mv.row >> 1; rc = ref_mv->as_mv.col >> 1;
- br = bestmv->as_mv.row << 2; bc = bestmv->as_mv.col << 2;
- hstep = 2;
- minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
- maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
- minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
- maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
- }
- tr = br;
- tc = bc;
+ // TODO: Each subsequent iteration checks at least one point in common with the last iteration; it could be two (if diag was selected)
+ while (--halfiters) {
+ // 1/2 pel
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
- // central mv
- bestmv->as_mv.row <<= 3;
- bestmv->as_mv.col <<= 3;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
- // calculate central point error
- besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
- *distortion = besterr;
-#if CONFIG_HIGH_PRECISION_MV
- besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-#else
- besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
-#endif
+ tr = br;
+ tc = bc;
+ }
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
- while (--halfiters)
- {
- // 1/2 pel
- CHECK_BETTER(left, tr, tc - hstep);
- CHECK_BETTER(right, tr, tc + hstep);
- CHECK_BETTER(up, tr - hstep, tc);
- CHECK_BETTER(down, tr + hstep, tc);
+ // TODO: Each subsequent iteration checks at least one point in common with the last iteration; it could be two (if diag was selected)
+ // 1/4 pel
+ hstep >>= 1;
+ while (--quarteriters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- switch (whichdir)
- {
- case 0:
- CHECK_BETTER(diag, tr - hstep, tc - hstep);
- break;
- case 1:
- CHECK_BETTER(diag, tr - hstep, tc + hstep);
- break;
- case 2:
- CHECK_BETTER(diag, tr + hstep, tc - hstep);
- break;
- case 3:
- CHECK_BETTER(diag, tr + hstep, tc + hstep);
- break;
- }
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
- // no reason to check the same one again.
- if (tr == br && tc == bc)
- break;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
- tr = br;
- tc = bc;
- }
+ tr = br;
+ tc = bc;
+ }
- // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
- // 1/4 pel
+#if CONFIG_HIGH_PRECISION_MV
+ if (x->e_mbd.allow_high_precision_mv) {
hstep >>= 1;
- while (--quarteriters)
- {
- CHECK_BETTER(left, tr, tc - hstep);
- CHECK_BETTER(right, tr, tc + hstep);
- CHECK_BETTER(up, tr - hstep, tc);
- CHECK_BETTER(down, tr + hstep, tc);
+ while (--eighthiters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- switch (whichdir)
- {
+ switch (whichdir) {
case 0:
- CHECK_BETTER(diag, tr - hstep, tc - hstep);
- break;
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
case 1:
- CHECK_BETTER(diag, tr - hstep, tc + hstep);
- break;
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
case 2:
- CHECK_BETTER(diag, tr + hstep, tc - hstep);
- break;
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
case 3:
- CHECK_BETTER(diag, tr + hstep, tc + hstep);
- break;
- }
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
- // no reason to check the same one again.
- if (tr == br && tc == bc)
- break;
-
- tr = br;
- tc = bc;
- }
-
-#if CONFIG_HIGH_PRECISION_MV
- if (x->e_mbd.allow_high_precision_mv)
- {
- hstep >>= 1;
- while (--eighthiters)
- {
- CHECK_BETTER(left, tr, tc - hstep);
- CHECK_BETTER(right, tr, tc + hstep);
- CHECK_BETTER(up, tr - hstep, tc);
- CHECK_BETTER(down, tr + hstep, tc);
-
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
-
- switch (whichdir)
- {
- case 0:
- CHECK_BETTER(diag, tr - hstep, tc - hstep);
- break;
- case 1:
- CHECK_BETTER(diag, tr - hstep, tc + hstep);
- break;
- case 2:
- CHECK_BETTER(diag, tr + hstep, tc - hstep);
- break;
- case 3:
- CHECK_BETTER(diag, tr + hstep, tc + hstep);
- break;
- }
-
- // no reason to check the same one again.
- if (tr == br && tc == bc)
- break;
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
- tr = br;
- tc = bc;
- }
+ tr = br;
+ tc = bc;
}
+ }
#endif
#if CONFIG_HIGH_PRECISION_MV
- if (x->e_mbd.allow_high_precision_mv)
- {
- bestmv->as_mv.row = br;
- bestmv->as_mv.col = bc;
- }
- else
+ if (x->e_mbd.allow_high_precision_mv) {
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+ } else
#endif /* CONFIG_HIGH_PRECISION_MV */
- {
- bestmv->as_mv.row = br << 1;
- bestmv->as_mv.col = bc << 1;
- }
+ {
+ bestmv->as_mv.row = br << 1;
+ bestmv->as_mv.col = bc << 1;
+ }
- if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
- (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
- return INT_MAX;
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
- return besterr;
+ return besterr;
}
#undef MVC
#undef PRE
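
The CHECK_BETTER loops above implement a plus-then-diagonal refinement at successively halved step sizes (1/2, 1/4, and optionally 1/8 pel). The following is a compact sketch of the same pattern on a toy quadratic error surface, not the encoder's code: the real version evaluates sub-pel variance via vfp and adds the MV rate cost at each probe.

#include <stdio.h>

static unsigned int toy_err(int r, int c) {      /* minimum at (3, -5) */
  return (unsigned int)((r - 3) * (r - 3) + (c + 5) * (c + 5));
}

int main(void) {
  int br = 0, bc = 0, hstep;
  unsigned int besterr = toy_err(br, bc);
  for (hstep = 4; hstep > 0; hstep >>= 1) {      /* step in 1/8-pel units */
    int iters = 4;
    while (iters--) {
      /* Probe the four cross neighbours at the current step size. */
      unsigned int left  = toy_err(br, bc - hstep);
      unsigned int right = toy_err(br, bc + hstep);
      unsigned int up    = toy_err(br - hstep, bc);
      unsigned int down  = toy_err(br + hstep, bc);
      /* One diagonal, in the quadrant the cross probes point to. */
      int dr = (up < down) ? -hstep : hstep;
      int dc = (left < right) ? -hstep : hstep;
      unsigned int diag = toy_err(br + dr, bc + dc);
      int tr = br, tc = bc;
      if (left  < besterr) { besterr = left;  tr = br; tc = bc - hstep; }
      if (right < besterr) { besterr = right; tr = br; tc = bc + hstep; }
      if (up    < besterr) { besterr = up;    tr = br - hstep; tc = bc; }
      if (down  < besterr) { besterr = down;  tr = br + hstep; tc = bc; }
      if (diag  < besterr) { besterr = diag;  tr = br + dr; tc = bc + dc; }
      if (tr == br && tc == bc)
        break;                                   /* no improvement: next step size */
      br = tr; bc = tc;
    }
  }
  printf("best mv = (%d, %d), err = %u\n", br, bc, besterr);
  return 0;
}
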
@@ -479,559 +462,494 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
int *mvcost[2], int *distortion,
- unsigned int *sse1)
-{
- int bestmse = INT_MAX;
- int_mv startmv;
- int_mv this_mv;
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
#if CONFIG_HIGH_PRECISION_MV
- int_mv orig_mv;
- int yrow_movedback=0, ycol_movedback=0;
+ int_mv orig_mv;
+ int yrow_movedback = 0, ycol_movedback = 0;
#endif
- unsigned char *z = (*(b->base_src) + b->src);
- int left, right, up, down, diag;
- unsigned int sse;
- int whichdir ;
- int thismse;
- int y_stride;
- MACROBLOCKD *xd = &x->e_mbd;
+ unsigned char *z = (*(b->base_src) + b->src);
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ MACROBLOCKD *xd = &x->e_mbd;
#if ARCH_X86 || ARCH_X86_64
- unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- unsigned char *y;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
- y_stride = 32;
- /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
- y = xd->y_buf + y_stride + 1;
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
#else
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- y_stride = d->pre_stride;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
#endif
- // central mv
- bestmv->as_mv.row <<= 3;
- bestmv->as_mv.col <<= 3;
- startmv = *bestmv;
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+ startmv = *bestmv;
#if CONFIG_HIGH_PRECISION_MV
- orig_mv = *bestmv;
+ orig_mv = *bestmv;
#endif
- // calculate central point error
- bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
- *distortion = bestmse;
+ // calculate central point error
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = bestmse;
#if CONFIG_HIGH_PRECISION_MV
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
#endif
- // go left then right and check error
- this_mv.as_mv.row = startmv.as_mv.row;
- this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (left < bestmse)
- {
- *bestmv = this_mv;
- bestmse = left;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 8;
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (right < bestmse)
- {
- *bestmv = this_mv;
- bestmse = right;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- // go up then down and check error
- this_mv.as_mv.col = startmv.as_mv.col;
- this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (up < bestmse)
- {
- *bestmv = this_mv;
- bestmse = up;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
+ this_mv.as_mv.row += 8;
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (down < bestmse)
- {
- *bestmv = this_mv;
- bestmse = down;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- // now check 1 more diagonal
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- //for(whichdir =0;whichdir<4;whichdir++)
- //{
- this_mv = startmv;
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ // for(whichdir =0;whichdir<4;whichdir++)
+ // {
+ this_mv = startmv;
- switch (whichdir)
- {
+ switch (whichdir) {
case 0:
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
+ break;
case 1:
- this_mv.as_mv.col += 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
+ break;
case 2:
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
+ break;
case 3:
default:
- this_mv.as_mv.col += 4;
- this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
- break;
- }
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
+ break;
+ }
#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
// }
- // time to check quarter pels.
- if (bestmv->as_mv.row < startmv.as_mv.row)
- {
- y -= y_stride;
+ // time to check quarter pels.
+ if (bestmv->as_mv.row < startmv.as_mv.row) {
+ y -= y_stride;
#if CONFIG_HIGH_PRECISION_MV
- yrow_movedback = 1;
+ yrow_movedback = 1;
#endif
- }
+ }
- if (bestmv->as_mv.col < startmv.as_mv.col)
- {
- y--;
+ if (bestmv->as_mv.col < startmv.as_mv.col) {
+ y--;
#if CONFIG_HIGH_PRECISION_MV
- ycol_movedback = 1;
+ ycol_movedback = 1;
#endif
- }
+ }
- startmv = *bestmv;
+ startmv = *bestmv;
- // go left then right and check error
- this_mv.as_mv.row = startmv.as_mv.row;
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col = startmv.as_mv.col - 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col = startmv.as_mv.col - 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ }
#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (left < bestmse)
- {
- *bestmv = this_mv;
- bestmse = left;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.col += 4;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ this_mv.as_mv.col += 4;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (right < bestmse)
- {
- *bestmv = this_mv;
- bestmse = right;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- // go up then down and check error
- this_mv.as_mv.col = startmv.as_mv.col;
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row = startmv.as_mv.row - 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row = startmv.as_mv.row - 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ }
#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (up < bestmse)
- {
- *bestmv = this_mv;
- bestmse = up;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.row += 4;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (down < bestmse)
- {
- *bestmv = this_mv;
- bestmse = down;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- // now check 1 more diagonal
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// for(whichdir=0;whichdir<4;whichdir++)
// {
- this_mv = startmv;
+ this_mv = startmv;
- switch (whichdir)
- {
+ switch (whichdir) {
case 0:
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row -= 2;
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);;
- }
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
}
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
-
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - y_stride - 1, y_stride, SP(6), SP(6), z, b->src_stride, &sse);
- }
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - y_stride - 1, y_stride, SP(6), SP(6), z, b->src_stride, &sse);
}
+ }
- break;
+ break;
case 1:
- this_mv.as_mv.col += 2;
+ this_mv.as_mv.col += 2;
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ }
- break;
+ break;
case 2:
- this_mv.as_mv.row += 2;
+ this_mv.as_mv.row += 2;
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ }
- break;
+ break;
case 3:
- this_mv.as_mv.col += 2;
- this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- break;
- }
+ this_mv.as_mv.col += 2;
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ break;
+ }
#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
#if CONFIG_HIGH_PRECISION_MV
- if (!x->e_mbd.allow_high_precision_mv)
- return bestmse;
-
- /* Now do 1/8th pixel */
- if (bestmv->as_mv.row < orig_mv.as_mv.row && !yrow_movedback)
- {
- y -= y_stride;
- yrow_movedback = 1;
- }
-
- if (bestmv->as_mv.col < orig_mv.as_mv.col && !ycol_movedback)
- {
- y--;
- ycol_movedback = 1;
- }
-
- startmv = *bestmv;
+ if (!x->e_mbd.allow_high_precision_mv)
+ return bestmse;
- // go left then right and check error
- this_mv.as_mv.row = startmv.as_mv.row;
+ /* Now do 1/8th pixel */
+ if (bestmv->as_mv.row < orig_mv.as_mv.row && !yrow_movedback) {
+ y -= y_stride;
+ yrow_movedback = 1;
+ }
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col = startmv.as_mv.col - 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
+ if (bestmv->as_mv.col < orig_mv.as_mv.col && !ycol_movedback) {
+ y--;
+ ycol_movedback = 1;
+ }
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ startmv = *bestmv;
- if (left < bestmse)
- {
- *bestmv = this_mv;
- bestmse = left;
- *distortion = thismse;
- *sse1 = sse;
- }
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
- this_mv.as_mv.col += 2;
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col = startmv.as_mv.col - 1;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-
- if (right < bestmse)
- {
- *bestmv = this_mv;
- bestmse = right;
- *distortion = thismse;
- *sse1 = sse;
- }
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ }
+
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row = startmv.as_mv.row - 1;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ }
- // go up then down and check error
- this_mv.as_mv.col = startmv.as_mv.col;
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row = startmv.as_mv.row - 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
- }
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
- if (up < bestmse)
- {
- *bestmv = this_mv;
- bestmse = up;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
-
- if (down < bestmse)
- {
- *bestmv = this_mv;
- bestmse = down;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- // now check 1 more diagonal
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// for(whichdir=0;whichdir<4;whichdir++)
// {
- this_mv = startmv;
+ this_mv = startmv;
- switch (whichdir)
- {
+ switch (whichdir) {
case 0:
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row -= 1;
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 1;
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);;
- }
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
}
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
-
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - y_stride - 1, y_stride, SP(7), SP(7), z, b->src_stride, &sse);
- }
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - y_stride - 1, y_stride, SP(7), SP(7), z, b->src_stride, &sse);
}
+ }
- break;
+ break;
case 1:
- this_mv.as_mv.col += 1;
+ this_mv.as_mv.col += 1;
- if (startmv.as_mv.row & 7)
- {
- this_mv.as_mv.row -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 1;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ }
- break;
+ break;
case 2:
- this_mv.as_mv.row += 1;
+ this_mv.as_mv.row += 1;
- if (startmv.as_mv.col & 7)
- {
- this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
- else
- {
- this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- }
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ }
- break;
+ break;
case 3:
- this_mv.as_mv.col += 1;
- this_mv.as_mv.row += 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- break;
- }
+ this_mv.as_mv.col += 1;
+ this_mv.as_mv.row += 1;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ break;
+ }
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
#endif /* CONFIG_HIGH_PRECISION_MV */
- return bestmse;
+ return bestmse;
}
#undef SP
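A worked example of the fractional-vector arithmetic the refinement steps above rely on. Motion vectors are kept in 1/8-pel units, so v & 7 is the fractional part; stepping a quarter pel left across an integer boundary is spelled (v - 8) | 6 (borrow one full pel, set the fraction to 6/8), with the base pointer y decremented to match — the half- and eighth-pel probes use | 4 and | 7 the same way. A standalone sketch:

#include <assert.h>

int main(void) {
  int v = 24;              /* exactly 3 pels: 24 == 3 << 3, so v & 7 == 0 */
  int left = (v - 8) | 6;  /* borrow a pel, fraction becomes 6/8 */
  assert(left == v - 2);   /* net move: one quarter pel (2/8) leftward */
  assert((left >> 3) == 2 && (left & 7) == 6);
  return 0;
}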
@@ -1041,369 +959,343 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
int *mvcost[2], int *distortion,
- unsigned int *sse1)
-{
- int bestmse = INT_MAX;
- int_mv startmv;
- int_mv this_mv;
- unsigned char *z = (*(b->base_src) + b->src);
- int left, right, up, down, diag;
- unsigned int sse;
- int whichdir ;
- int thismse;
- int y_stride;
- MACROBLOCKD *xd = &x->e_mbd;
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
+ unsigned char *z = (*(b->base_src) + b->src);
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ MACROBLOCKD *xd = &x->e_mbd;
#if ARCH_X86 || ARCH_X86_64
- unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- unsigned char *y;
+ unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ unsigned char *y;
- y_stride = 32;
- /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
- y = xd->y_buf + y_stride + 1;
+ y_stride = 32;
+ /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
+ vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
+ y = xd->y_buf + y_stride + 1;
#else
- unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
- y_stride = d->pre_stride;
+ unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
+ y_stride = d->pre_stride;
#endif
- // central mv
- bestmv->as_mv.row <<= 3;
- bestmv->as_mv.col <<= 3;
- startmv = *bestmv;
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+ startmv = *bestmv;
- // calculate central point error
- bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
- *distortion = bestmse;
+ // calculate central point error
+ bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ *distortion = bestmse;
#if CONFIG_HIGH_PRECISION_MV
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
+ bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
#endif
- // go left then right and check error
- this_mv.as_mv.row = startmv.as_mv.row;
- this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (left < bestmse)
- {
- *bestmv = this_mv;
- bestmse = left;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
+ this_mv.as_mv.col += 8;
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (right < bestmse)
- {
- *bestmv = this_mv;
- bestmse = right;
- *distortion = thismse;
- *sse1 = sse;
- }
-
- // go up then down and check error
- this_mv.as_mv.col = startmv.as_mv.col;
- this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (up < bestmse)
- {
- *bestmv = this_mv;
- bestmse = up;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
+ this_mv.as_mv.row += 8;
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (down < bestmse)
- {
- *bestmv = this_mv;
- bestmse = down;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- // now check 1 more diagonal -
- whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
- this_mv = startmv;
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ this_mv = startmv;
- switch (whichdir)
- {
+ switch (whichdir) {
case 0:
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
+ break;
case 1:
- this_mv.as_mv.col += 4;
- this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
+ break;
case 2:
- this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
- this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
- break;
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
+ break;
case 3:
default:
- this_mv.as_mv.col += 4;
- this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
- break;
- }
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
+ break;
+ }
#if CONFIG_HIGH_PRECISION_MV
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
- if (diag < bestmse)
- {
- *bestmv = this_mv;
- bestmse = diag;
- *distortion = thismse;
- *sse1 = sse;
- }
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
- return bestmse;
+ return bestmse;
}
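Both subpel functions above repeat a single probing pattern per precision level: evaluate left, right, up, and down at the current step, then the one diagonal picked from the better horizontal and the better vertical probe (the whichdir switch). A minimal sketch of that pattern, with a hypothetical probe_cost() standing in for the svf/vf + mv_err_cost pair:

typedef struct { int row, col; } mv_t;

/* hypothetical: distortion + mv rate cost of trying (row, col) */
extern int probe_cost(mv_t mv);

static mv_t refine_one_level(mv_t best, int step, int *best_cost) {
  mv_t cand, start = best;
  int left, right, up, down, diag;

  cand = start; cand.col -= step; left  = probe_cost(cand);
  if (left  < *best_cost) { *best_cost = left;  best = cand; }
  cand = start; cand.col += step; right = probe_cost(cand);
  if (right < *best_cost) { *best_cost = right; best = cand; }
  cand = start; cand.row -= step; up    = probe_cost(cand);
  if (up    < *best_cost) { *best_cost = up;    best = cand; }
  cand = start; cand.row += step; down  = probe_cost(cand);
  if (down  < *best_cost) { *best_cost = down;  best = cand; }

  /* whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2): take the
     diagonal between the better horizontal and vertical probes */
  cand = start;
  cand.col += (left < right) ? -step : step;
  cand.row += (up   < down)  ? -step : step;
  diag = probe_cost(cand);
  if (diag < *best_cost) { *best_cost = diag; best = cand; }
  return best;
}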
#define CHECK_BOUNDS(range) \
-{\
+ {\
all_in = 1;\
all_in &= ((br-range) >= x->mv_row_min);\
all_in &= ((br+range) <= x->mv_row_max);\
all_in &= ((bc-range) >= x->mv_col_min);\
all_in &= ((bc+range) <= x->mv_col_max);\
-}
+ }
#define CHECK_POINT \
-{\
+ {\
if (this_mv.as_mv.col < x->mv_col_min) continue;\
if (this_mv.as_mv.col > x->mv_col_max) continue;\
if (this_mv.as_mv.row < x->mv_row_min) continue;\
if (this_mv.as_mv.row > x->mv_row_max) continue;\
-}
+ }
#define CHECK_BETTER \
-{\
+ {\
if (thissad < bestsad)\
{\
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
- if (thissad < bestsad)\
- {\
- bestsad = thissad;\
- best_site = i;\
- }\
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
+ if (thissad < bestsad)\
+ {\
+ bestsad = thissad;\
+ best_site = i;\
+ }\
}\
-}
-
-static const MV next_chkpts[6][3] =
-{
- {{ -2, 0}, { -1, -2}, {1, -2}},
- {{ -1, -2}, {1, -2}, {2, 0}},
- {{1, -2}, {2, 0}, {1, 2}},
- {{2, 0}, {1, 2}, { -1, 2}},
- {{1, 2}, { -1, 2}, { -2, 0}},
- {{ -1, 2}, { -2, 0}, { -1, -2}}
+ }
+
+static const MV next_chkpts[6][3] = {
+ {{ -2, 0}, { -1, -2}, {1, -2}},
+ {{ -1, -2}, {1, -2}, {2, 0}},
+ {{1, -2}, {2, 0}, {1, 2}},
+ {{2, 0}, {1, 2}, { -1, 2}},
+ {{1, 2}, { -1, 2}, { -2, 0}},
+ {{ -1, 2}, { -2, 0}, { -1, -2}}
};
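Note the two-stage comparison inside CHECK_BETTER: the raw SAD is tested against the running best first, and the motion-vector rate cost is only added (and tested again) when that cheap test already passes. Written out as a plain function, with hypothetical sad_at() and mv_rate_cost() stand-ins:

extern unsigned int sad_at(int row, int col);       /* hypothetical */
extern unsigned int mv_rate_cost(int row, int col); /* hypothetical */

static void check_better(int row, int col, int i,
                         unsigned int *bestsad, int *best_site) {
  unsigned int thissad = sad_at(row, col);
  if (thissad < *bestsad) {             /* cheap first-stage test */
    thissad += mv_rate_cost(row, col);  /* full cost only if promising */
    if (thissad < *bestsad) {
      *bestsad = thissad;
      *best_site = i;
    }
  }
}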
int vp8_hex_search
(
- MACROBLOCK *x,
- BLOCK *b,
- BLOCKD *d,
- int_mv *ref_mv,
- int_mv *best_mv,
- int search_param,
- int sad_per_bit,
- const vp8_variance_fn_ptr_t *vfp,
- int *mvsadcost[2],
- int *mvcost[2],
- int_mv *center_mv
-)
-{
- MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
- MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
- int i, j;
-
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- int in_what_stride = d->pre_stride;
- int br, bc;
- int_mv this_mv;
- unsigned int bestsad = 0x7fffffff;
- unsigned int thissad;
- unsigned char *base_offset;
- unsigned char *this_offset;
- int k = -1;
- int all_in;
- int best_site = -1;
-
- int_mv fcenter_mv;
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- // adjust ref_mv to make sure it is within MV range
- vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
- br = ref_mv->as_mv.row;
- bc = ref_mv->as_mv.col;
-
- // Work out the start point for the search
- base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
- this_offset = base_offset + (br * (d->pre_stride)) + bc;
- this_mv.as_mv.row = br;
- this_mv.as_mv.col = bc;
- bestsad = vfp->sdf( what, what_stride, this_offset,
- in_what_stride, 0x7fffffff)
+ MACROBLOCK *x,
+ BLOCK *b,
+ BLOCKD *d,
+ int_mv *ref_mv,
+ int_mv *best_mv,
+ int search_param,
+ int sad_per_bit,
+ const vp8_variance_fn_ptr_t *vfp,
+ int *mvsadcost[2],
+ int *mvcost[2],
+ int_mv *center_mv
+) {
+ MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} };
+ MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}};
+ int i, j;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ int in_what_stride = d->pre_stride;
+ int br, bc;
+ int_mv this_mv;
+ unsigned int bestsad = 0x7fffffff;
+ unsigned int thissad;
+ unsigned char *base_offset;
+ unsigned char *this_offset;
+ int k = -1;
+ int all_in;
+ int best_site = -1;
+
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // adjust ref_mv to make sure it is within MV range
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
+ // Work out the start point for the search
+ base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
+ this_offset = base_offset + (br * (d->pre_stride)) + bc;
+ this_mv.as_mv.row = br;
+ this_mv.as_mv.col = bc;
+ bestsad = vfp->sdf(what, what_stride, this_offset,
+ in_what_stride, 0x7fffffff)
+ mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // hex search
- //j=0
+ // hex search
+ // j=0
+ CHECK_BOUNDS(2)
+
+ if (all_in) {
+ for (i = 0; i < 6; i++) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 6; i++) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1)
+ goto cal_neighbors;
+ else {
+ br += hex[best_site].row;
+ bc += hex[best_site].col;
+ k = best_site;
+ }
+
+ for (j = 1; j < 127; j++) {
+ best_site = -1;
CHECK_BOUNDS(2)
- if(all_in)
- {
- for (i = 0; i < 6; i++)
- {
- this_mv.as_mv.row = br + hex[i].row;
- this_mv.as_mv.col = bc + hex[i].col;
- this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
- thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
- }else
- {
- for (i = 0; i < 6; i++)
- {
- this_mv.as_mv.row = br + hex[i].row;
- this_mv.as_mv.col = bc + hex[i].col;
- CHECK_POINT
- this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
- thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
+ if (all_in) {
+ for (i = 0; i < 3; i++) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 3; i++) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
}
if (best_site == -1)
- goto cal_neighbors;
- else
- {
- br += hex[best_site].row;
- bc += hex[best_site].col;
- k = best_site;
+ break;
+ else {
+ br += next_chkpts[k][best_site].row;
+ bc += next_chkpts[k][best_site].col;
+ k += 5 + best_site;
+ if (k >= 12) k -= 12;
+ else if (k >= 6) k -= 6;
}
+ }
- for (j = 1; j < 127; j++)
- {
- best_site = -1;
- CHECK_BOUNDS(2)
-
- if(all_in)
- {
- for (i = 0; i < 3; i++)
- {
- this_mv.as_mv.row = br + next_chkpts[k][i].row;
- this_mv.as_mv.col = bc + next_chkpts[k][i].col;
- this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
- thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
- }else
- {
- for (i = 0; i < 3; i++)
- {
- this_mv.as_mv.row = br + next_chkpts[k][i].row;
- this_mv.as_mv.col = bc + next_chkpts[k][i].col;
- CHECK_POINT
- this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
- thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
- }
-
- if (best_site == -1)
- break;
- else
- {
- br += next_chkpts[k][best_site].row;
- bc += next_chkpts[k][best_site].col;
- k += 5 + best_site;
- if (k >= 12) k -= 12;
- else if (k >= 6) k -= 6;
- }
- }
-
- // check 4 1-away neighbors
+ // check 4 1-away neighbors
cal_neighbors:
- for (j = 0; j < 32; j++)
- {
- best_site = -1;
- CHECK_BOUNDS(1)
-
- if(all_in)
- {
- for (i = 0; i < 4; i++)
- {
- this_mv.as_mv.row = br + neighbors[i].row;
- this_mv.as_mv.col = bc + neighbors[i].col;
- this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
- thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
- }else
- {
- for (i = 0; i < 4; i++)
- {
- this_mv.as_mv.row = br + neighbors[i].row;
- this_mv.as_mv.col = bc + neighbors[i].col;
- CHECK_POINT
- this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
- thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
- CHECK_BETTER
- }
- }
+ for (j = 0; j < 32; j++) {
+ best_site = -1;
+ CHECK_BOUNDS(1)
+
+ if (all_in) {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ }
- if (best_site == -1)
- break;
- else
- {
- br += neighbors[best_site].row;
- bc += neighbors[best_site].col;
- }
+ if (best_site == -1)
+ break;
+ else {
+ br += neighbors[best_site].row;
+ bc += neighbors[best_site].col;
}
+ }
- best_mv->as_mv.row = br;
- best_mv->as_mv.col = bc;
+ best_mv->as_mv.row = br;
+ best_mv->as_mv.col = bc;
- return bestsad;
+ return bestsad;
}
#undef CHECK_BOUNDS
#undef CHECK_POINT
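Seen end to end, vp8_hex_search walks like this: a full 6-point hexagon on the first pass, then at most 3 new points per move, chosen from next_chkpts by the incoming direction k, before the final 4-neighbour polish. A self-contained sketch assuming a hypothetical cost() callback; the % 6 fold is equivalent to the k += 5 + best_site arithmetic above:

typedef struct { int row, col; } pt_t;

static const pt_t hex6[6] =
    { {-1, -2}, {1, -2}, {2, 0}, {1, 2}, {-1, 2}, {-2, 0} };
static const pt_t nxt3[6][3] = {
  {{-2, 0}, {-1, -2}, {1, -2}}, {{-1, -2}, {1, -2}, {2, 0}},
  {{1, -2}, {2, 0}, {1, 2}},    {{2, 0}, {1, 2}, {-1, 2}},
  {{1, 2}, {-1, 2}, {-2, 0}},   {{-1, 2}, {-2, 0}, {-1, -2}}
};

extern unsigned int cost(int row, int col);  /* hypothetical callback */

static void hex_walk(int *br, int *bc, unsigned int *bestsad) {
  int i, j, k, best_site = -1;

  for (i = 0; i < 6; i++) {            /* first pass: the full hexagon */
    unsigned int s = cost(*br + hex6[i].row, *bc + hex6[i].col);
    if (s < *bestsad) { *bestsad = s; best_site = i; }
  }
  if (best_site == -1) return;
  *br += hex6[best_site].row; *bc += hex6[best_site].col;
  k = best_site;

  for (j = 1; j < 127; j++) {          /* then 3 new points per move */
    best_site = -1;
    for (i = 0; i < 3; i++) {
      unsigned int s = cost(*br + nxt3[k][i].row, *bc + nxt3[k][i].col);
      if (s < *bestsad) { *bestsad = s; best_site = i; }
    }
    if (best_site == -1) break;
    *br += nxt3[k][best_site].row; *bc += nxt3[k][best_site].col;
    k = (k + 5 + best_site) % 6;       /* direction index, modulo 6 */
  }
}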
@@ -1411,1040 +1303,948 @@ cal_neighbors:
int vp8_diamond_search_sad
(
- MACROBLOCK *x,
- BLOCK *b,
- BLOCKD *d,
- int_mv *ref_mv,
- int_mv *best_mv,
- int search_param,
- int sad_per_bit,
- int *num00,
- vp8_variance_fn_ptr_t *fn_ptr,
- int *mvcost[2],
- int_mv *center_mv
-)
-{
- int i, j, step;
-
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- unsigned char *in_what;
- int in_what_stride = d->pre_stride;
- unsigned char *best_address;
-
- int tot_steps;
- int_mv this_mv;
-
- int bestsad = INT_MAX;
- int best_site = 0;
- int last_site = 0;
-
- int ref_row;
- int ref_col;
- int this_row_offset;
- int this_col_offset;
- search_site *ss;
-
- unsigned char *check_here;
- int thissad;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ MACROBLOCK *x,
+ BLOCK *b,
+ BLOCKD *d,
+ int_mv *ref_mv,
+ int_mv *best_mv,
+ int search_param,
+ int sad_per_bit,
+ int *num00,
+ vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2],
+ int_mv *center_mv
+) {
+ int i, j, step;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int in_what_stride = d->pre_stride;
+ unsigned char *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ unsigned char *check_here;
+ int thissad;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
- ref_row = ref_mv->as_mv.row;
- ref_col = ref_mv->as_mv.col;
- *num00 = 0;
- best_mv->as_mv.row = ref_row;
- best_mv->as_mv.col = ref_col;
-
- // Work out the start point for the search
- in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
- best_address = in_what;
-
- // Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride, in_what,
- in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
-
- // search_param determines the length of the initial step and hence the number of iterations
- // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
- ss = &x->ss[search_param * x->searches_per_step];
- tot_steps = (x->ss_count / x->searches_per_step) - search_param;
-
- i = 1;
-
- for (step = 0; step < tot_steps ; step++)
- {
- for (j = 0 ; j < x->searches_per_step ; j++)
- {
- // Trap illegal vectors
- this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
- this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
-
- if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
- (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
-
- {
- check_here = ss[i].offset + best_address;
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.row = this_row_offset;
- this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_site = i;
- }
- }
- }
-
- i++;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride, in_what,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ // search_param determines the length of the initial step and hence the number of iterations
+ // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
}
+ }
- if (best_site != last_site)
- {
- best_mv->as_mv.row += ss[best_site].mv.row;
- best_mv->as_mv.col += ss[best_site].mv.col;
- best_address += ss[best_site].offset;
- last_site = best_site;
- }
- else if (best_address == in_what)
- (*num00)++;
+ i++;
}
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
- if (bestsad == INT_MAX)
- return INT_MAX;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
}
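The two setup lines ss = &x->ss[search_param * x->searches_per_step] and tot_steps = (x->ss_count / x->searches_per_step) - search_param simply skip the search_param largest diamond radii in the precomputed site table. With illustrative numbers (hypothetical; the real table is filled in at encoder init):

#include <stdio.h>

int main(void) {
  int searches_per_step = 8;   /* candidate sites per diamond radius */
  int ss_count = 8 * 8;        /* eight radii precomputed */
  int search_param = 2;        /* skip the two largest radii */

  int first_site = search_param * searches_per_step;
  int tot_steps = (ss_count / searches_per_step) - search_param;
  printf("start at site %d, %d steps remain\n", first_site, tot_steps);
  /* prints: start at site 16, 6 steps remain */
  return 0;
}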
int vp8_diamond_search_sadx4
(
- MACROBLOCK *x,
- BLOCK *b,
- BLOCKD *d,
- int_mv *ref_mv,
- int_mv *best_mv,
- int search_param,
- int sad_per_bit,
- int *num00,
- vp8_variance_fn_ptr_t *fn_ptr,
- int *mvcost[2],
- int_mv *center_mv
-)
-{
- int i, j, step;
-
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- unsigned char *in_what;
- int in_what_stride = d->pre_stride;
- unsigned char *best_address;
-
- int tot_steps;
- int_mv this_mv;
-
- int bestsad = INT_MAX;
- int best_site = 0;
- int last_site = 0;
-
- int ref_row;
- int ref_col;
- int this_row_offset;
- int this_col_offset;
- search_site *ss;
-
- unsigned char *check_here;
- unsigned int thissad;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ MACROBLOCK *x,
+ BLOCK *b,
+ BLOCKD *d,
+ int_mv *ref_mv,
+ int_mv *best_mv,
+ int search_param,
+ int sad_per_bit,
+ int *num00,
+ vp8_variance_fn_ptr_t *fn_ptr,
+ int *mvcost[2],
+ int_mv *center_mv
+) {
+ int i, j, step;
+
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int in_what_stride = d->pre_stride;
+ unsigned char *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ unsigned char *check_here;
+ unsigned int thissad;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
- ref_row = ref_mv->as_mv.row;
- ref_col = ref_mv->as_mv.col;
- *num00 = 0;
- best_mv->as_mv.row = ref_row;
- best_mv->as_mv.col = ref_col;
-
- // Work out the start point for the search
- in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
- best_address = in_what;
-
- // Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride,
- in_what, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
-
- // search_param determines the length of the initial step and hence the number of iterations
- // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
- ss = &x->ss[search_param * x->searches_per_step];
- tot_steps = (x->ss_count / x->searches_per_step) - search_param;
-
- i = 1;
-
- for (step = 0; step < tot_steps ; step++)
- {
- int all_in = 1, t;
-
- // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
- // checking 4 bounds for each points.
- all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
- all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
- all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
- all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
-
- if (all_in)
- {
- unsigned int sad_array[4];
-
- for (j = 0 ; j < x->searches_per_step ; j += 4)
- {
- unsigned char *block_offset[4];
-
- for (t = 0; t < 4; t++)
- block_offset[t] = ss[i+t].offset + best_address;
-
- fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
-
- for (t = 0; t < 4; t++, i++)
- {
- if (sad_array[t] < bestsad)
- {
- this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
- this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
- sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (sad_array[t] < bestsad)
- {
- bestsad = sad_array[t];
- best_site = i;
- }
- }
- }
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride,
+ in_what, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+ // search_param determines the length of the initial step and hence the number of iterations
+ // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ int all_in = 1, t;
+
+ // To know whether all neighbor points are within the bounds, checking 4
+ // bounds is enough instead of checking 4 bounds for each point.
+ all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min);
+ all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max);
+ all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min);
+ all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+
+ for (j = 0; j < x->searches_per_step; j += 4) {
+ unsigned char *block_offset[4];
+
+ for (t = 0; t < 4; t++)
+ block_offset[t] = ss[i + t].offset + best_address;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
+
+ for (t = 0; t < 4; t++, i++) {
+ if (sad_array[t] < bestsad) {
+ this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
+ this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
+ sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (sad_array[t] < bestsad) {
+ bestsad = sad_array[t];
+ best_site = i;
}
+ }
}
- else
- {
- for (j = 0 ; j < x->searches_per_step ; j++)
- {
- // Trap illegal vectors
- this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
- this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
-
- if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
- (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
- {
- check_here = ss[i].offset + best_address;
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.row = this_row_offset;
- this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_site = i;
- }
- }
- }
- i++;
+ }
+ } else {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
}
+ }
}
-
- if (best_site != last_site)
- {
- best_mv->as_mv.row += ss[best_site].mv.row;
- best_mv->as_mv.col += ss[best_site].mv.col;
- best_address += ss[best_site].offset;
- last_site = best_site;
- }
- else if (best_address == in_what)
- (*num00)++;
+ i++;
+ }
}
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
- if (bestsad == INT_MAX)
- return INT_MAX;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
}
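What distinguishes the sadx4 variant is fn_ptr->sdx4df: when the whole diamond fits inside the MV bounds, one call returns the SADs of four candidate blocks at once, letting SIMD builds amortize the per-block work. A scalar model of that contract (block size is passed explicitly here; the real function pointers are specialized per block size):

#include <stdlib.h>

static void sdx4df_scalar(const unsigned char *src, int src_stride,
                          const unsigned char *ref[4], int ref_stride,
                          unsigned int sad_array[4], int h, int w) {
  int t, r, c;
  for (t = 0; t < 4; t++) {     /* one SAD per candidate block */
    unsigned int s = 0;
    for (r = 0; r < h; r++)
      for (c = 0; c < w; c++)
        s += (unsigned int)abs(src[r * src_stride + c] -
                               ref[t][r * ref_stride + c]);
    sad_array[t] = s;
  }
}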
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
- int_mv *center_mv)
-{
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- unsigned char *in_what;
- int in_what_stride = d->pre_stride;
- int mv_stride = d->pre_stride;
- unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
- int_mv this_mv;
- int bestsad = INT_MAX;
- int r, c;
-
- unsigned char *check_here;
- int thissad;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
-
- int row_min = ref_row - distance;
- int row_max = ref_row + distance;
- int col_min = ref_col - distance;
- int col_max = ref_col + distance;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ int_mv *center_mv) {
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int in_what_stride = d->pre_stride;
+ int mv_stride = d->pre_stride;
+ unsigned char *bestaddress;
+ int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int r, c;
+
+ unsigned char *check_here;
+ int thissad;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- // Work out the mid point for the search
- in_what = *(d->base_pre) + d->pre;
- bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+ // Work out the mid point for the search
+ in_what = *(d->base_pre) + d->pre;
+ bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
- best_mv->as_mv.row = ref_row;
- best_mv->as_mv.col = ref_col;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
- // Baseline value at the centre
- bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
- in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
+ // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
- for (r = row_min; r < row_max ; r++)
- {
- this_mv.as_mv.row = r;
- check_here = r * mv_stride + in_what + col_min;
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
- for (c = col_min; c < col_max; c++)
- {
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
+ for (c = col_min; c < col_max; c++) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
- check_here++;
- }
+ check_here++;
}
+ }
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
- if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad < INT_MAX)
+ return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
- else
- return INT_MAX;
+ else
+ return INT_MAX;
}
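The four clamping ifs above just intersect the +/- distance window around the reference vector with the macroblock's legal MV range before the raster scan; an equivalent compact form:

static inline int imax(int a, int b) { return a > b ? a : b; }
static inline int imin(int a, int b) { return a < b ? a : b; }

/* col_min/col_max (and likewise row_min/row_max) as computed above */
static void clamp_window(int ref, int distance, int lo, int hi,
                         int *minv, int *maxv) {
  *minv = imax(ref - distance, lo);
  *maxv = imin(ref + distance, hi);
}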
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
- int_mv *center_mv)
-{
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- unsigned char *in_what;
- int in_what_stride = d->pre_stride;
- int mv_stride = d->pre_stride;
- unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
- int_mv this_mv;
- int bestsad = INT_MAX;
- int r, c;
-
- unsigned char *check_here;
- unsigned int thissad;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
-
- int row_min = ref_row - distance;
- int row_max = ref_row + distance;
- int col_min = ref_col - distance;
- int col_max = ref_col + distance;
-
- unsigned int sad_array[3];
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ int_mv *center_mv) {
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int in_what_stride = d->pre_stride;
+ int mv_stride = d->pre_stride;
+ unsigned char *bestaddress;
+ int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int r, c;
+
+ unsigned char *check_here;
+ unsigned int thissad;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ unsigned int sad_array[3];
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- // Work out the mid point for the search
- in_what = *(d->base_pre) + d->pre;
- bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
-
- best_mv->as_mv.row = ref_row;
- best_mv->as_mv.col = ref_col;
-
- // Baseline value at the centre
- bestsad = fn_ptr->sdf(what, what_stride,
- bestaddress, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
-
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
-
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
-
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
-
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
-
- for (r = row_min; r < row_max ; r++)
- {
- this_mv.as_mv.row = r;
- check_here = r * mv_stride + in_what + col_min;
- c = col_min;
-
- while ((c + 2) < col_max)
- {
- int i;
-
- fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
-
- for (i = 0; i < 3; i++)
- {
- thissad = sad_array[i];
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
- }
-
- check_here++;
- c++;
- }
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = *(d->base_pre) + d->pre;
+ bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+  // Apply further limits to prevent us searching with vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 2) < col_max) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
}
- while (c < col_max)
- {
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
- }
+ check_here++;
+ c++;
+ }
+ }
+
+ while (c < col_max) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
- check_here ++;
- c ++;
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
}
+ }
+ check_here++;
+ c++;
}
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
- if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad < INT_MAX)
+ return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
- else
- return INT_MAX;
+ else
+ return INT_MAX;
}
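
The structure of vp8_full_search_sadx3 above is worth calling out: the sdx3f callback (typically a SIMD kernel) returns SADs for three consecutive candidate columns per call, a scalar sdf loop mops up the leftover columns, and the raw SAD is compared against the running best before the motion-vector cost is added, so most candidates are rejected cheaply. Below is a minimal plain-C sketch of that main-loop-plus-tail pattern; sad_c, sad_x3_c and scan_row are illustrative stand-ins, not libvpx functions.

    #include <limits.h>
    #include <stdlib.h>

    /* Stand-in for fn_ptr->sdf: scalar SAD of a w x h block. */
    static unsigned int sad_c(const unsigned char *src, int src_stride,
                              const unsigned char *ref, int ref_stride,
                              int w, int h) {
      unsigned int sad = 0;
      int i, j;
      for (i = 0; i < h; i++)
        for (j = 0; j < w; j++)
          sad += abs(src[i * src_stride + j] - ref[i * ref_stride + j]);
      return sad;
    }

    /* Stand-in for fn_ptr->sdx3f: SADs at ref, ref + 1 and ref + 2. */
    static void sad_x3_c(const unsigned char *src, int src_stride,
                         const unsigned char *ref, int ref_stride,
                         int w, int h, unsigned int out[3]) {
      int k;
      for (k = 0; k < 3; k++)
        out[k] = sad_c(src, src_stride, ref + k, ref_stride, w, h);
    }

    /* One row of the exhaustive search: batches of three, then a tail. */
    static unsigned int scan_row(const unsigned char *src, int src_stride,
                                 const unsigned char *ref_row, int ref_stride,
                                 int col_min, int col_max, int w, int h) {
      unsigned int best = UINT_MAX;
      int c = col_min;
      while (c + 2 < col_max) {
        unsigned int s[3];
        int k;
        sad_x3_c(src, src_stride, ref_row + c, ref_stride, w, h, s);
        for (k = 0; k < 3; k++, c++)
          if (s[k] < best) best = s[k];  /* mv cost omitted for brevity */
      }
      for (; c < col_max; c++) {
        unsigned int s = sad_c(src, src_stride, ref_row + c, ref_stride, w, h);
        if (s < best) best = s;
      }
      return best;
    }
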
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
- int_mv *center_mv)
-{
- unsigned char *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
- unsigned char *in_what;
- int in_what_stride = d->pre_stride;
- int mv_stride = d->pre_stride;
- unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.as_mv.first;
- int_mv this_mv;
- int bestsad = INT_MAX;
- int r, c;
-
- unsigned char *check_here;
- unsigned int thissad;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int ref_row = ref_mv->as_mv.row;
- int ref_col = ref_mv->as_mv.col;
-
- int row_min = ref_row - distance;
- int row_max = ref_row + distance;
- int col_min = ref_col - distance;
- int col_max = ref_col + distance;
-
- DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
- unsigned int sad_array[3];
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ int_mv *center_mv) {
+ unsigned char *what = (*(b->base_src) + b->src);
+ int what_stride = b->src_stride;
+ unsigned char *in_what;
+ int in_what_stride = d->pre_stride;
+ int mv_stride = d->pre_stride;
+ unsigned char *bestaddress;
+ int_mv *best_mv = &d->bmi.as_mv.first;
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int r, c;
+
+ unsigned char *check_here;
+ unsigned int thissad;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
+ unsigned int sad_array[3];
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- // Work out the mid point for the search
- in_what = *(d->base_pre) + d->pre;
- bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
-
- best_mv->as_mv.row = ref_row;
- best_mv->as_mv.col = ref_col;
-
- // Baseline value at the centre
- bestsad = fn_ptr->sdf(what, what_stride,
- bestaddress, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
-
- // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
- if (col_min < x->mv_col_min)
- col_min = x->mv_col_min;
-
- if (col_max > x->mv_col_max)
- col_max = x->mv_col_max;
-
- if (row_min < x->mv_row_min)
- row_min = x->mv_row_min;
-
- if (row_max > x->mv_row_max)
- row_max = x->mv_row_max;
-
- for (r = row_min; r < row_max ; r++)
- {
- this_mv.as_mv.row = r;
- check_here = r * mv_stride + in_what + col_min;
- c = col_min;
-
- while ((c + 7) < col_max)
- {
- int i;
-
- fn_ptr->sdx8f(what, what_stride, check_here , in_what_stride, sad_array8);
-
- for (i = 0; i < 8; i++)
- {
- thissad = (unsigned int)sad_array8[i];
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
- }
-
- check_here++;
- c++;
- }
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = *(d->base_pre) + d->pre;
+ bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+
+  // Apply further limits to prevent us searching with vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 7) < col_max) {
+ int i;
+
+ fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);
+
+ for (i = 0; i < 8; i++) {
+ thissad = (unsigned int)sad_array8[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
}
- while ((c + 2) < col_max)
- {
- int i;
-
- fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
-
- for (i = 0; i < 3; i++)
- {
- thissad = sad_array[i];
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
- }
-
- check_here++;
- c++;
- }
+ check_here++;
+ c++;
+ }
+ }
+
+ while ((c + 2) < col_max) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
}
- while (c < col_max)
- {
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.col = c;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_mv->as_mv.row = r;
- best_mv->as_mv.col = c;
- bestaddress = check_here;
- }
- }
+ check_here++;
+ c++;
+ }
+ }
- check_here ++;
- c ++;
+ while (c < col_max) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
}
+ }
+
+ check_here++;
+ c++;
}
+ }
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
- if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad < INT_MAX)
+ return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
- else
- return INT_MAX;
+ else
+ return INT_MAX;
}
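
vp8_full_search_sadx8 layers an eight-wide pass on top of the same structure. Its sdx8f kernel writes results into a 16-byte-aligned array of unsigned short (the DECLARE_ALIGNED_ARRAY above), so each element is widened to unsigned int before use; the double test, once on the raw SAD and again after adding the mv cost, keeps the cost lookup off the common path. A toy illustration of consuming such a batch, with invented values:

    #include <stdio.h>

    int main(void) {
      /* stands in for the sdx8f output buffer; values are invented */
      unsigned short sad_array8[8] = {310, 295, 288, 301, 279, 284, 290, 305};
      unsigned int bestsad = 300;  /* running best, mv cost included */
      int best_i = -1, i;
      for (i = 0; i < 8; i++) {
        unsigned int thissad = (unsigned int)sad_array8[i];  /* widen first */
        if (thissad < bestsad) {   /* cheap raw-SAD reject... */
          /* ...the real code adds mvsad_err_cost() here and re-tests */
          bestsad = thissad;
          best_i = i;
        }
      }
      printf("best offset %d, sad %u\n", best_i, bestsad);
      return 0;
    }
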
int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int error_per_bit, int search_range,
vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
- int_mv *center_mv)
-{
- MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
- int i, j;
- short this_row_offset, this_col_offset;
-
- int what_stride = b->src_stride;
- int in_what_stride = d->pre_stride;
- unsigned char *what = (*(b->base_src) + b->src);
- unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
- (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
- unsigned char *check_here;
- unsigned int thissad;
- int_mv this_mv;
- unsigned int bestsad = INT_MAX;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ int_mv *center_mv) {
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ short this_row_offset, this_col_offset;
+
+ int what_stride = b->src_stride;
+ int in_what_stride = d->pre_stride;
+ unsigned char *what = (*(b->base_src) + b->src);
+ unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
+ (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
+ unsigned char *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
-
- for (i=0; i<search_range; i++)
- {
- int best_site = -1;
-
- for (j = 0 ; j < 4 ; j++)
- {
- this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
- this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
-
- if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
- (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
- {
- check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.row = this_row_offset;
- this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_site = j;
- }
- }
- }
- }
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
- if (best_site == -1)
- break;
- else
- {
- ref_mv->as_mv.row += neighbors[best_site].row;
- ref_mv->as_mv.col += neighbors[best_site].col;
- best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
}
+ }
}
- this_mv.as_mv.row = ref_mv->as_mv.row << 3;
- this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
+ }
+ }
- if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
- else
- return INT_MAX;
+ else
+ return INT_MAX;
}
int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int error_per_bit,
int search_range, vp8_variance_fn_ptr_t *fn_ptr,
- int *mvcost[2], int_mv *center_mv)
-{
- MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
- int i, j;
- short this_row_offset, this_col_offset;
-
- int what_stride = b->src_stride;
- int in_what_stride = d->pre_stride;
- unsigned char *what = (*(b->base_src) + b->src);
- unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
- (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
- unsigned char *check_here;
- unsigned int thissad;
- int_mv this_mv;
- unsigned int bestsad = INT_MAX;
- MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- int_mv fcenter_mv;
+ int *mvcost[2], int_mv *center_mv) {
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ short this_row_offset, this_col_offset;
+
+ int what_stride = b->src_stride;
+ int in_what_stride = d->pre_stride;
+ unsigned char *what = (*(b->base_src) + b->src);
+ unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
+ (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
+ unsigned char *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
+ int_mv fcenter_mv;
#if CONFIG_HIGH_PRECISION_MV
- if (xd->allow_high_precision_mv)
- {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
+ if (xd->allow_high_precision_mv) {
+ mvsadcost[0] = x->mvsadcost_hp[0];
+ mvsadcost[1] = x->mvsadcost_hp[1];
+ }
#endif
- fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
- fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
-
- bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
-
- for (i=0; i<search_range; i++)
- {
- int best_site = -1;
- int all_in = 1;
-
- all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
- all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
- all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
- all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
-
- if(all_in)
- {
- unsigned int sad_array[4];
- unsigned char *block_offset[4];
- block_offset[0] = best_address - in_what_stride;
- block_offset[1] = best_address - 1;
- block_offset[2] = best_address + 1;
- block_offset[3] = best_address + in_what_stride;
-
- fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
-
- for (j = 0; j < 4; j++)
- {
- if (sad_array[j] < bestsad)
- {
- this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
- this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
- sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
-
- if (sad_array[j] < bestsad)
- {
- bestsad = sad_array[j];
- best_site = j;
- }
- }
- }
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+ int all_in = 1;
+
+ all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
+ all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
+ all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
+ all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+ unsigned char *block_offset[4];
+ block_offset[0] = best_address - in_what_stride;
+ block_offset[1] = best_address - 1;
+ block_offset[2] = best_address + 1;
+ block_offset[3] = best_address + in_what_stride;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
+
+ for (j = 0; j < 4; j++) {
+ if (sad_array[j] < bestsad) {
+ this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
+ this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
+ sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (sad_array[j] < bestsad) {
+ bestsad = sad_array[j];
+ best_site = j;
+ }
}
- else
- {
- for (j = 0 ; j < 4 ; j++)
- {
- this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
- this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
-
- if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
- (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
- {
- check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
- thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
-
- if (thissad < bestsad)
- {
- this_mv.as_mv.row = this_row_offset;
- this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
-
- if (thissad < bestsad)
- {
- bestsad = thissad;
- best_site = j;
- }
- }
- }
+ }
+ } else {
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
}
+ }
}
+ }
+ }
- if (best_site == -1)
- break;
- else
- {
- ref_mv->as_mv.row += neighbors[best_site].row;
- ref_mv->as_mv.col += neighbors[best_site].col;
- best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
- }
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
}
+ }
- this_mv.as_mv.row = ref_mv->as_mv.row << 3;
- this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
- if (bestsad < INT_MAX)
- return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
+ if (bestsad < INT_MAX)
+ return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
- + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
+ + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
- else
- return INT_MAX;
+ else
+ return INT_MAX;
}
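
The sadx4 refinement above has a fast path: when the whole plus-shaped neighborhood lies strictly inside the motion-vector border, the four candidate addresses can be derived from best_address alone and handed to sdx4df in a single call; otherwise each neighbor is bounds-checked and measured individually. A sketch of those two pieces, written here as stand-alone helpers (all_inside and neighbor_offsets are invented names):

    /* Border test matching the all_in computation above. */
    static int all_inside(int row, int col, int row_min, int row_max,
                          int col_min, int col_max) {
      return (row - 1) > row_min && (row + 1) < row_max &&
             (col - 1) > col_min && (col + 1) < col_max;
    }

    /* The four neighbor addresses, in the same up/left/right/down order
     * as the neighbors[] table. */
    static void neighbor_offsets(unsigned char *best_address, int stride,
                                 unsigned char *block_offset[4]) {
      block_offset[0] = best_address - stride;  /* row - 1 */
      block_offset[1] = best_address - 1;       /* col - 1 */
      block_offset[2] = best_address + 1;       /* col + 1 */
      block_offset[3] = best_address + stride;  /* row + 1 */
    }
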
#ifdef ENTROPY_STATS
-void print_mode_context(void)
-{
- FILE *f = fopen("modecont.c", "a");
- int i, j;
-
- fprintf(f, "#include \"entropy.h\"\n");
- fprintf(f, "const int vp8_mode_contexts[6][4] =");
- fprintf(f, "{\n");
- for (j = 0; j < 6; j++)
- {
- fprintf(f, " {/* %d */ ", j);
- fprintf(f, " ");
- for (i = 0; i < 4; i++)
- {
- int this_prob;
- int count;
-
- // context probs
- count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
- if (count)
- this_prob = 256 * mv_ref_ct[j][i][0] / count;
- else
- this_prob = 128;
-
- if (this_prob == 0)
- this_prob = 1;
- fprintf(f, "%5d, ", this_prob);
- }
- fprintf(f, " },\n");
- }
-
- fprintf(f, "};\n");
- fclose(f);
+void print_mode_context(void) {
+ FILE *f = fopen("modecont.c", "a");
+ int i, j;
+
+ fprintf(f, "#include \"entropy.h\"\n");
+ fprintf(f, "const int vp8_mode_contexts[6][4] =");
+ fprintf(f, "{\n");
+ for (j = 0; j < 6; j++) {
+ fprintf(f, " {/* %d */ ", j);
+ fprintf(f, " ");
+ for (i = 0; i < 4; i++) {
+ int this_prob;
+ int count;
+
+ // context probs
+ count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
+ if (count)
+ this_prob = 256 * mv_ref_ct[j][i][0] / count;
+ else
+ this_prob = 128;
+
+ if (this_prob == 0)
+ this_prob = 1;
+ fprintf(f, "%5d, ", this_prob);
+ }
+ fprintf(f, " },\n");
+ }
+
+ fprintf(f, "};\n");
+ fclose(f);
}
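
print_mode_context emits 8-bit branch probabilities: p = 256 * count[0] / (count[0] + count[1]), defaulting to 128 when there is no data and clamped away from zero because a zero probability is not representable. For example, counts of 300 zeros against 100 ones give 256 * 300 / 400 = 192. The same computation as a small helper (branch_prob is a name invented here):

    /* 8-bit probability that the branch takes the 0 arm, as above. */
    static int branch_prob(unsigned int c0, unsigned int c1) {
      int p = (c0 + c1) ? (int)(256 * c0 / (c0 + c1)) : 128;
      return (p == 0) ? 1 : p;  /* 0 is reserved / unrepresentable */
    }
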
/* MV ref count ENTROPY_STATS code */
-void init_mv_ref_counts()
-{
- vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
- vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
+void init_mv_ref_counts() {
+ vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
+ vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
}
-void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
-{
- if (m == ZEROMV)
- {
- ++mv_ref_ct [ct[0]] [0] [0];
- ++mv_mode_cts[0][0];
- }
- else
- {
- ++mv_ref_ct [ct[0]] [0] [1];
- ++mv_mode_cts[0][1];
-
- if (m == NEARESTMV)
- {
- ++mv_ref_ct [ct[1]] [1] [0];
- ++mv_mode_cts[1][0];
- }
- else
- {
- ++mv_ref_ct [ct[1]] [1] [1];
- ++mv_mode_cts[1][1];
-
- if (m == NEARMV)
- {
- ++mv_ref_ct [ct[2]] [2] [0];
- ++mv_mode_cts[2][0];
- }
- else
- {
- ++mv_ref_ct [ct[2]] [2] [1];
- ++mv_mode_cts[2][1];
-
- if (m == NEWMV)
- {
- ++mv_ref_ct [ct[3]] [3] [0];
- ++mv_mode_cts[3][0];
- }
- else
- {
- ++mv_ref_ct [ct[3]] [3] [1];
- ++mv_mode_cts[3][1];
- }
- }
+void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4]) {
+ if (m == ZEROMV) {
+ ++mv_ref_ct [ct[0]] [0] [0];
+ ++mv_mode_cts[0][0];
+ } else {
+ ++mv_ref_ct [ct[0]] [0] [1];
+ ++mv_mode_cts[0][1];
+
+ if (m == NEARESTMV) {
+ ++mv_ref_ct [ct[1]] [1] [0];
+ ++mv_mode_cts[1][0];
+ } else {
+ ++mv_ref_ct [ct[1]] [1] [1];
+ ++mv_mode_cts[1][1];
+
+ if (m == NEARMV) {
+ ++mv_ref_ct [ct[2]] [2] [0];
+ ++mv_mode_cts[2][0];
+ } else {
+ ++mv_ref_ct [ct[2]] [2] [1];
+ ++mv_mode_cts[2][1];
+
+ if (m == NEWMV) {
+ ++mv_ref_ct [ct[3]] [3] [0];
+ ++mv_mode_cts[3][0];
+ } else {
+ ++mv_ref_ct [ct[3]] [3] [1];
+ ++mv_mode_cts[3][1];
}
+ }
}
+ }
}
#endif  /* END MV ref count ENTROPY_STATS code */
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index f1314533f..d7fd137ca 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -36,71 +36,71 @@ extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
extern int vp8_hex_search
(
- MACROBLOCK *x,
- BLOCK *b,
- BLOCKD *d,
- int_mv *ref_mv,
- int_mv *best_mv,
- int search_param,
- int error_per_bit,
- const vp8_variance_fn_ptr_t *vf,
- int *mvsadcost[2],
- int *mvcost[2],
- int_mv *center_mv
+ MACROBLOCK *x,
+ BLOCK *b,
+ BLOCKD *d,
+ int_mv *ref_mv,
+ int_mv *best_mv,
+ int search_param,
+ int error_per_bit,
+ const vp8_variance_fn_ptr_t *vf,
+ int *mvsadcost[2],
+ int *mvcost[2],
+ int_mv *center_mv
);
typedef int (fractional_mv_step_fp)
- (MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv,
- int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2],
- int *distortion, unsigned int *sse);
+(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2],
+ int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp8_find_best_half_pixel_step;
extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
#define prototype_full_search_sad(sym)\
- int (sym)\
- (\
- MACROBLOCK *x, \
- BLOCK *b, \
- BLOCKD *d, \
- int_mv *ref_mv, \
- int sad_per_bit, \
- int distance, \
- vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
- int_mv *center_mv \
- )
+ int (sym)\
+ (\
+ MACROBLOCK *x, \
+ BLOCK *b, \
+ BLOCKD *d, \
+ int_mv *ref_mv, \
+ int sad_per_bit, \
+ int distance, \
+ vp8_variance_fn_ptr_t *fn_ptr, \
+ int *mvcost[2], \
+ int_mv *center_mv \
+ )
#define prototype_refining_search_sad(sym)\
- int (sym)\
- (\
- MACROBLOCK *x, \
- BLOCK *b, \
- BLOCKD *d, \
- int_mv *ref_mv, \
- int sad_per_bit, \
- int distance, \
- vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
- int_mv *center_mv \
- )
+ int (sym)\
+ (\
+ MACROBLOCK *x, \
+ BLOCK *b, \
+ BLOCKD *d, \
+ int_mv *ref_mv, \
+ int sad_per_bit, \
+ int distance, \
+ vp8_variance_fn_ptr_t *fn_ptr, \
+ int *mvcost[2], \
+ int_mv *center_mv \
+ )
#define prototype_diamond_search_sad(sym)\
- int (sym)\
- (\
- MACROBLOCK *x, \
- BLOCK *b, \
- BLOCKD *d, \
- int_mv *ref_mv, \
- int_mv *best_mv, \
- int search_param, \
- int sad_per_bit, \
- int *num00, \
- vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
- int_mv *center_mv \
- )
+ int (sym)\
+ (\
+ MACROBLOCK *x, \
+ BLOCK *b, \
+ BLOCKD *d, \
+ int_mv *ref_mv, \
+ int_mv *best_mv, \
+ int search_param, \
+ int sad_per_bit, \
+ int *num00, \
+ vp8_variance_fn_ptr_t *fn_ptr, \
+ int *mvcost[2], \
+ int_mv *center_mv \
+ )
#if ARCH_X86 || ARCH_X86_64
#include "x86/mcomp_x86.h"
@@ -134,11 +134,10 @@ extern prototype_refining_search_sad(vp8_search_refining_search);
#endif
extern prototype_diamond_search_sad(vp8_search_diamond_search);
-typedef struct
-{
- prototype_full_search_sad(*full_search);
- prototype_refining_search_sad(*refining_search);
- prototype_diamond_search_sad(*diamond_search);
+typedef struct {
+ prototype_full_search_sad(*full_search);
+ prototype_refining_search_sad(*refining_search);
+ prototype_diamond_search_sad(*diamond_search);
} vp8_search_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
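The prototype_* macros in this header exist so that each search signature is spelled out exactly once and then reused both for extern declarations of the concrete search functions and for the function-pointer fields of the RTCD vtable, keeping the two from drifting apart. A minimal sketch of the pattern (my_search, my_vtable_t and the parameter list are illustrative only):

    #define prototype_my_search(sym) \
      int (sym)(const unsigned char *src, int stride, int range)

    extern prototype_my_search(my_full_search);  /* a function declaration */

    typedef struct {
      prototype_my_search(*full_search);         /* a matching pointer field */
    } my_vtable_t;

    /* Dispatch through the vtable, as the encoder does at runtime. */
    static int do_search(const my_vtable_t *vt,
                         const unsigned char *src, int stride, int range) {
      return vt->full_search(src, stride, range);
    }
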
diff --git a/vp8/encoder/modecosts.c b/vp8/encoder/modecosts.c
index a156d6205..7a9c19a82 100644
--- a/vp8/encoder/modecosts.c
+++ b/vp8/encoder/modecosts.c
@@ -15,45 +15,40 @@
#include "vp8/common/entropymode.h"
-void vp8_init_mode_costs(VP8_COMP *c)
-{
- VP8_COMMON *x = &c->common;
- {
- const vp8_tree_p T = vp8_bmode_tree;
-
- int i = 0;
-
- do
- {
- int j = 0;
-
- do
- {
- vp8_cost_tokens((int *)c->mb.bmode_costs[i][j], x->kf_bmode_prob[i][j], T);
- }
- while (++j < VP8_BINTRAMODES);
- }
- while (++i < VP8_BINTRAMODES);
-
- vp8_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
- }
+void vp8_init_mode_costs(VP8_COMP *c) {
+ VP8_COMMON *x = &c->common;
+ {
+ const vp8_tree_p T = vp8_bmode_tree;
+
+ int i = 0;
+
+ do {
+ int j = 0;
+
+ do {
+ vp8_cost_tokens((int *)c->mb.bmode_costs[i][j], x->kf_bmode_prob[i][j], T);
+ } while (++j < VP8_BINTRAMODES);
+ } while (++i < VP8_BINTRAMODES);
+
+ vp8_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
+ }
#if CONFIG_ADAPTIVE_ENTROPY
- vp8_cost_tokens((int *)c->mb.inter_bmode_costs,
- vp8_sub_mv_ref_prob, vp8_sub_mv_ref_tree);
+ vp8_cost_tokens((int *)c->mb.inter_bmode_costs,
+ vp8_sub_mv_ref_prob, vp8_sub_mv_ref_tree);
#else
- vp8_cost_tokens(c->mb.inter_bmode_costs,
- vp8_sub_mv_ref_prob, vp8_sub_mv_ref_tree);
+ vp8_cost_tokens(c->mb.inter_bmode_costs,
+ vp8_sub_mv_ref_prob, vp8_sub_mv_ref_tree);
#endif
- vp8_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
- vp8_cost_tokens(c->mb.mbmode_cost[0],
- x->kf_ymode_prob[c->common.kf_ymode_probs_index],
- vp8_kf_ymode_tree);
- vp8_cost_tokens(c->mb.intra_uv_mode_cost[1],
- x->fc.uv_mode_prob[VP8_YMODES-1], vp8_uv_mode_tree);
- vp8_cost_tokens(c->mb.intra_uv_mode_cost[0],
- x->kf_uv_mode_prob[VP8_YMODES-1], vp8_uv_mode_tree);
- vp8_cost_tokens(c->mb.i8x8_mode_costs,
- x->fc.i8x8_mode_prob,vp8_i8x8_mode_tree);
+ vp8_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
+ vp8_cost_tokens(c->mb.mbmode_cost[0],
+ x->kf_ymode_prob[c->common.kf_ymode_probs_index],
+ vp8_kf_ymode_tree);
+ vp8_cost_tokens(c->mb.intra_uv_mode_cost[1],
+ x->fc.uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ vp8_cost_tokens(c->mb.intra_uv_mode_cost[0],
+ x->kf_uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ vp8_cost_tokens(c->mb.i8x8_mode_costs,
+ x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree);
}
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index bfcaf746d..3eb25d255 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -84,14 +84,14 @@ extern const int vp8_gf_interval_table[101];
#if CONFIG_ENHANCED_INTERP
#define SEARCH_BEST_FILTER 0 /* to search exhaustively for best filter */
#define RESET_FOREACH_FILTER 0 /* whether to reset the encoder state
- before trying each new filter */
+before trying each new filter */
#endif
#if CONFIG_HIGH_PRECISION_MV
#define ALTREF_HIGH_PRECISION_MV 1 /* whether to use high precision mv for altref computation */
#define HIGH_PRECISION_MV_QTHRESH 200 /* Q threshold for use of high precision mv */
- /* Choose a very high value for now so
- * that HIGH_PRECISION is always chosen
- */
+/* Choose a very high value for now so
+ * that HIGH_PRECISION is always chosen
+ */
#endif
#if CONFIG_INTERNAL_STATS
@@ -99,28 +99,28 @@ extern const int vp8_gf_interval_table[101];
extern double vp8_calc_ssim
(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- int lumamask,
- double *weight,
- const vp8_variance_rtcd_vtable_t *rtcd
+ YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ int lumamask,
+ double *weight,
+ const vp8_variance_rtcd_vtable_t *rtcd
);
extern double vp8_calc_ssimg
(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- const vp8_variance_rtcd_vtable_t *rtcd
+ YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ double *ssim_y,
+ double *ssim_u,
+ double *ssim_v,
+ const vp8_variance_rtcd_vtable_t *rtcd
);
#endif
-//#define OUTPUT_YUV_REC
+// #define OUTPUT_YUV_REC
#ifdef OUTPUT_YUV_SRC
FILE *yuv_file;
@@ -158,13 +158,13 @@ extern unsigned __int64 Sectionbits[500];
#endif
#ifdef MODE_STATS
extern INT64 Sectionbits[500];
-extern unsigned int y_modes[VP8_YMODES] ;
+extern unsigned int y_modes[VP8_YMODES];
extern unsigned int i8x8_modes[VP8_I8X8_MODES];
-extern unsigned int uv_modes[VP8_UV_MODES] ;
+extern unsigned int uv_modes[VP8_UV_MODES];
extern unsigned int uv_modes_y[VP8_YMODES][VP8_UV_MODES];
extern unsigned int b_modes[B_MODE_COUNT];
-extern unsigned int inter_y_modes[MB_MODE_COUNT] ;
-extern unsigned int inter_uv_modes[VP8_UV_MODES] ;
+extern unsigned int inter_y_modes[MB_MODE_COUNT];
+extern unsigned int inter_uv_modes[VP8_UV_MODES];
extern unsigned int inter_b_modes[B_MODE_COUNT];
#endif
@@ -190,1229 +190,1160 @@ static int inter_minq[QINDEX_RANGE];
// formulaic approach to facilitate adjustment of the Q tables.
// The formulae were derived from computing a 3rd order polynomial best
// fit to the original data (after plotting real maxq vs minq (not q index))
-int calculate_minq_index( double maxq,
- double x3, double x2, double x, double c )
-{
- int i;
- double minqtarget;
- double thisq;
-
- minqtarget = ( (x3 * maxq * maxq * maxq) +
- (x2 * maxq * maxq) +
- (x * maxq) +
- c );
-
- if ( minqtarget > maxq )
- minqtarget = maxq;
-
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- thisq = vp8_convert_qindex_to_q(i);
- if ( minqtarget <= vp8_convert_qindex_to_q(i) )
- return i;
- }
- return QINDEX_RANGE-1;
+int calculate_minq_index(double maxq,
+ double x3, double x2, double x, double c) {
+ int i;
+ double minqtarget;
+ double thisq;
+
+ minqtarget = ((x3 * maxq * maxq * maxq) +
+ (x2 * maxq * maxq) +
+ (x * maxq) +
+ c);
+
+ if (minqtarget > maxq)
+ minqtarget = maxq;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ thisq = vp8_convert_qindex_to_q(i);
+ if (minqtarget <= vp8_convert_qindex_to_q(i))
+ return i;
+ }
+ return QINDEX_RANGE - 1;
}
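
calculate_minq_index evaluates a cubic in the real (not index) q value, clamps the result to maxq, then scans the q table for the first index whose real q reaches the target. Taking the inter_minq coefficients used below, maxq = 40 gives 0.00000271*40^3 - 0.00113*40^2 + 0.697*40, i.e. roughly 0.17 - 1.81 + 27.88, or about 26.2. A sketch of just the polynomial step:

    #include <stdio.h>

    /* Cubic fit with the inter_minq coefficients from init_minq_luts;
     * the constant term c is 0.0 for all five tables. */
    static double minq_target(double maxq) {
      return 0.00000271 * maxq * maxq * maxq
             - 0.00113 * maxq * maxq
             + 0.697 * maxq;
    }

    int main(void) {
      printf("maxq 40 -> minq target %.3f\n", minq_target(40.0));  /* ~26.245 */
      return 0;
    }
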
-void init_minq_luts()
-{
- int i;
- double maxq;
-
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- maxq = vp8_convert_qindex_to_q(i);
-
-
- kf_low_motion_minq[i] = calculate_minq_index( maxq,
- 0.0000003,
- -0.000015,
- 0.074,
- 0.0 );
- kf_high_motion_minq[i] = calculate_minq_index( maxq,
- 0.0000004,
- -0.000125,
- 0.14,
- 0.0 );
- gf_low_motion_minq[i] = calculate_minq_index( maxq,
- 0.0000015,
- -0.0009,
- 0.33,
- 0.0 );
- gf_high_motion_minq[i] = calculate_minq_index( maxq,
- 0.0000021,
- -0.00125,
- 0.45,
- 0.0 );
- inter_minq[i] = calculate_minq_index( maxq,
- 0.00000271,
- -0.00113,
- 0.697,
- 0.0 );
-
- }
+void init_minq_luts() {
+ int i;
+ double maxq;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ maxq = vp8_convert_qindex_to_q(i);
+
+
+ kf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000003,
+ -0.000015,
+ 0.074,
+ 0.0);
+ kf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000004,
+ -0.000125,
+ 0.14,
+ 0.0);
+ gf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000015,
+ -0.0009,
+ 0.33,
+ 0.0);
+ gf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000021,
+ -0.00125,
+ 0.45,
+ 0.0);
+ inter_minq[i] = calculate_minq_index(maxq,
+ 0.00000271,
+ -0.00113,
+ 0.697,
+ 0.0);
+
+ }
}
-void init_base_skip_probs()
-{
- int i;
- double q;
- int skip_prob, t;
+void init_base_skip_probs() {
+ int i;
+ double q;
+ int skip_prob, t;
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- q = vp8_convert_qindex_to_q(i);
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ q = vp8_convert_qindex_to_q(i);
- // Exponential decay caluclation of baseline skip prob with clamping
- // Based on crude best fit of old table.
- t = (int)( 564.25 * pow( 2.71828, (-0.012*q) ) );
+    // Exponential decay calculation of baseline skip prob with clamping
+ // Based on crude best fit of old table.
+ t = (int)(564.25 * pow(2.71828, (-0.012 * q)));
- skip_prob = t;
- if ( skip_prob < 1 )
- skip_prob = 1;
- else if ( skip_prob > 255 )
- skip_prob = 255;
+ skip_prob = t;
+ if (skip_prob < 1)
+ skip_prob = 1;
+ else if (skip_prob > 255)
+ skip_prob = 255;
#if CONFIG_NEWENTROPY
- vp8cx_base_skip_false_prob[i][1] = skip_prob;
-
- skip_prob = t * 0.75;
- if ( skip_prob < 1 )
- skip_prob = 1;
- else if ( skip_prob > 255 )
- skip_prob = 255;
- vp8cx_base_skip_false_prob[i][2] = skip_prob;
-
- skip_prob = t * 1.25;
- if ( skip_prob < 1 )
- skip_prob = 1;
- else if ( skip_prob > 255 )
- skip_prob = 255;
- vp8cx_base_skip_false_prob[i][0] = skip_prob;
+ vp8cx_base_skip_false_prob[i][1] = skip_prob;
+
+ skip_prob = t * 0.75;
+ if (skip_prob < 1)
+ skip_prob = 1;
+ else if (skip_prob > 255)
+ skip_prob = 255;
+ vp8cx_base_skip_false_prob[i][2] = skip_prob;
+
+ skip_prob = t * 1.25;
+ if (skip_prob < 1)
+ skip_prob = 1;
+ else if (skip_prob > 255)
+ skip_prob = 255;
+ vp8cx_base_skip_false_prob[i][0] = skip_prob;
#else
- vp8cx_base_skip_false_prob[i] = skip_prob;
+ vp8cx_base_skip_false_prob[i] = skip_prob;
#endif
- }
+ }
}
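
The baseline skip probability is an exponential decay in real q, t = 564.25 * e^(-0.012*q), clamped to the representable range [1, 255]; with CONFIG_NEWENTROPY the three contexts get the same curve scaled by 1.25, 1.0 and 0.75. For q = 50, t = 564.25 * e^(-0.6) is about 310, which clamps to 255. A compact sketch (note the code above scales the already-truncated t, so results can differ by a point; skip_false_prob is a local name, not a libvpx symbol):

    #include <math.h>
    #include <stdio.h>

    /* t = scale * 564.25 * exp(-0.012 * q), clamped to [1, 255]. */
    static int skip_false_prob(double q, double scale) {
      int t = (int)(scale * 564.25 * exp(-0.012 * q));
      if (t < 1) return 1;
      if (t > 255) return 255;
      return t;
    }

    int main(void) {
      /* the three NEWENTROPY contexts at q = 50 */
      printf("%d %d %d\n", skip_false_prob(50.0, 1.25),
             skip_false_prob(50.0, 1.0), skip_false_prob(50.0, 0.75));
      return 0;
    }
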
-void update_base_skip_probs(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
+void update_base_skip_probs(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- if (cm->frame_type != KEY_FRAME)
- {
- update_skip_probs(cpi);
+ if (cm->frame_type != KEY_FRAME) {
+ update_skip_probs(cpi);
- if (cm->refresh_alt_ref_frame)
- {
+ if (cm->refresh_alt_ref_frame) {
#if CONFIG_NEWENTROPY
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; ++k)
- cpi->last_skip_false_probs[2][k] = cm->mbskip_pred_probs[k];
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ cpi->last_skip_false_probs[2][k] = cm->mbskip_pred_probs[k];
#else
- cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
+ cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
#endif
- cpi->last_skip_probs_q[2] = cm->base_qindex;
- }
- else if (cpi->common.refresh_golden_frame)
- {
+ cpi->last_skip_probs_q[2] = cm->base_qindex;
+ } else if (cpi->common.refresh_golden_frame) {
#if CONFIG_NEWENTROPY
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; ++k)
- cpi->last_skip_false_probs[1][k] = cm->mbskip_pred_probs[k];
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ cpi->last_skip_false_probs[1][k] = cm->mbskip_pred_probs[k];
#else
- cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
+ cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
#endif
- cpi->last_skip_probs_q[1] = cm->base_qindex;
- }
- else
- {
+ cpi->last_skip_probs_q[1] = cm->base_qindex;
+ } else {
#if CONFIG_NEWENTROPY
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; ++k)
- cpi->last_skip_false_probs[0][k] = cm->mbskip_pred_probs[k];
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ cpi->last_skip_false_probs[0][k] = cm->mbskip_pred_probs[k];
#else
- cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
+ cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
#endif
- cpi->last_skip_probs_q[0] = cm->base_qindex;
+ cpi->last_skip_probs_q[0] = cm->base_qindex;
- // update the baseline table for the current q
+ // update the baseline table for the current q
#if CONFIG_NEWENTROPY
- for (k=0; k<MBSKIP_CONTEXTS; ++k)
- cpi->base_skip_false_prob[cm->base_qindex][k] =
- cm->mbskip_pred_probs[k];
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ cpi->base_skip_false_prob[cm->base_qindex][k] =
+ cm->mbskip_pred_probs[k];
#else
- cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
+ cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
#endif
- }
}
+ }
}
-void vp8_initialize()
-{
- static int init_done = 0;
-
- if (!init_done)
- {
- vp8_scale_machine_specific_config();
- vp8_initialize_common();
- //vp8_dmachine_specific_config();
- vp8_tokenize_initialize();
- vp8_init_quant_tables();
- vp8_init_me_luts();
- init_minq_luts();
- init_base_skip_probs();
- init_done = 1;
- }
+void vp8_initialize() {
+ static int init_done = 0;
+
+ if (!init_done) {
+ vp8_scale_machine_specific_config();
+ vp8_initialize_common();
+ // vp8_dmachine_specific_config();
+ vp8_tokenize_initialize();
+ vp8_init_quant_tables();
+ vp8_init_me_luts();
+ init_minq_luts();
+ init_base_skip_probs();
+ init_done = 1;
+ }
}
#ifdef PACKET_TESTING
extern FILE *vpxlogc;
#endif
-static void setup_features(VP8_COMP *cpi)
-{
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+static void setup_features(VP8_COMP *cpi) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
- // Set up default state for MB feature flags
+ // Set up default state for MB feature flags
- xd->segmentation_enabled = 0; // Default segmentation disabled
+ xd->segmentation_enabled = 0; // Default segmentation disabled
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
- clearall_segfeatures( xd );
+ clearall_segfeatures(xd);
- xd->mode_ref_lf_delta_enabled = 0;
- xd->mode_ref_lf_delta_update = 0;
- vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
- vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
- vpx_memset(xd->last_ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
- vpx_memset(xd->last_mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+ xd->mode_ref_lf_delta_enabled = 0;
+ xd->mode_ref_lf_delta_update = 0;
+ vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+ vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+ vpx_memset(xd->last_ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+ vpx_memset(xd->last_mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
- set_default_lf_deltas(cpi);
+ set_default_lf_deltas(cpi);
}
-static void dealloc_compressor_data(VP8_COMP *cpi)
-{
- vpx_free(cpi->tplist);
- cpi->tplist = NULL;
+static void dealloc_compressor_data(VP8_COMP *cpi) {
+ vpx_free(cpi->tplist);
+ cpi->tplist = NULL;
- // Delete last frame MV storage buffers
- vpx_free(cpi->lfmv);
- cpi->lfmv = 0;
+ // Delete last frame MV storage buffers
+ vpx_free(cpi->lfmv);
+ cpi->lfmv = 0;
- vpx_free(cpi->lf_ref_frame_sign_bias);
- cpi->lf_ref_frame_sign_bias = 0;
+ vpx_free(cpi->lf_ref_frame_sign_bias);
+ cpi->lf_ref_frame_sign_bias = 0;
- vpx_free(cpi->lf_ref_frame);
- cpi->lf_ref_frame = 0;
+ vpx_free(cpi->lf_ref_frame);
+ cpi->lf_ref_frame = 0;
- // Delete sementation map
- vpx_free(cpi->segmentation_map);
- cpi->segmentation_map = 0;
- vpx_free(cpi->common.last_frame_seg_map);
- cpi->common.last_frame_seg_map = 0;
- vpx_free(cpi->coding_context.last_frame_seg_map_copy);
- cpi->coding_context.last_frame_seg_map_copy = 0;
+  // Delete segmentation map
+ vpx_free(cpi->segmentation_map);
+ cpi->segmentation_map = 0;
+ vpx_free(cpi->common.last_frame_seg_map);
+ cpi->common.last_frame_seg_map = 0;
+ vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+ cpi->coding_context.last_frame_seg_map_copy = 0;
- vpx_free(cpi->active_map);
- cpi->active_map = 0;
+ vpx_free(cpi->active_map);
+ cpi->active_map = 0;
- vp8_de_alloc_frame_buffers(&cpi->common);
+ vp8_de_alloc_frame_buffers(&cpi->common);
- vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf);
- vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
+ vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf);
+ vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
#if VP8_TEMPORAL_ALT_REF
- vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
+ vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
#endif
- vp8_lookahead_destroy(cpi->lookahead);
+ vp8_lookahead_destroy(cpi->lookahead);
- vpx_free(cpi->tok);
- cpi->tok = 0;
+ vpx_free(cpi->tok);
+ cpi->tok = 0;
- // Structure used to monitor GF usage
- vpx_free(cpi->gf_active_flags);
- cpi->gf_active_flags = 0;
+ // Structure used to monitor GF usage
+ vpx_free(cpi->gf_active_flags);
+ cpi->gf_active_flags = 0;
- // Activity mask based per mb zbin adjustments
- vpx_free(cpi->mb_activity_map);
- cpi->mb_activity_map = 0;
- vpx_free(cpi->mb_norm_activity_map);
- cpi->mb_norm_activity_map = 0;
+ // Activity mask based per mb zbin adjustments
+ vpx_free(cpi->mb_activity_map);
+ cpi->mb_activity_map = 0;
+ vpx_free(cpi->mb_norm_activity_map);
+ cpi->mb_norm_activity_map = 0;
- vpx_free(cpi->mb.pip);
- cpi->mb.pip = 0;
+ vpx_free(cpi->mb.pip);
+ cpi->mb.pip = 0;
- vpx_free(cpi->twopass.total_stats);
- cpi->twopass.total_stats = 0;
+ vpx_free(cpi->twopass.total_stats);
+ cpi->twopass.total_stats = 0;
- vpx_free(cpi->twopass.total_left_stats);
- cpi->twopass.total_left_stats = 0;
+ vpx_free(cpi->twopass.total_left_stats);
+ cpi->twopass.total_left_stats = 0;
- vpx_free(cpi->twopass.this_frame_stats);
- cpi->twopass.this_frame_stats = 0;
+ vpx_free(cpi->twopass.this_frame_stats);
+ cpi->twopass.this_frame_stats = 0;
}
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a target q value
-static int compute_qdelta( VP8_COMP *cpi, double qstart, double qtarget )
-{
- int i;
- int start_index = cpi->worst_quality;
- int target_index = cpi->worst_quality;
-
- // Convert the average q value to an index.
- for ( i = cpi->best_quality; i < cpi->worst_quality; i++ )
- {
- start_index = i;
- if ( vp8_convert_qindex_to_q(i) >= qstart )
- break;
- }
-
- // Convert the q target to an index
- for ( i = cpi->best_quality; i < cpi->worst_quality; i++ )
- {
- target_index = i;
- if ( vp8_convert_qindex_to_q(i) >= qtarget )
- break;
- }
-
- return target_index - start_index;
+static int compute_qdelta(VP8_COMP *cpi, double qstart, double qtarget) {
+ int i;
+ int start_index = cpi->worst_quality;
+ int target_index = cpi->worst_quality;
+
+ // Convert the average q value to an index.
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ start_index = i;
+ if (vp8_convert_qindex_to_q(i) >= qstart)
+ break;
+ }
+
+ // Convert the q target to an index
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ target_index = i;
+ if (vp8_convert_qindex_to_q(i) >= qtarget)
+ break;
+ }
+
+ return target_index - start_index;
}
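
compute_qdelta maps two real q values back onto the index scale by scanning the q table twice and returning the signed index difference; callers use it to turn a statement like "about 12.5% below the average q" into a qindex offset (see the 0.875 factor in init_seg_features below). A stand-alone sketch with an explicit table argument (qdelta and q_table are invented names):

    /* First index whose real q reaches qstart/qtarget; delta of the two. */
    static int qdelta(const double *q_table, int n,
                      double qstart, double qtarget) {
      int i, start = n - 1, target = n - 1;
      for (i = 0; i < n; i++) {
        start = i;
        if (q_table[i] >= qstart) break;
      }
      for (i = 0; i < n; i++) {
        target = i;
        if (q_table[i] >= qtarget) break;
      }
      return target - start;
    }
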
-static void init_seg_features(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+static void init_seg_features(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int high_q = (int)(cpi->avg_q > 48.0);
- int qi_delta;
+ int high_q = (int)(cpi->avg_q > 48.0);
+ int qi_delta;
- // Disable and clear down for KF
- if ( cm->frame_type == KEY_FRAME )
- {
- // Clear down the global segmentation map
- vpx_memset( cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- cpi->static_mb_pct = 0;
+ // Disable and clear down for KF
+ if (cm->frame_type == KEY_FRAME) {
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ cpi->static_mb_pct = 0;
- // Disable segmentation
- vp8_disable_segmentation((VP8_PTR)cpi);
+ // Disable segmentation
+ vp8_disable_segmentation((VP8_PTR)cpi);
- // Clear down the segment features.
- clearall_segfeatures(xd);
- }
+ // Clear down the segment features.
+ clearall_segfeatures(xd);
+ }
- // If this is an alt ref frame
- else if ( cm->refresh_alt_ref_frame )
- {
- // Clear down the global segmentation map
- vpx_memset( cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- cpi->static_mb_pct = 0;
+ // If this is an alt ref frame
+ else if (cm->refresh_alt_ref_frame) {
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ cpi->static_mb_pct = 0;
- // Disable segmentation and individual segment features by default
- vp8_disable_segmentation((VP8_PTR)cpi);
- clearall_segfeatures(xd);
+ // Disable segmentation and individual segment features by default
+ vp8_disable_segmentation((VP8_PTR)cpi);
+ clearall_segfeatures(xd);
- // Scan frames from current to arf frame.
- // This function re-enables segmentation if appropriate.
- vp8_update_mbgraph_stats(cpi);
+ // Scan frames from current to arf frame.
+ // This function re-enables segmentation if appropriate.
+ vp8_update_mbgraph_stats(cpi);
- // If segmentation was enabled set those features needed for the
- // arf itself.
- if ( xd->segmentation_enabled )
- {
- xd->update_mb_segmentation_map = 1;
- xd->update_mb_segmentation_data = 1;
+ // If segmentation was enabled set those features needed for the
+ // arf itself.
+ if (xd->segmentation_enabled) {
+ xd->update_mb_segmentation_map = 1;
+ xd->update_mb_segmentation_data = 1;
- qi_delta = compute_qdelta( cpi, cpi->avg_q, (cpi->avg_q * 0.875) );
- set_segdata( xd, 1, SEG_LVL_ALT_Q, (qi_delta - 2) );
- set_segdata( xd, 1, SEG_LVL_ALT_LF, -2 );
+ qi_delta = compute_qdelta(cpi, cpi->avg_q, (cpi->avg_q * 0.875));
+ set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta - 2));
+ set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
- enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
- enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+ enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
+ enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
- // Where relevant assume segment data is delta data
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ // Where relevant assume segment data is delta data
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
- }
}
- // All other frames if segmentation has been enabled
- else if ( xd->segmentation_enabled )
- {
-/*
- int i;
+ }
+ // All other frames if segmentation has been enabled
+ else if (xd->segmentation_enabled) {
+ /*
+ int i;
- // clears prior frame seg lev refs
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- {
- // only do it if the force drop the background stuff is off
- if(!segfeature_active(xd, i, SEG_LVL_MODE))
+ // clears prior frame seg lev refs
+ for (i = 0; i < MAX_MB_SEGMENTS; i++)
{
- disable_segfeature(xd,i,SEG_LVL_REF_FRAME);
- set_segdata( xd,i, SEG_LVL_REF_FRAME, 0xffffff);
+ // only do it if the force drop the background stuff is off
+ if(!segfeature_active(xd, i, SEG_LVL_MODE))
+ {
+ disable_segfeature(xd,i,SEG_LVL_REF_FRAME);
+ set_segdata( xd,i, SEG_LVL_REF_FRAME, 0xffffff);
+ }
}
- }
-*/
+ */
- // First normal frame in a valid gf or alt ref group
- if ( cpi->common.frames_since_golden == 0 )
- {
- // Set up segment features for normal frames in an af group
- if ( cpi->source_alt_ref_active )
- {
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 1;
- xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ // First normal frame in a valid gf or alt ref group
+ if (cpi->common.frames_since_golden == 0) {
+ // Set up segment features for normal frames in an af group
+ if (cpi->source_alt_ref_active) {
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 1;
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
- qi_delta = compute_qdelta( cpi, cpi->avg_q,
- (cpi->avg_q * 1.125) );
- set_segdata( xd, 1, SEG_LVL_ALT_Q, (qi_delta + 2) );
- set_segdata( xd, 1, SEG_LVL_ALT_Q, 0 );
- enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
+ qi_delta = compute_qdelta(cpi, cpi->avg_q,
+ (cpi->avg_q * 1.125));
+ set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta + 2));
+ set_segdata(xd, 1, SEG_LVL_ALT_Q, 0);
+ enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
- set_segdata( xd, 1, SEG_LVL_ALT_LF, -2 );
- enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+ set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
+ enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
- // Segment coding disabled for compred testing
- if ( high_q || (cpi->static_mb_pct == 100) )
- {
- //set_segref(xd, 1, LAST_FRAME);
- set_segref(xd, 1, ALTREF_FRAME);
- enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
+        // Segment coding disabled for compound prediction (compred) testing
+ if (high_q || (cpi->static_mb_pct == 100)) {
+ // set_segref(xd, 1, LAST_FRAME);
+ set_segref(xd, 1, ALTREF_FRAME);
+ enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
- set_segdata( xd, 1, SEG_LVL_MODE, ZEROMV );
- enable_segfeature(xd, 1, SEG_LVL_MODE);
+ set_segdata(xd, 1, SEG_LVL_MODE, ZEROMV);
+ enable_segfeature(xd, 1, SEG_LVL_MODE);
- // EOB segment coding not fixed for 8x8 yet
- set_segdata( xd, 1, SEG_LVL_EOB, 0 );
- enable_segfeature(xd, 1, SEG_LVL_EOB);
- }
- }
- // Disable segmentation and clear down features if alt ref
- // is not active for this group
- else
- {
- vp8_disable_segmentation((VP8_PTR)cpi);
-
- vpx_memset( cpi->segmentation_map, 0,
- (cm->mb_rows * cm->mb_cols));
+ // EOB segment coding not fixed for 8x8 yet
+ set_segdata(xd, 1, SEG_LVL_EOB, 0);
+ enable_segfeature(xd, 1, SEG_LVL_EOB);
+ }
+ }
+ // Disable segmentation and clear down features if alt ref
+ // is not active for this group
+ else {
+ vp8_disable_segmentation((VP8_PTR)cpi);
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
+ vpx_memset(cpi->segmentation_map, 0,
+ (cm->mb_rows * cm->mb_cols));
- clearall_segfeatures(xd);
- }
- }
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
- // Special case where we are coding over the top of a previous
- // alt ref frame
- // Segment coding disabled for compred testing
- else if ( cpi->is_src_frame_alt_ref )
- {
- // Enable mode and ref frame features for segment 0 as well
- enable_segfeature(xd, 0, SEG_LVL_REF_FRAME);
- enable_segfeature(xd, 0, SEG_LVL_MODE);
- enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
- enable_segfeature(xd, 1, SEG_LVL_MODE);
-
- // All mbs should use ALTREF_FRAME, ZEROMV exclusively
- clear_segref(xd, 0);
- set_segref(xd, 0, ALTREF_FRAME);
- clear_segref(xd, 1);
- set_segref(xd, 1, ALTREF_FRAME);
- set_segdata( xd, 0, SEG_LVL_MODE, ZEROMV );
- set_segdata( xd, 1, SEG_LVL_MODE, ZEROMV );
-
- // Skip all MBs if high Q
- if ( high_q )
- {
- enable_segfeature(xd, 0, SEG_LVL_EOB);
- set_segdata( xd, 0, SEG_LVL_EOB, 0 );
- enable_segfeature(xd, 1, SEG_LVL_EOB);
- set_segdata( xd, 1, SEG_LVL_EOB, 0 );
- }
- // Enable data udpate
- xd->update_mb_segmentation_data = 1;
- }
- // All other frames.
- else
- {
- // No updates.. leave things as they are.
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- }
- }
+ clearall_segfeatures(xd);
+ }
+ }
+
+ // Special case where we are coding over the top of a previous
+ // alt ref frame
+ // Segment coding disabled for compound prediction testing
+ else if (cpi->is_src_frame_alt_ref) {
+ // Enable mode and ref frame features for segment 0 as well
+ enable_segfeature(xd, 0, SEG_LVL_REF_FRAME);
+ enable_segfeature(xd, 0, SEG_LVL_MODE);
+ enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
+ enable_segfeature(xd, 1, SEG_LVL_MODE);
+
+ // All mbs should use ALTREF_FRAME, ZEROMV exclusively
+ clear_segref(xd, 0);
+ set_segref(xd, 0, ALTREF_FRAME);
+ clear_segref(xd, 1);
+ set_segref(xd, 1, ALTREF_FRAME);
+ set_segdata(xd, 0, SEG_LVL_MODE, ZEROMV);
+ set_segdata(xd, 1, SEG_LVL_MODE, ZEROMV);
+
+ // Skip all MBs if high Q
+ if (high_q) {
+ enable_segfeature(xd, 0, SEG_LVL_EOB);
+ set_segdata(xd, 0, SEG_LVL_EOB, 0);
+ enable_segfeature(xd, 1, SEG_LVL_EOB);
+ set_segdata(xd, 1, SEG_LVL_EOB, 0);
+ }
+ // Enable data update
+ xd->update_mb_segmentation_data = 1;
+ }
+ // All other frames.
+ else {
+ // No updates; leave things as they are.
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ }
+ }
}
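/*
 * Illustration (not part of this change): a minimal sketch of how a
 * SEG_LVL_ALT_Q value like the (qi_delta + 2) set above is resolved per
 * macroblock, assuming SEGMENT_DELTADATA adds the segment value to the
 * frame base qindex while SEGMENT_ABSDATA replaces it. The helper names
 * and the qindex bound are illustrative.
 */
#include <stdio.h>

enum { SKETCH_DELTADATA = 0, SKETCH_ABSDATA = 1 };

static int clamp_qindex(int q) {
  if (q < 0) return 0;
  if (q > 127) return 127;   /* illustrative upper bound */
  return q;
}

static int effective_qindex(int base_qindex, int abs_delta, int seg_alt_q) {
  if (abs_delta == SKETCH_ABSDATA)
    return clamp_qindex(seg_alt_q);                /* absolute value */
  return clamp_qindex(base_qindex + seg_alt_q);    /* delta-coded value */
}

int main(void) {
  /* e.g. segment 1 carrying a delta of 8 on top of base qindex 60 */
  printf("%d\n", effective_qindex(60, SKETCH_DELTADATA, 8));  /* prints 68 */
  return 0;
}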
// DEBUG: Print out the segment id of each MB in the current frame.
-static void print_seg_map(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = & cpi->common;
- int row,col;
- int map_index = 0;
- FILE *statsfile;
+static void print_seg_map(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+ int row, col;
+ int map_index = 0;
+ FILE *statsfile;
- statsfile = fopen("segmap.stt", "a");
+ statsfile = fopen("segmap.stt", "a");
- fprintf(statsfile, "%10d\n",
- cm->current_video_frame );
+ fprintf(statsfile, "%10d\n",
+ cm->current_video_frame);
- for ( row = 0; row < cpi->common.mb_rows; row++ )
- {
- for ( col = 0; col < cpi->common.mb_cols; col++ )
- {
- fprintf(statsfile, "%10d",
- cpi->segmentation_map[map_index]);
- map_index++;
- }
- fprintf(statsfile, "\n");
+ for (row = 0; row < cpi->common.mb_rows; row++) {
+ for (col = 0; col < cpi->common.mb_cols; col++) {
+ fprintf(statsfile, "%10d",
+ cpi->segmentation_map[map_index]);
+ map_index++;
}
fprintf(statsfile, "\n");
+ }
+ fprintf(statsfile, "\n");
- fclose(statsfile);
+ fclose(statsfile);
}
-static void set_default_lf_deltas(VP8_COMP *cpi)
-{
- cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
- cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
+static void set_default_lf_deltas(VP8_COMP *cpi) {
+ cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
+ cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
- vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
- vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+ vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+ vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
- // Test of ref frame deltas
- cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
- cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
- cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
- cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
+ // Test of ref frame deltas
+ cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
+ cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
+ cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
+ cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
- cpi->mb.e_mbd.mode_lf_deltas[0] = 4; // BPRED
- cpi->mb.e_mbd.mode_lf_deltas[1] = -2; // Zero
- cpi->mb.e_mbd.mode_lf_deltas[2] = 2; // New mv
- cpi->mb.e_mbd.mode_lf_deltas[3] = 4; // Split mv
+ cpi->mb.e_mbd.mode_lf_deltas[0] = 4; // BPRED
+ cpi->mb.e_mbd.mode_lf_deltas[1] = -2; // Zero
+ cpi->mb.e_mbd.mode_lf_deltas[2] = 2; // New mv
+ cpi->mb.e_mbd.mode_lf_deltas[3] = 4; // Split mv
}
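/*
 * Illustration (not part of this change): how the reference-frame and mode
 * deltas set above are assumed to combine into a per-macroblock loop filter
 * level, following the usual VP8 scheme of base + ref delta + mode delta,
 * clamped to [0, 63].
 */
#include <stdio.h>

#define SKETCH_MAX_LOOP_FILTER 63

static int mb_filter_level(int base_level, int ref_delta, int mode_delta) {
  int level = base_level + ref_delta + mode_delta;
  if (level < 0) level = 0;
  if (level > SKETCH_MAX_LOOP_FILTER) level = SKETCH_MAX_LOOP_FILTER;
  return level;
}

int main(void) {
  /* an ALTREF_FRAME block coded with a zero-motion mode: 30 - 2 - 2 = 26 */
  printf("%d\n", mb_filter_level(30, -2, -2));
  return 0;
}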
-void vp8_set_speed_features(VP8_COMP *cpi)
-{
- SPEED_FEATURES *sf = &cpi->sf;
- int Mode = cpi->compressor_speed;
- int Speed = cpi->Speed;
- int i;
- VP8_COMMON *cm = &cpi->common;
-
- // Only modes 0 and 1 supported for now in experimental code basae
- if ( Mode > 1 )
- Mode = 1;
-
- // Initialise default mode frequency sampling variables
- for (i = 0; i < MAX_MODES; i ++)
- {
- cpi->mode_check_freq[i] = 0;
- cpi->mode_test_hit_counts[i] = 0;
- cpi->mode_chosen_counts[i] = 0;
- }
-
- // best quality defaults
- sf->RD = 1;
- sf->search_method = NSTEP;
- sf->improved_dct = 1;
- sf->auto_filter = 1;
- sf->recode_loop = 1;
- sf->quarter_pixel_search = 1;
- sf->half_pixel_search = 1;
- sf->iterative_sub_pixel = 1;
+void vp8_set_speed_features(VP8_COMP *cpi) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int Mode = cpi->compressor_speed;
+ int Speed = cpi->Speed;
+ int i;
+ VP8_COMMON *cm = &cpi->common;
+
+ // Only modes 0 and 1 supported for now in experimental code base
+ if (Mode > 1)
+ Mode = 1;
+
+ // Initialise default mode frequency sampling variables
+ for (i = 0; i < MAX_MODES; i++) {
+ cpi->mode_check_freq[i] = 0;
+ cpi->mode_test_hit_counts[i] = 0;
+ cpi->mode_chosen_counts[i] = 0;
+ }
+
+ // best quality defaults
+ sf->RD = 1;
+ sf->search_method = NSTEP;
+ sf->improved_dct = 1;
+ sf->auto_filter = 1;
+ sf->recode_loop = 1;
+ sf->quarter_pixel_search = 1;
+ sf->half_pixel_search = 1;
+ sf->iterative_sub_pixel = 1;
#if CONFIG_LOSSLESS
- sf->optimize_coefficients = 0;
+ sf->optimize_coefficients = 0;
#else
- sf->optimize_coefficients = 1;
+ sf->optimize_coefficients = 1;
#endif
- sf->no_skip_block4x4_search = 1;
+ sf->no_skip_block4x4_search = 1;
- sf->first_step = 0;
- sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
- sf->improved_mv_pred = 1;
+ sf->first_step = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ sf->improved_mv_pred = 1;
- // default thresholds to 0
- for (i = 0; i < MAX_MODES; i++)
- sf->thresh_mult[i] = 0;
+ // default thresholds to 0
+ for (i = 0; i < MAX_MODES; i++)
+ sf->thresh_mult[i] = 0;
- switch (Mode)
- {
+ switch (Mode) {
case 0: // best quality mode
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
- sf->thresh_mult[THR_ZEROG ] = 0;
- sf->thresh_mult[THR_ZEROG_FILT ] = 0;
- sf->thresh_mult[THR_ZEROA ] = 0;
- sf->thresh_mult[THR_ZEROA_FILT ] = 0;
- sf->thresh_mult[THR_NEARESTMV ] = 0;
- sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
- sf->thresh_mult[THR_NEARESTG ] = 0;
- sf->thresh_mult[THR_NEARESTG_FILT ] = 0;
- sf->thresh_mult[THR_NEARESTA ] = 0;
- sf->thresh_mult[THR_NEARESTA_FILT ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_NEARMV_FILT ] = 0;
- sf->thresh_mult[THR_NEARG ] = 0;
- sf->thresh_mult[THR_NEARG_FILT ] = 0;
- sf->thresh_mult[THR_NEARA ] = 0;
- sf->thresh_mult[THR_NEARA_FILT ] = 0;
-
- sf->thresh_mult[THR_DC ] = 0;
-
- sf->thresh_mult[THR_V_PRED ] = 1000;
- sf->thresh_mult[THR_H_PRED ] = 1000;
- sf->thresh_mult[THR_B_PRED ] = 2000;
- sf->thresh_mult[THR_I8X8_PRED] = 2000;
- sf->thresh_mult[THR_TM ] = 1000;
-
- sf->thresh_mult[THR_NEWMV ] = 1000;
- sf->thresh_mult[THR_NEWG ] = 1000;
- sf->thresh_mult[THR_NEWA ] = 1000;
- sf->thresh_mult[THR_NEWMV_FILT ] = 1000;
- sf->thresh_mult[THR_NEWG_FILT ] = 1000;
- sf->thresh_mult[THR_NEWA_FILT ] = 1000;
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
+ sf->thresh_mult[THR_ZEROG ] = 0;
+ sf->thresh_mult[THR_ZEROG_FILT ] = 0;
+ sf->thresh_mult[THR_ZEROA ] = 0;
+ sf->thresh_mult[THR_ZEROA_FILT ] = 0;
+ sf->thresh_mult[THR_NEARESTMV ] = 0;
+ sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_NEARMV_FILT ] = 0;
+ sf->thresh_mult[THR_NEARG ] = 0;
+ sf->thresh_mult[THR_NEARG_FILT ] = 0;
+ sf->thresh_mult[THR_NEARA ] = 0;
+ sf->thresh_mult[THR_NEARA_FILT ] = 0;
+
+ sf->thresh_mult[THR_DC ] = 0;
+
+ sf->thresh_mult[THR_V_PRED ] = 1000;
+ sf->thresh_mult[THR_H_PRED ] = 1000;
+ sf->thresh_mult[THR_B_PRED ] = 2000;
+ sf->thresh_mult[THR_I8X8_PRED] = 2000;
+ sf->thresh_mult[THR_TM ] = 1000;
+
+ sf->thresh_mult[THR_NEWMV ] = 1000;
+ sf->thresh_mult[THR_NEWG ] = 1000;
+ sf->thresh_mult[THR_NEWA ] = 1000;
+ sf->thresh_mult[THR_NEWMV_FILT ] = 1000;
+ sf->thresh_mult[THR_NEWG_FILT ] = 1000;
+ sf->thresh_mult[THR_NEWA_FILT ] = 1000;
#else
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_ZEROG ] = 0;
- sf->thresh_mult[THR_ZEROA ] = 0;
- sf->thresh_mult[THR_NEARESTMV] = 0;
- sf->thresh_mult[THR_NEARESTG ] = 0;
- sf->thresh_mult[THR_NEARESTA ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_NEARG ] = 0;
- sf->thresh_mult[THR_NEARA ] = 0;
-
- sf->thresh_mult[THR_DC ] = 0;
-
- sf->thresh_mult[THR_V_PRED ] = 1000;
- sf->thresh_mult[THR_H_PRED ] = 1000;
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROG ] = 0;
+ sf->thresh_mult[THR_ZEROA ] = 0;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_NEARG ] = 0;
+ sf->thresh_mult[THR_NEARA ] = 0;
+
+ sf->thresh_mult[THR_DC ] = 0;
+
+ sf->thresh_mult[THR_V_PRED ] = 1000;
+ sf->thresh_mult[THR_H_PRED ] = 1000;
#if CONFIG_NEWINTRAMODES
- sf->thresh_mult[THR_D45_PRED ] = 1000;
- sf->thresh_mult[THR_D135_PRED] = 1000;
- sf->thresh_mult[THR_D117_PRED] = 1000;
- sf->thresh_mult[THR_D153_PRED] = 1000;
- sf->thresh_mult[THR_D27_PRED ] = 1000;
- sf->thresh_mult[THR_D63_PRED ] = 1000;
+ sf->thresh_mult[THR_D45_PRED ] = 1000;
+ sf->thresh_mult[THR_D135_PRED] = 1000;
+ sf->thresh_mult[THR_D117_PRED] = 1000;
+ sf->thresh_mult[THR_D153_PRED] = 1000;
+ sf->thresh_mult[THR_D27_PRED ] = 1000;
+ sf->thresh_mult[THR_D63_PRED ] = 1000;
#endif
- sf->thresh_mult[THR_B_PRED ] = 2000;
- sf->thresh_mult[THR_I8X8_PRED] = 2000;
- sf->thresh_mult[THR_TM ] = 1000;
+ sf->thresh_mult[THR_B_PRED ] = 2000;
+ sf->thresh_mult[THR_I8X8_PRED] = 2000;
+ sf->thresh_mult[THR_TM ] = 1000;
- sf->thresh_mult[THR_NEWMV ] = 1000;
- sf->thresh_mult[THR_NEWG ] = 1000;
- sf->thresh_mult[THR_NEWA ] = 1000;
+ sf->thresh_mult[THR_NEWMV ] = 1000;
+ sf->thresh_mult[THR_NEWG ] = 1000;
+ sf->thresh_mult[THR_NEWA ] = 1000;
#endif
- sf->thresh_mult[THR_SPLITMV ] = 2500;
- sf->thresh_mult[THR_SPLITG ] = 5000;
- sf->thresh_mult[THR_SPLITA ] = 5000;
-
- sf->thresh_mult[THR_COMP_ZEROLG ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTLG] = 0;
- sf->thresh_mult[THR_COMP_NEARLG ] = 0;
- sf->thresh_mult[THR_COMP_ZEROLA ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTLA] = 0;
- sf->thresh_mult[THR_COMP_NEARLA ] = 0;
- sf->thresh_mult[THR_COMP_ZEROGA ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTGA] = 0;
- sf->thresh_mult[THR_COMP_NEARGA ] = 0;
-
- sf->thresh_mult[THR_COMP_NEWLG ] = 1000;
- sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
- sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
-
- sf->thresh_mult[THR_COMP_SPLITLA ] = 2500;
- sf->thresh_mult[THR_COMP_SPLITGA ] = 5000;
- sf->thresh_mult[THR_COMP_SPLITLG ] = 5000;
-
- sf->first_step = 0;
- sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ sf->thresh_mult[THR_SPLITMV ] = 2500;
+ sf->thresh_mult[THR_SPLITG ] = 5000;
+ sf->thresh_mult[THR_SPLITA ] = 5000;
+
+ sf->thresh_mult[THR_COMP_ZEROLG ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTLG] = 0;
+ sf->thresh_mult[THR_COMP_NEARLG ] = 0;
+ sf->thresh_mult[THR_COMP_ZEROLA ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 0;
+ sf->thresh_mult[THR_COMP_NEARLA ] = 0;
+ sf->thresh_mult[THR_COMP_ZEROGA ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = 0;
+ sf->thresh_mult[THR_COMP_NEARGA ] = 0;
+
+ sf->thresh_mult[THR_COMP_NEWLG ] = 1000;
+ sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
+ sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 2500;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 5000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 5000;
+
+ sf->first_step = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
#if CONFIG_ENHANCED_INTERP
- sf->search_best_filter = SEARCH_BEST_FILTER;
+ sf->search_best_filter = SEARCH_BEST_FILTER;
#endif
- break;
+ break;
case 1:
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTMV] = 0;
- sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
- sf->thresh_mult[THR_DC ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_NEARMV_FILT ] = 0;
- sf->thresh_mult[THR_V_PRED ] = 1000;
- sf->thresh_mult[THR_H_PRED ] = 1000;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
+ sf->thresh_mult[THR_DC ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_NEARMV_FILT ] = 0;
+ sf->thresh_mult[THR_V_PRED ] = 1000;
+ sf->thresh_mult[THR_H_PRED ] = 1000;
#if CONFIG_NEWINTRAMODES
- sf->thresh_mult[THR_D45_PRED ] = 1000;
- sf->thresh_mult[THR_D135_PRED] = 1000;
- sf->thresh_mult[THR_D117_PRED] = 1000;
- sf->thresh_mult[THR_D153_PRED] = 1000;
- sf->thresh_mult[THR_D27_PRED ] = 1000;
- sf->thresh_mult[THR_D63_PRED ] = 1000;
+ sf->thresh_mult[THR_D45_PRED ] = 1000;
+ sf->thresh_mult[THR_D135_PRED] = 1000;
+ sf->thresh_mult[THR_D117_PRED] = 1000;
+ sf->thresh_mult[THR_D153_PRED] = 1000;
+ sf->thresh_mult[THR_D27_PRED ] = 1000;
+ sf->thresh_mult[THR_D63_PRED ] = 1000;
#endif
- sf->thresh_mult[THR_B_PRED ] = 2500;
- sf->thresh_mult[THR_I8X8_PRED] = 2500;
- sf->thresh_mult[THR_TM ] = 1000;
-
- sf->thresh_mult[THR_NEARESTG ] = 1000;
- sf->thresh_mult[THR_NEARESTG_FILT ] = 1000;
- sf->thresh_mult[THR_NEARESTA ] = 1000;
- sf->thresh_mult[THR_NEARESTA_FILT ] = 1000;
-
- sf->thresh_mult[THR_ZEROG ] = 1000;
- sf->thresh_mult[THR_ZEROA ] = 1000;
- sf->thresh_mult[THR_NEARG ] = 1000;
- sf->thresh_mult[THR_NEARA ] = 1000;
- sf->thresh_mult[THR_ZEROG_FILT ] = 1000;
- sf->thresh_mult[THR_ZEROA_FILT ] = 1000;
- sf->thresh_mult[THR_NEARG_FILT ] = 1000;
- sf->thresh_mult[THR_NEARA_FILT ] = 1000;
-
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_ZEROG ] = 0;
- sf->thresh_mult[THR_ZEROA ] = 0;
- sf->thresh_mult[THR_NEARESTMV] = 0;
- sf->thresh_mult[THR_NEARESTG ] = 0;
- sf->thresh_mult[THR_NEARESTA ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_NEARG ] = 0;
- sf->thresh_mult[THR_NEARA ] = 0;
- sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
- sf->thresh_mult[THR_ZEROG_FILT ] = 0;
- sf->thresh_mult[THR_ZEROA_FILT ] = 0;
- sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
- sf->thresh_mult[THR_NEARESTG_FILT ] = 0;
- sf->thresh_mult[THR_NEARESTA_FILT ] = 0;
- sf->thresh_mult[THR_NEARMV_FILT ] = 0;
- sf->thresh_mult[THR_NEARG_FILT ] = 0;
- sf->thresh_mult[THR_NEARA_FILT ] = 0;
-
- sf->thresh_mult[THR_NEWMV ] = 1000;
- sf->thresh_mult[THR_NEWG ] = 1000;
- sf->thresh_mult[THR_NEWA ] = 1000;
- sf->thresh_mult[THR_NEWMV_FILT ] = 1000;
- sf->thresh_mult[THR_NEWG_FILT ] = 1000;
- sf->thresh_mult[THR_NEWA_FILT ] = 1000;
+ sf->thresh_mult[THR_B_PRED ] = 2500;
+ sf->thresh_mult[THR_I8X8_PRED] = 2500;
+ sf->thresh_mult[THR_TM ] = 1000;
+
+ sf->thresh_mult[THR_NEARESTG ] = 1000;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = 1000;
+ sf->thresh_mult[THR_NEARESTA ] = 1000;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = 1000;
+
+ sf->thresh_mult[THR_ZEROG ] = 1000;
+ sf->thresh_mult[THR_ZEROA ] = 1000;
+ sf->thresh_mult[THR_NEARG ] = 1000;
+ sf->thresh_mult[THR_NEARA ] = 1000;
+ sf->thresh_mult[THR_ZEROG_FILT ] = 1000;
+ sf->thresh_mult[THR_ZEROA_FILT ] = 1000;
+ sf->thresh_mult[THR_NEARG_FILT ] = 1000;
+ sf->thresh_mult[THR_NEARA_FILT ] = 1000;
+
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROG ] = 0;
+ sf->thresh_mult[THR_ZEROA ] = 0;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_NEARG ] = 0;
+ sf->thresh_mult[THR_NEARA ] = 0;
+ sf->thresh_mult[THR_ZEROMV_FILT ] = 0;
+ sf->thresh_mult[THR_ZEROG_FILT ] = 0;
+ sf->thresh_mult[THR_ZEROA_FILT ] = 0;
+ sf->thresh_mult[THR_NEARESTMV_FILT] = 0;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = 0;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = 0;
+ sf->thresh_mult[THR_NEARMV_FILT ] = 0;
+ sf->thresh_mult[THR_NEARG_FILT ] = 0;
+ sf->thresh_mult[THR_NEARA_FILT ] = 0;
+
+ sf->thresh_mult[THR_NEWMV ] = 1000;
+ sf->thresh_mult[THR_NEWG ] = 1000;
+ sf->thresh_mult[THR_NEWA ] = 1000;
+ sf->thresh_mult[THR_NEWMV_FILT ] = 1000;
+ sf->thresh_mult[THR_NEWG_FILT ] = 1000;
+ sf->thresh_mult[THR_NEWA_FILT ] = 1000;
#else
- sf->thresh_mult[THR_NEARESTMV] = 0;
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_DC ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_V_PRED ] = 1000;
- sf->thresh_mult[THR_H_PRED ] = 1000;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_DC ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_V_PRED ] = 1000;
+ sf->thresh_mult[THR_H_PRED ] = 1000;
#if CONFIG_NEWINTRAMODES
- sf->thresh_mult[THR_D45_PRED ] = 1000;
- sf->thresh_mult[THR_D135_PRED] = 1000;
- sf->thresh_mult[THR_D117_PRED] = 1000;
- sf->thresh_mult[THR_D153_PRED] = 1000;
- sf->thresh_mult[THR_D27_PRED ] = 1000;
- sf->thresh_mult[THR_D63_PRED ] = 1000;
+ sf->thresh_mult[THR_D45_PRED ] = 1000;
+ sf->thresh_mult[THR_D135_PRED] = 1000;
+ sf->thresh_mult[THR_D117_PRED] = 1000;
+ sf->thresh_mult[THR_D153_PRED] = 1000;
+ sf->thresh_mult[THR_D27_PRED ] = 1000;
+ sf->thresh_mult[THR_D63_PRED ] = 1000;
#endif
- sf->thresh_mult[THR_B_PRED ] = 2500;
- sf->thresh_mult[THR_I8X8_PRED] = 2500;
- sf->thresh_mult[THR_TM ] = 1000;
-
- sf->thresh_mult[THR_NEARESTG ] = 1000;
- sf->thresh_mult[THR_NEARESTA ] = 1000;
-
- sf->thresh_mult[THR_ZEROG ] = 1000;
- sf->thresh_mult[THR_ZEROA ] = 1000;
- sf->thresh_mult[THR_NEARG ] = 1000;
- sf->thresh_mult[THR_NEARA ] = 1000;
-
- sf->thresh_mult[THR_ZEROMV ] = 0;
- sf->thresh_mult[THR_ZEROG ] = 0;
- sf->thresh_mult[THR_ZEROA ] = 0;
- sf->thresh_mult[THR_NEARESTMV] = 0;
- sf->thresh_mult[THR_NEARESTG ] = 0;
- sf->thresh_mult[THR_NEARESTA ] = 0;
- sf->thresh_mult[THR_NEARMV ] = 0;
- sf->thresh_mult[THR_NEARG ] = 0;
- sf->thresh_mult[THR_NEARA ] = 0;
-
- sf->thresh_mult[THR_NEWMV ] = 1000;
- sf->thresh_mult[THR_NEWG ] = 1000;
- sf->thresh_mult[THR_NEWA ] = 1000;
+ sf->thresh_mult[THR_B_PRED ] = 2500;
+ sf->thresh_mult[THR_I8X8_PRED] = 2500;
+ sf->thresh_mult[THR_TM ] = 1000;
+
+ sf->thresh_mult[THR_NEARESTG ] = 1000;
+ sf->thresh_mult[THR_NEARESTA ] = 1000;
+
+ sf->thresh_mult[THR_ZEROG ] = 1000;
+ sf->thresh_mult[THR_ZEROA ] = 1000;
+ sf->thresh_mult[THR_NEARG ] = 1000;
+ sf->thresh_mult[THR_NEARA ] = 1000;
+
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROG ] = 0;
+ sf->thresh_mult[THR_ZEROA ] = 0;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 0;
+ sf->thresh_mult[THR_NEARG ] = 0;
+ sf->thresh_mult[THR_NEARA ] = 0;
+
+ sf->thresh_mult[THR_NEWMV ] = 1000;
+ sf->thresh_mult[THR_NEWG ] = 1000;
+ sf->thresh_mult[THR_NEWA ] = 1000;
#endif
- sf->thresh_mult[THR_SPLITMV ] = 1700;
- sf->thresh_mult[THR_SPLITG ] = 4500;
- sf->thresh_mult[THR_SPLITA ] = 4500;
-
- sf->thresh_mult[THR_COMP_ZEROLG ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTLG] = 0;
- sf->thresh_mult[THR_COMP_NEARLG ] = 0;
- sf->thresh_mult[THR_COMP_ZEROLA ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTLA] = 0;
- sf->thresh_mult[THR_COMP_NEARLA ] = 0;
- sf->thresh_mult[THR_COMP_ZEROGA ] = 0;
- sf->thresh_mult[THR_COMP_NEARESTGA] = 0;
- sf->thresh_mult[THR_COMP_NEARGA ] = 0;
-
- sf->thresh_mult[THR_COMP_NEWLG ] = 1000;
- sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
- sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
-
- sf->thresh_mult[THR_COMP_SPLITLA ] = 1700;
- sf->thresh_mult[THR_COMP_SPLITGA ] = 4500;
- sf->thresh_mult[THR_COMP_SPLITLG ] = 4500;
-
- if (Speed > 0)
- {
- /* Disable coefficient optimization above speed 0 */
- sf->optimize_coefficients = 0;
- sf->no_skip_block4x4_search = 0;
+ sf->thresh_mult[THR_SPLITMV ] = 1700;
+ sf->thresh_mult[THR_SPLITG ] = 4500;
+ sf->thresh_mult[THR_SPLITA ] = 4500;
+
+ sf->thresh_mult[THR_COMP_ZEROLG ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTLG] = 0;
+ sf->thresh_mult[THR_COMP_NEARLG ] = 0;
+ sf->thresh_mult[THR_COMP_ZEROLA ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 0;
+ sf->thresh_mult[THR_COMP_NEARLA ] = 0;
+ sf->thresh_mult[THR_COMP_ZEROGA ] = 0;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = 0;
+ sf->thresh_mult[THR_COMP_NEARGA ] = 0;
+
+ sf->thresh_mult[THR_COMP_NEWLG ] = 1000;
+ sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
+ sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 1700;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 4500;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 4500;
+
+ if (Speed > 0) {
+ /* Disable coefficient optimization above speed 0 */
+ sf->optimize_coefficients = 0;
+ sf->no_skip_block4x4_search = 0;
- sf->first_step = 1;
+ sf->first_step = 1;
- cpi->mode_check_freq[THR_SPLITG] = 2;
- cpi->mode_check_freq[THR_SPLITA] = 2;
- cpi->mode_check_freq[THR_SPLITMV] = 0;
+ cpi->mode_check_freq[THR_SPLITG] = 2;
+ cpi->mode_check_freq[THR_SPLITA] = 2;
+ cpi->mode_check_freq[THR_SPLITMV] = 0;
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 2;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 2;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 0;
- }
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 0;
+ }
- if (Speed > 1)
- {
- cpi->mode_check_freq[THR_SPLITG] = 4;
- cpi->mode_check_freq[THR_SPLITA] = 4;
- cpi->mode_check_freq[THR_SPLITMV] = 2;
+ if (Speed > 1) {
+ cpi->mode_check_freq[THR_SPLITG] = 4;
+ cpi->mode_check_freq[THR_SPLITA] = 4;
+ cpi->mode_check_freq[THR_SPLITMV] = 2;
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 4;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 4;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 2;
- sf->thresh_mult[THR_TM ] = 1500;
- sf->thresh_mult[THR_V_PRED ] = 1500;
- sf->thresh_mult[THR_H_PRED ] = 1500;
+ sf->thresh_mult[THR_TM ] = 1500;
+ sf->thresh_mult[THR_V_PRED ] = 1500;
+ sf->thresh_mult[THR_H_PRED ] = 1500;
#if CONFIG_NEWINTRAMODES
- sf->thresh_mult[THR_D45_PRED ] = 1500;
- sf->thresh_mult[THR_D135_PRED] = 1500;
- sf->thresh_mult[THR_D117_PRED] = 1500;
- sf->thresh_mult[THR_D153_PRED] = 1500;
- sf->thresh_mult[THR_D27_PRED ] = 1500;
- sf->thresh_mult[THR_D63_PRED ] = 1500;
+ sf->thresh_mult[THR_D45_PRED ] = 1500;
+ sf->thresh_mult[THR_D135_PRED] = 1500;
+ sf->thresh_mult[THR_D117_PRED] = 1500;
+ sf->thresh_mult[THR_D153_PRED] = 1500;
+ sf->thresh_mult[THR_D27_PRED ] = 1500;
+ sf->thresh_mult[THR_D63_PRED ] = 1500;
#endif
- sf->thresh_mult[THR_B_PRED ] = 5000;
- sf->thresh_mult[THR_I8X8_PRED] = 5000;
+ sf->thresh_mult[THR_B_PRED ] = 5000;
+ sf->thresh_mult[THR_I8X8_PRED] = 5000;
- if (cpi->ref_frame_flags & VP8_LAST_FLAG)
- {
- sf->thresh_mult[THR_NEWMV ] = 2000;
+ if (cpi->ref_frame_flags & VP8_LAST_FLAG) {
+ sf->thresh_mult[THR_NEWMV ] = 2000;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEWMV_FILT ] = 2000;
+ sf->thresh_mult[THR_NEWMV_FILT ] = 2000;
#endif
- sf->thresh_mult[THR_SPLITMV ] = 10000;
- sf->thresh_mult[THR_COMP_SPLITLG ] = 20000;
- }
+ sf->thresh_mult[THR_SPLITMV ] = 10000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 20000;
+ }
- if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
- {
- sf->thresh_mult[THR_NEARESTG ] = 1500;
- sf->thresh_mult[THR_ZEROG ] = 1500;
- sf->thresh_mult[THR_NEARG ] = 1500;
- sf->thresh_mult[THR_NEWG ] = 2000;
+ if (cpi->ref_frame_flags & VP8_GOLD_FLAG) {
+ sf->thresh_mult[THR_NEARESTG ] = 1500;
+ sf->thresh_mult[THR_ZEROG ] = 1500;
+ sf->thresh_mult[THR_NEARG ] = 1500;
+ sf->thresh_mult[THR_NEWG ] = 2000;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTG_FILT ] = 1500;
- sf->thresh_mult[THR_ZEROG_FILT ] = 1500;
- sf->thresh_mult[THR_NEARG_FILT ] = 1500;
- sf->thresh_mult[THR_NEWG_FILT ] = 2000;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = 1500;
+ sf->thresh_mult[THR_ZEROG_FILT ] = 1500;
+ sf->thresh_mult[THR_NEARG_FILT ] = 1500;
+ sf->thresh_mult[THR_NEWG_FILT ] = 2000;
#endif
- sf->thresh_mult[THR_SPLITG ] = 20000;
- sf->thresh_mult[THR_COMP_SPLITGA ] = 20000;
- }
+ sf->thresh_mult[THR_SPLITG ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 20000;
+ }
- if (cpi->ref_frame_flags & VP8_ALT_FLAG)
- {
- sf->thresh_mult[THR_NEARESTA ] = 1500;
- sf->thresh_mult[THR_ZEROA ] = 1500;
- sf->thresh_mult[THR_NEARA ] = 1500;
- sf->thresh_mult[THR_NEWA ] = 2000;
+ if (cpi->ref_frame_flags & VP8_ALT_FLAG) {
+ sf->thresh_mult[THR_NEARESTA ] = 1500;
+ sf->thresh_mult[THR_ZEROA ] = 1500;
+ sf->thresh_mult[THR_NEARA ] = 1500;
+ sf->thresh_mult[THR_NEWA ] = 2000;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTA_FILT ] = 1500;
- sf->thresh_mult[THR_ZEROA_FILT ] = 1500;
- sf->thresh_mult[THR_NEARA_FILT ] = 1500;
- sf->thresh_mult[THR_NEWA_FILT ] = 2000;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = 1500;
+ sf->thresh_mult[THR_ZEROA_FILT ] = 1500;
+ sf->thresh_mult[THR_NEARA_FILT ] = 1500;
+ sf->thresh_mult[THR_NEWA_FILT ] = 2000;
#endif
- sf->thresh_mult[THR_SPLITA ] = 20000;
- sf->thresh_mult[THR_COMP_SPLITLA ] = 10000;
- }
-
- sf->thresh_mult[THR_COMP_ZEROLG ] = 1500;
- sf->thresh_mult[THR_COMP_NEARESTLG] = 1500;
- sf->thresh_mult[THR_COMP_NEARLG ] = 1500;
- sf->thresh_mult[THR_COMP_ZEROLA ] = 1500;
- sf->thresh_mult[THR_COMP_NEARESTLA] = 1500;
- sf->thresh_mult[THR_COMP_NEARLA ] = 1500;
- sf->thresh_mult[THR_COMP_ZEROGA ] = 1500;
- sf->thresh_mult[THR_COMP_NEARESTGA] = 1500;
- sf->thresh_mult[THR_COMP_NEARGA ] = 1500;
-
- sf->thresh_mult[THR_COMP_NEWLG ] = 2000;
- sf->thresh_mult[THR_COMP_NEWLA ] = 2000;
- sf->thresh_mult[THR_COMP_NEWGA ] = 2000;
+ sf->thresh_mult[THR_SPLITA ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 10000;
}
- if (Speed > 2)
- {
- cpi->mode_check_freq[THR_SPLITG] = 15;
- cpi->mode_check_freq[THR_SPLITA] = 15;
- cpi->mode_check_freq[THR_SPLITMV] = 7;
-
- cpi->mode_check_freq[THR_COMP_SPLITGA] = 15;
- cpi->mode_check_freq[THR_COMP_SPLITLG] = 15;
- cpi->mode_check_freq[THR_COMP_SPLITLA] = 7;
-
- sf->thresh_mult[THR_TM ] = 2000;
- sf->thresh_mult[THR_V_PRED ] = 2000;
- sf->thresh_mult[THR_H_PRED ] = 2000;
+ sf->thresh_mult[THR_COMP_ZEROLG ] = 1500;
+ sf->thresh_mult[THR_COMP_NEARESTLG] = 1500;
+ sf->thresh_mult[THR_COMP_NEARLG ] = 1500;
+ sf->thresh_mult[THR_COMP_ZEROLA ] = 1500;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 1500;
+ sf->thresh_mult[THR_COMP_NEARLA ] = 1500;
+ sf->thresh_mult[THR_COMP_ZEROGA ] = 1500;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = 1500;
+ sf->thresh_mult[THR_COMP_NEARGA ] = 1500;
+
+ sf->thresh_mult[THR_COMP_NEWLG ] = 2000;
+ sf->thresh_mult[THR_COMP_NEWLA ] = 2000;
+ sf->thresh_mult[THR_COMP_NEWGA ] = 2000;
+ }
+
+ if (Speed > 2) {
+ cpi->mode_check_freq[THR_SPLITG] = 15;
+ cpi->mode_check_freq[THR_SPLITA] = 15;
+ cpi->mode_check_freq[THR_SPLITMV] = 7;
+
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 7;
+
+ sf->thresh_mult[THR_TM ] = 2000;
+ sf->thresh_mult[THR_V_PRED ] = 2000;
+ sf->thresh_mult[THR_H_PRED ] = 2000;
#if CONFIG_NEWINTRAMODES
- sf->thresh_mult[THR_D45_PRED ] = 2000;
- sf->thresh_mult[THR_D135_PRED] = 2000;
- sf->thresh_mult[THR_D117_PRED] = 2000;
- sf->thresh_mult[THR_D153_PRED] = 2000;
- sf->thresh_mult[THR_D27_PRED ] = 2000;
- sf->thresh_mult[THR_D63_PRED ] = 2000;
+ sf->thresh_mult[THR_D45_PRED ] = 2000;
+ sf->thresh_mult[THR_D135_PRED] = 2000;
+ sf->thresh_mult[THR_D117_PRED] = 2000;
+ sf->thresh_mult[THR_D153_PRED] = 2000;
+ sf->thresh_mult[THR_D27_PRED ] = 2000;
+ sf->thresh_mult[THR_D63_PRED ] = 2000;
#endif
- sf->thresh_mult[THR_B_PRED ] = 7500;
- sf->thresh_mult[THR_I8X8_PRED] = 7500;
+ sf->thresh_mult[THR_B_PRED ] = 7500;
+ sf->thresh_mult[THR_I8X8_PRED] = 7500;
- if (cpi->ref_frame_flags & VP8_LAST_FLAG)
- {
- sf->thresh_mult[THR_NEWMV ] = 2000;
+ if (cpi->ref_frame_flags & VP8_LAST_FLAG) {
+ sf->thresh_mult[THR_NEWMV ] = 2000;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEWMV_FILT ] = 2000;
+ sf->thresh_mult[THR_NEWMV_FILT ] = 2000;
#endif
- sf->thresh_mult[THR_SPLITMV ] = 25000;
- sf->thresh_mult[THR_COMP_SPLITLG ] = 50000;
- }
+ sf->thresh_mult[THR_SPLITMV ] = 25000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 50000;
+ }
- if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
- {
- sf->thresh_mult[THR_NEARESTG ] = 2000;
- sf->thresh_mult[THR_ZEROG ] = 2000;
- sf->thresh_mult[THR_NEARG ] = 2000;
- sf->thresh_mult[THR_NEWG ] = 2500;
+ if (cpi->ref_frame_flags & VP8_GOLD_FLAG) {
+ sf->thresh_mult[THR_NEARESTG ] = 2000;
+ sf->thresh_mult[THR_ZEROG ] = 2000;
+ sf->thresh_mult[THR_NEARG ] = 2000;
+ sf->thresh_mult[THR_NEWG ] = 2500;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTG_FILT ] = 2000;
- sf->thresh_mult[THR_ZEROG_FILT ] = 2000;
- sf->thresh_mult[THR_NEARG_FILT ] = 2000;
- sf->thresh_mult[THR_NEWG_FILT ] = 2500;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = 2000;
+ sf->thresh_mult[THR_ZEROG_FILT ] = 2000;
+ sf->thresh_mult[THR_NEARG_FILT ] = 2000;
+ sf->thresh_mult[THR_NEWG_FILT ] = 2500;
#endif
- sf->thresh_mult[THR_SPLITG ] = 50000;
- sf->thresh_mult[THR_COMP_SPLITGA ] = 50000;
- }
+ sf->thresh_mult[THR_SPLITG ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 50000;
+ }
- if (cpi->ref_frame_flags & VP8_ALT_FLAG)
- {
- sf->thresh_mult[THR_NEARESTA ] = 2000;
- sf->thresh_mult[THR_ZEROA ] = 2000;
- sf->thresh_mult[THR_NEARA ] = 2000;
- sf->thresh_mult[THR_NEWA ] = 2500;
+ if (cpi->ref_frame_flags & VP8_ALT_FLAG) {
+ sf->thresh_mult[THR_NEARESTA ] = 2000;
+ sf->thresh_mult[THR_ZEROA ] = 2000;
+ sf->thresh_mult[THR_NEARA ] = 2000;
+ sf->thresh_mult[THR_NEWA ] = 2500;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTA_FILT ] = 2000;
- sf->thresh_mult[THR_ZEROA_FILT ] = 2000;
- sf->thresh_mult[THR_NEARA_FILT ] = 2000;
- sf->thresh_mult[THR_NEWA_FILT ] = 2500;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = 2000;
+ sf->thresh_mult[THR_ZEROA_FILT ] = 2000;
+ sf->thresh_mult[THR_NEARA_FILT ] = 2000;
+ sf->thresh_mult[THR_NEWA_FILT ] = 2500;
#endif
- sf->thresh_mult[THR_SPLITA ] = 50000;
- sf->thresh_mult[THR_COMP_SPLITLA ] = 25000;
- }
+ sf->thresh_mult[THR_SPLITA ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 25000;
+ }
- sf->thresh_mult[THR_COMP_ZEROLG ] = 2000;
- sf->thresh_mult[THR_COMP_NEARESTLG] = 2000;
- sf->thresh_mult[THR_COMP_NEARLG ] = 2000;
- sf->thresh_mult[THR_COMP_ZEROLA ] = 2000;
- sf->thresh_mult[THR_COMP_NEARESTLA] = 2000;
- sf->thresh_mult[THR_COMP_NEARLA ] = 2000;
- sf->thresh_mult[THR_COMP_ZEROGA ] = 2000;
- sf->thresh_mult[THR_COMP_NEARESTGA] = 2000;
- sf->thresh_mult[THR_COMP_NEARGA ] = 2000;
+ sf->thresh_mult[THR_COMP_ZEROLG ] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTLG] = 2000;
+ sf->thresh_mult[THR_COMP_NEARLG ] = 2000;
+ sf->thresh_mult[THR_COMP_ZEROLA ] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 2000;
+ sf->thresh_mult[THR_COMP_NEARLA ] = 2000;
+ sf->thresh_mult[THR_COMP_ZEROGA ] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = 2000;
+ sf->thresh_mult[THR_COMP_NEARGA ] = 2000;
- sf->thresh_mult[THR_COMP_NEWLG ] = 2500;
- sf->thresh_mult[THR_COMP_NEWLA ] = 2500;
- sf->thresh_mult[THR_COMP_NEWGA ] = 2500;
+ sf->thresh_mult[THR_COMP_NEWLG ] = 2500;
+ sf->thresh_mult[THR_COMP_NEWLA ] = 2500;
+ sf->thresh_mult[THR_COMP_NEWGA ] = 2500;
- sf->improved_dct = 0;
+ sf->improved_dct = 0;
- // Only do recode loop on key frames, golden frames and
- // alt ref frames
- sf->recode_loop = 2;
+ // Only do recode loop on key frames, golden frames and
+ // alt ref frames
+ sf->recode_loop = 2;
- }
+ }
- break;
+ break;
- }; /* switch */
+ } /* switch */
- /* disable frame modes if flags not set */
- if (!(cpi->ref_frame_flags & VP8_LAST_FLAG))
- {
- sf->thresh_mult[THR_NEWMV ] = INT_MAX;
- sf->thresh_mult[THR_NEARESTMV] = INT_MAX;
- sf->thresh_mult[THR_ZEROMV ] = INT_MAX;
- sf->thresh_mult[THR_NEARMV ] = INT_MAX;
+ /* disable frame modes if flags not set */
+ if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) {
+ sf->thresh_mult[THR_NEWMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTMV] = INT_MAX;
+ sf->thresh_mult[THR_ZEROMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARMV ] = INT_MAX;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEWMV_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEARESTMV_FILT] = INT_MAX;
- sf->thresh_mult[THR_ZEROMV_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEARMV_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEWMV_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTMV_FILT] = INT_MAX;
+ sf->thresh_mult[THR_ZEROMV_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARMV_FILT ] = INT_MAX;
#endif
- sf->thresh_mult[THR_SPLITMV ] = INT_MAX;
- }
-
- if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG))
- {
- sf->thresh_mult[THR_NEARESTG ] = INT_MAX;
- sf->thresh_mult[THR_ZEROG ] = INT_MAX;
- sf->thresh_mult[THR_NEARG ] = INT_MAX;
- sf->thresh_mult[THR_NEWG ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITMV ] = INT_MAX;
+ }
+
+ if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) {
+ sf->thresh_mult[THR_NEARESTG ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROG ] = INT_MAX;
+ sf->thresh_mult[THR_NEARG ] = INT_MAX;
+ sf->thresh_mult[THR_NEWG ] = INT_MAX;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTG_FILT ] = INT_MAX;
- sf->thresh_mult[THR_ZEROG_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEARG_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEWG_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTG_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROG_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARG_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEWG_FILT ] = INT_MAX;
#endif
- sf->thresh_mult[THR_SPLITG ] = INT_MAX;
- }
-
- if (!(cpi->ref_frame_flags & VP8_ALT_FLAG))
- {
- sf->thresh_mult[THR_NEARESTA ] = INT_MAX;
- sf->thresh_mult[THR_ZEROA ] = INT_MAX;
- sf->thresh_mult[THR_NEARA ] = INT_MAX;
- sf->thresh_mult[THR_NEWA ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITG ] = INT_MAX;
+ }
+
+ if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) {
+ sf->thresh_mult[THR_NEARESTA ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROA ] = INT_MAX;
+ sf->thresh_mult[THR_NEARA ] = INT_MAX;
+ sf->thresh_mult[THR_NEWA ] = INT_MAX;
#if CONFIG_PRED_FILTER
- sf->thresh_mult[THR_NEARESTA_FILT ] = INT_MAX;
- sf->thresh_mult[THR_ZEROA_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEARA_FILT ] = INT_MAX;
- sf->thresh_mult[THR_NEWA_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTA_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROA_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEARA_FILT ] = INT_MAX;
+ sf->thresh_mult[THR_NEWA_FILT ] = INT_MAX;
#endif
- sf->thresh_mult[THR_SPLITA ] = INT_MAX;
- }
-
- if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) != (VP8_LAST_FLAG | VP8_GOLD_FLAG))
- {
- sf->thresh_mult[THR_COMP_ZEROLG ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARESTLG] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARLG ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEWLG ] = INT_MAX;
- sf->thresh_mult[THR_COMP_SPLITLG ] = INT_MAX;
- }
-
- if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_ALT_FLAG)) != (VP8_LAST_FLAG | VP8_ALT_FLAG))
- {
- sf->thresh_mult[THR_COMP_ZEROLA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
- }
-
- if ((cpi->ref_frame_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) != (VP8_GOLD_FLAG | VP8_ALT_FLAG))
- {
- sf->thresh_mult[THR_COMP_ZEROGA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
- sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
- }
-
- // Slow quant, dct and trellis not worthwhile for first pass
- // so make sure they are always turned off.
- if ( cpi->pass == 1 )
- {
- sf->optimize_coefficients = 0;
- sf->improved_dct = 0;
- }
+ sf->thresh_mult[THR_SPLITA ] = INT_MAX;
+ }
+
+ if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) != (VP8_LAST_FLAG | VP8_GOLD_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROLG ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTLG] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARLG ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWLG ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = INT_MAX;
+ }
+
+ if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_ALT_FLAG)) != (VP8_LAST_FLAG | VP8_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
+ }
+
+ if ((cpi->ref_frame_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) != (VP8_GOLD_FLAG | VP8_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
+ }
+
+ // Slow quant, dct and trellis not worthwhile for first pass
+ // so make sure they are always turned off.
+ if (cpi->pass == 1) {
+ sf->optimize_coefficients = 0;
+ sf->improved_dct = 0;
+ }
- if (cpi->sf.search_method == NSTEP)
- {
- vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
- }
- else if (cpi->sf.search_method == DIAMOND)
- {
- vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
- }
+ if (cpi->sf.search_method == NSTEP) {
+ vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
+ } else if (cpi->sf.search_method == DIAMOND) {
+ vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
+ }
- if (cpi->sf.improved_dct)
- {
- cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
- cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
- cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
- }
- else
- {
- cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
- cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
- cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
- }
+ if (cpi->sf.improved_dct) {
+ cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
+ cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
+ cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
+ } else {
+ cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
+ cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
+ cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
+ }
- cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
- cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
+ cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
+ cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
- cpi->mb.quantize_b = vp8_regular_quantize_b;
- cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
- cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
- cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
+ cpi->mb.quantize_b = vp8_regular_quantize_b;
+ cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
+ cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
+ cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
- vp8cx_init_quantizer(cpi);
+ vp8cx_init_quantizer(cpi);
#if CONFIG_RUNTIME_CPU_DETECT
- cpi->mb.e_mbd.rtcd = &cpi->common.rtcd;
+ cpi->mb.e_mbd.rtcd = &cpi->common.rtcd;
#endif
- if (cpi->sf.iterative_sub_pixel == 1)
- {
- cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
- }
- else if (cpi->sf.quarter_pixel_search)
- {
- cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
- }
- else if (cpi->sf.half_pixel_search)
- {
- cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
- }
+ if (cpi->sf.iterative_sub_pixel == 1) {
+ cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
+ } else if (cpi->sf.quarter_pixel_search) {
+ cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
+ } else if (cpi->sf.half_pixel_search) {
+ cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
+ }
- if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
- cpi->mb.optimize = 1;
- else
- cpi->mb.optimize = 0;
+ if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1)
+ cpi->mb.optimize = 1;
+ else
+ cpi->mb.optimize = 0;
#ifdef SPEEDSTATS
- frames_at_speed[cpi->Speed]++;
+ frames_at_speed[cpi->Speed]++;
#endif
}
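/*
 * Illustration (not part of this change): what thresh_mult is for. Each
 * candidate mode's RD threshold is assumed to be a baseline threshold
 * scaled by thresh_mult (the >> 7 scaling mirrors the usual VP8
 * arrangement), and INT_MAX effectively removes the mode from the search.
 */
#include <limits.h>
#include <stdio.h>

static int scaled_rd_thresh(int baseline, int thresh_mult) {
  if (thresh_mult == INT_MAX)
    return INT_MAX;                     /* mode disabled for this frame */
  return (baseline >> 7) * thresh_mult;
}

int main(void) {
  printf("%d\n", scaled_rd_thresh(1 << 14, 2000));  /* 128 * 2000 = 256000 */
  return 0;
}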
-static void alloc_raw_frame_buffers(VP8_COMP *cpi)
-{
- int width = (cpi->oxcf.Width + 15) & ~15;
- int height = (cpi->oxcf.Height + 15) & ~15;
+static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
+ int width = (cpi->oxcf.Width + 15) & ~15;
+ int height = (cpi->oxcf.Height + 15) & ~15;
- cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
- cpi->oxcf.lag_in_frames);
- if(!cpi->lookahead)
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate lag buffers");
+ cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
+ cpi->oxcf.lag_in_frames);
+ if (!cpi->lookahead)
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate lag buffers");
#if VP8_TEMPORAL_ALT_REF
- if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
- width, height, VP8BORDERINPIXELS))
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate altref buffer");
+ if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
+ width, height, VP8BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate altref buffer");
#endif
}
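/*
 * Illustration (not part of this change): the macroblock alignment used
 * when sizing the raw frame buffers above; (x + 15) & ~15 rounds x up to
 * the next multiple of 16.
 */
#include <stdio.h>

static int align16(int x) { return (x + 15) & ~15; }

int main(void) {
  printf("%d %d\n", align16(1280), align16(1281));  /* 1280 1296 */
  return 0;
}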
-static int vp8_alloc_partition_data(VP8_COMP *cpi)
-{
- vpx_free(cpi->mb.pip);
+static int vp8_alloc_partition_data(VP8_COMP *cpi) {
+ vpx_free(cpi->mb.pip);
- cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
- (cpi->common.mb_rows + 1),
- sizeof(PARTITION_INFO));
- if(!cpi->mb.pip)
- return 1;
+ cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
+ (cpi->common.mb_rows + 1),
+ sizeof(PARTITION_INFO));
+ if (!cpi->mb.pip)
+ return 1;
- cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
+ cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
- return 0;
+ return 0;
}
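/*
 * Illustration (not part of this change): the border layout behind
 * vp8_alloc_partition_data. The array is one column and one row larger than
 * the macroblock grid, and pi is offset past that border so that pi[-1] and
 * pi[-stride] (left/above neighbours of MB(0,0)) stay in bounds. The
 * stride == mb_cols + 1 equivalence is an assumption for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  int mb_rows = 2, mb_cols = 3;
  int stride = mb_cols + 1;
  int *pip = calloc((size_t)(mb_cols + 1) * (mb_rows + 1), sizeof(*pip));
  int *pi;
  if (!pip) return 1;
  pi = pip + stride + 1;        /* first real macroblock, MB(0,0) */
  pi[-1] = 7;                   /* left border element: still in bounds */
  pi[-stride] = 9;              /* above border element: still in bounds */
  printf("%d %d\n", pi[-1], pi[-stride]);
  free(pip);
  return 0;
}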
-void vp8_alloc_compressor_data(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = & cpi->common;
+void vp8_alloc_compressor_data(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- int width = cm->Width;
- int height = cm->Height;
+ int width = cm->Width;
+ int height = cm->Height;
- if (vp8_alloc_frame_buffers(cm, width, height))
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate frame buffers");
+ if (vp8_alloc_frame_buffers(cm, width, height))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
- if (vp8_alloc_partition_data(cpi))
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate partition data");
+ if (vp8_alloc_partition_data(cpi))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate partition data");
- if ((width & 0xf) != 0)
- width += 16 - (width & 0xf);
+ if ((width & 0xf) != 0)
+ width += 16 - (width & 0xf);
- if ((height & 0xf) != 0)
- height += 16 - (height & 0xf);
+ if ((height & 0xf) != 0)
+ height += 16 - (height & 0xf);
- if (vp8_yv12_alloc_frame_buffer(&cpi->last_frame_uf,
- width, height, VP8BORDERINPIXELS))
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate last frame buffer");
+ if (vp8_yv12_alloc_frame_buffer(&cpi->last_frame_uf,
+ width, height, VP8BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate last frame buffer");
- if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
- width, height, VP8BORDERINPIXELS))
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate scaled source buffer");
+ if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
+ width, height, VP8BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate scaled source buffer");
- vpx_free(cpi->tok);
+ vpx_free(cpi->tok);
- {
- unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
+ {
+ unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
- CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
- }
+ CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
+ }
- // Data used for real time vc mode to see if gf needs refreshing
- cpi->inter_zz_count = 0;
- cpi->gf_bad_count = 0;
- cpi->gf_update_recommended = 0;
+ // Data used for real time vc mode to see if gf needs refreshing
+ cpi->inter_zz_count = 0;
+ cpi->gf_bad_count = 0;
+ cpi->gf_update_recommended = 0;
- // Structures used to minitor GF usage
- vpx_free(cpi->gf_active_flags);
- CHECK_MEM_ERROR(cpi->gf_active_flags,
- vpx_calloc(1, cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+ // Structures used to monitor GF usage
+ vpx_free(cpi->gf_active_flags);
+ CHECK_MEM_ERROR(cpi->gf_active_flags,
+ vpx_calloc(1, cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- vpx_free(cpi->mb_activity_map);
- CHECK_MEM_ERROR(cpi->mb_activity_map,
- vpx_calloc(sizeof(unsigned int),
- cm->mb_rows * cm->mb_cols));
+ vpx_free(cpi->mb_activity_map);
+ CHECK_MEM_ERROR(cpi->mb_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
- vpx_free(cpi->mb_norm_activity_map);
- CHECK_MEM_ERROR(cpi->mb_norm_activity_map,
- vpx_calloc(sizeof(unsigned int),
- cm->mb_rows * cm->mb_cols));
+ vpx_free(cpi->mb_norm_activity_map);
+ CHECK_MEM_ERROR(cpi->mb_norm_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
- vpx_free(cpi->twopass.total_stats);
+ vpx_free(cpi->twopass.total_stats);
- cpi->twopass.total_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
+ cpi->twopass.total_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
- vpx_free(cpi->twopass.total_left_stats);
- cpi->twopass.total_left_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
+ vpx_free(cpi->twopass.total_left_stats);
+ cpi->twopass.total_left_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
- vpx_free(cpi->twopass.this_frame_stats);
+ vpx_free(cpi->twopass.this_frame_stats);
- cpi->twopass.this_frame_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
+ cpi->twopass.this_frame_stats = vpx_calloc(1, sizeof(FIRSTPASS_STATS));
- if( !cpi->twopass.total_stats ||
- !cpi->twopass.total_left_stats ||
- !cpi->twopass.this_frame_stats)
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate firstpass stats");
+ if (!cpi->twopass.total_stats ||
+ !cpi->twopass.total_left_stats ||
+ !cpi->twopass.this_frame_stats)
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate firstpass stats");
- vpx_free(cpi->tplist);
+ vpx_free(cpi->tplist);
- CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cpi->common.mb_rows));
+ CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cpi->common.mb_rows));
}
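/*
 * Illustration (not part of this change): the worst-case token budget
 * computed above is one token per coefficient across 24 4x4 blocks of 16
 * coefficients per macroblock. The 1080p dimensions below are only an
 * example.
 */
#include <stdio.h>

int main(void) {
  unsigned int mb_rows = (1080 + 15) / 16;           /* 68 */
  unsigned int mb_cols = (1920 + 15) / 16;           /* 120 */
  unsigned int tokens = mb_rows * mb_cols * 24 * 16;
  printf("%u tokens\n", tokens);                     /* 3133440 */
  return 0;
}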
@@ -1422,968 +1353,917 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
//
// Table that converts 0-63 Q range values passed in outside to the Qindex
// range used internally.
-static const int q_trans[] =
-{
- 0, 4, 8, 12, 16, 20, 24, 28,
- 32, 36, 40, 44, 48, 52, 56, 60,
- 64, 68, 72, 76, 80, 84, 88, 92,
- 96, 100, 104, 108, 112, 116, 120, 124,
- 128, 132, 136, 140, 144, 148, 152, 156,
- 160, 164, 168, 172, 176, 180, 184, 188,
- 192, 196, 200, 204, 208, 212, 216, 220,
- 224, 228, 232, 236, 240, 244, 249, 255,
+static const int q_trans[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28,
+ 32, 36, 40, 44, 48, 52, 56, 60,
+ 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124,
+ 128, 132, 136, 140, 144, 148, 152, 156,
+ 160, 164, 168, 172, 176, 180, 184, 188,
+ 192, 196, 200, 204, 208, 212, 216, 220,
+ 224, 228, 232, 236, 240, 244, 249, 255,
};
-int vp8_reverse_trans(int x)
-{
- int i;
+int vp8_reverse_trans(int x) {
+ int i;
- for (i = 0; i < 64; i++)
- if (q_trans[i] >= x)
- return i;
+ for (i = 0; i < 64; i++)
+ if (q_trans[i] >= x)
+ return i;
- return 63;
+ return 63;
};
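/*
 * Illustration (not part of this change): the external 0-63 Q value and the
 * internal qindex round-trip through q_trans / vp8_reverse_trans; the
 * reverse scan returns the first external value whose qindex is >= the
 * input. Only the first few table entries are reproduced here.
 */
#include <stdio.h>

static const int q_trans_sketch[] = { 0, 4, 8, 12 };  /* table continues */

static int reverse_trans_sketch(int x) {
  int i;
  for (i = 0; i < 4; i++)
    if (q_trans_sketch[i] >= x)
      return i;
  return 3;
}

int main(void) {
  printf("%d\n", reverse_trans_sketch(10));  /* first entry >= 10 is index 3 */
  return 0;
}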
-void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
-{
- if(framerate < .1)
- framerate = 30;
+void vp8_new_frame_rate(VP8_COMP *cpi, double framerate) {
+ if (framerate < .1)
+ framerate = 30;
- cpi->oxcf.frame_rate = framerate;
- cpi->output_frame_rate = cpi->oxcf.frame_rate;
- cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate);
- cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate);
- cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100);
+ cpi->oxcf.frame_rate = framerate;
+ cpi->output_frame_rate = cpi->oxcf.frame_rate;
+ cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate);
+ cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate);
+ cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100);
- if (cpi->min_frame_bandwidth < FRAME_OVERHEAD_BITS )
- cpi->min_frame_bandwidth = FRAME_OVERHEAD_BITS;
+ if (cpi->min_frame_bandwidth < FRAME_OVERHEAD_BITS)
+ cpi->min_frame_bandwidth = FRAME_OVERHEAD_BITS;
- // Set Maximum gf/arf interval
- cpi->max_gf_interval = ((int)(cpi->output_frame_rate / 2.0) + 2);
+ // Set Maximum gf/arf interval
+ cpi->max_gf_interval = ((int)(cpi->output_frame_rate / 2.0) + 2);
- if(cpi->max_gf_interval < 12)
- cpi->max_gf_interval = 12;
+ if (cpi->max_gf_interval < 12)
+ cpi->max_gf_interval = 12;
- // Extended interval for genuinely static scenes
- cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+ // Extended interval for genuinely static scenes
+ cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
- // Special conditions when altr ref frame enabled in lagged compress mode
- if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
- {
- if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
- cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ // Special conditions when alt ref frame is enabled in lagged compress mode
+ if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
+ if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
- if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
- cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
- }
+ if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ }
- if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval )
- cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
+ if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval)
+ cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
}
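/*
 * Illustration (not part of this change): the rate numbers derived above
 * for an assumed 800 kbit/s target at 30 fps with a 5% two-pass VBR
 * minimum section.
 */
#include <stdio.h>

int main(void) {
  double target_bandwidth = 800000.0;                   /* bits per second */
  double framerate = 30.0;
  int per_frame = (int)(target_bandwidth / framerate);  /* 26666 bits */
  int min_frame = per_frame * 5 / 100;                  /* 1333 bits */
  int max_gf_interval = (int)(framerate / 2.0) + 2;     /* 17 frames */
  printf("%d %d %d\n", per_frame, min_frame, max_gf_interval);
  return 0;
}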
static int
-rescale(int val, int num, int denom)
-{
- int64_t llnum = num;
- int64_t llden = denom;
- int64_t llval = val;
+rescale(int val, int num, int denom) {
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
- return llval * llnum / llden;
+ return llval * llnum / llden;
}
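/*
 * Illustration (not part of this change): why rescale() widens to 64 bits.
 * Buffer levels arrive in milliseconds and are converted to bits at the
 * target bitrate, so val * num can overflow 32 bits for large bitrates.
 */
#include <stdint.h>
#include <stdio.h>

static int rescale_sketch(int val, int num, int denom) {
  int64_t llnum = num, llden = denom, llval = val;
  return (int)(llval * llnum / llden);
}

int main(void) {
  /* 5000 ms of buffer at 2 Mbit/s -> 10000000 bits; the 32-bit product
   * 5000 * 2000000 would already have overflowed. */
  printf("%d\n", rescale_sketch(5000, 2000000, 1000));
  return 0;
}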
-static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
- VP8_COMMON *cm = &cpi->common;
+static void init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ VP8_COMMON *cm = &cpi->common;
- cpi->oxcf = *oxcf;
+ cpi->oxcf = *oxcf;
- cpi->goldfreq = 7;
+ cpi->goldfreq = 7;
- cm->version = oxcf->Version;
- vp8_setup_version(cm);
+ cm->version = oxcf->Version;
+ vp8_setup_version(cm);
- // change includes all joint functionality
- vp8_change_config(ptr, oxcf);
+ // change includes all joint functionality
+ vp8_change_config(ptr, oxcf);
- // Initialize active best and worst q and average q values.
- cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
- cpi->active_best_quality = cpi->oxcf.best_allowed_q;
- cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+ // Initialize active best and worst q and average q values.
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
- // Initialise the starting buffer levels
- cpi->buffer_level = cpi->oxcf.starting_buffer_level;
- cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+ // Initialise the starting buffer levels
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
- cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
- cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
- cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
- cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
+ cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
- cpi->total_actual_bits = 0;
- cpi->total_target_vs_actual = 0;
+ cpi->total_actual_bits = 0;
+ cpi->total_target_vs_actual = 0;
- cpi->static_mb_pct = 0;
+ cpi->static_mb_pct = 0;
#if VP8_TEMPORAL_ALT_REF
- {
- int i;
+ {
+ int i;
- cpi->fixed_divide[0] = 0;
+ cpi->fixed_divide[0] = 0;
- for (i = 1; i < 512; i++)
- cpi->fixed_divide[i] = 0x80000 / i;
- }
+ for (i = 1; i < 512; i++)
+ cpi->fixed_divide[i] = 0x80000 / i;
+ }
#endif
}
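/*
 * Illustration (not part of this change): the fixed_divide table
 * initialised above stores 2^19 / i, so an integer divide by i can be
 * replaced with a multiply and a 19-bit shift (assumed to serve the
 * temporal filter's running averages).
 */
#include <stdio.h>

int main(void) {
  unsigned int fixed_divide[512];
  unsigned int i, acc = 12345, count = 7, avg;
  fixed_divide[0] = 0;
  for (i = 1; i < 512; i++)
    fixed_divide[i] = 0x80000 / i;                /* 2^19 / i */
  avg = (acc * fixed_divide[count]) >> 19;        /* approximates acc / count */
  printf("%u vs %u\n", avg, acc / count);         /* 1763 vs 1763 */
  return 0;
}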
-void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
- VP8_COMMON *cm = &cpi->common;
+void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ VP8_COMMON *cm = &cpi->common;
- if (!cpi)
- return;
+ if (!cpi)
+ return;
- if (!oxcf)
- return;
+ if (!oxcf)
+ return;
- if (cm->version != oxcf->Version)
- {
- cm->version = oxcf->Version;
- vp8_setup_version(cm);
- }
+ if (cm->version != oxcf->Version) {
+ cm->version = oxcf->Version;
+ vp8_setup_version(cm);
+ }
- cpi->oxcf = *oxcf;
+ cpi->oxcf = *oxcf;
- switch (cpi->oxcf.Mode)
- {
- // Real time and one pass deprecated in test code base
+ switch (cpi->oxcf.Mode) {
+ // Real time and one pass deprecated in test code base
case MODE_FIRSTPASS:
- cpi->pass = 1;
- cpi->compressor_speed = 1;
- break;
+ cpi->pass = 1;
+ cpi->compressor_speed = 1;
+ break;
case MODE_SECONDPASS:
- cpi->pass = 2;
- cpi->compressor_speed = 1;
+ cpi->pass = 2;
+ cpi->compressor_speed = 1;
- if (cpi->oxcf.cpu_used < -5)
- {
- cpi->oxcf.cpu_used = -5;
- }
+ if (cpi->oxcf.cpu_used < -5) {
+ cpi->oxcf.cpu_used = -5;
+ }
- if (cpi->oxcf.cpu_used > 5)
- cpi->oxcf.cpu_used = 5;
+ if (cpi->oxcf.cpu_used > 5)
+ cpi->oxcf.cpu_used = 5;
- break;
+ break;
case MODE_SECONDPASS_BEST:
- cpi->pass = 2;
- cpi->compressor_speed = 0;
- break;
- }
+ cpi->pass = 2;
+ cpi->compressor_speed = 0;
+ break;
+ }
- cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
- cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
- cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
+ cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
+ cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
+ cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
#if CONFIG_LOSSLESS
- cpi->oxcf.lossless = oxcf->lossless;
- if(cpi->oxcf.lossless)
- {
- cpi->rtcd.fdct.short4x4 = vp8_short_walsh4x4_x8_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_walsh4x4_x8_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_walsh8x4_x8_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_walsh8x4_x8_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_lossless_c;
- cpi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
- cpi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
- cpi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
- cpi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
- cpi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
- }
+ cpi->oxcf.lossless = oxcf->lossless;
+ if (cpi->oxcf.lossless) {
+ cpi->rtcd.fdct.short4x4 = vp8_short_walsh4x4_x8_c;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_walsh4x4_x8_c;
+ cpi->rtcd.fdct.short8x4 = vp8_short_walsh8x4_x8_c;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_walsh8x4_x8_c;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_lossless_c;
+ cpi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
+ cpi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
+ cpi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
+ cpi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
+ cpi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
+ }
#endif
- cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
- cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;
+ cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;
- //cpi->use_golden_frame_only = 0;
- //cpi->use_last_frame_only = 0;
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 1;
- cm->refresh_entropy_probs = 1;
+ // cpi->use_golden_frame_only = 0;
+ // cpi->use_last_frame_only = 0;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ cm->refresh_entropy_probs = 1;
- setup_features(cpi);
+ setup_features(cpi);
#if CONFIG_HIGH_PRECISION_MV
- cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
+ cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
#endif
- {
- int i;
-
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
- }
-
- // At the moment the first order values may not be > MAXQ
- if (cpi->oxcf.fixed_q > MAXQ)
- cpi->oxcf.fixed_q = MAXQ;
-
- // local file playback mode == really big buffer
- if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
- {
- cpi->oxcf.starting_buffer_level = 60000;
- cpi->oxcf.optimal_buffer_level = 60000;
- cpi->oxcf.maximum_buffer_size = 240000;
- }
-
- // Convert target bandwidth from Kbit/s to Bit/s
- cpi->oxcf.target_bandwidth *= 1000;
-
- cpi->oxcf.starting_buffer_level =
- rescale(cpi->oxcf.starting_buffer_level,
- cpi->oxcf.target_bandwidth, 1000);
-
- // Set or reset optimal and maximum buffer levels.
- if (cpi->oxcf.optimal_buffer_level == 0)
- cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
- else
- cpi->oxcf.optimal_buffer_level =
- rescale(cpi->oxcf.optimal_buffer_level,
- cpi->oxcf.target_bandwidth, 1000);
-
- if (cpi->oxcf.maximum_buffer_size == 0)
- cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
- else
- cpi->oxcf.maximum_buffer_size =
- rescale(cpi->oxcf.maximum_buffer_size,
- cpi->oxcf.target_bandwidth, 1000);
-
- // Set up frame rate and related parameters rate control values.
- vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate);
-
- // Set absolute upper and lower quality limits
- cpi->worst_quality = cpi->oxcf.worst_allowed_q;
- cpi->best_quality = cpi->oxcf.best_allowed_q;
-
- // active values should only be modified if out of new range
- if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
- {
- cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
- }
- // less likely
- else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
- {
- cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
- }
- if (cpi->active_best_quality < cpi->oxcf.best_allowed_q)
- {
- cpi->active_best_quality = cpi->oxcf.best_allowed_q;
- }
- // less likely
- else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
- {
- cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
- }
-
- cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? TRUE : FALSE;
-
- cpi->cq_target_quality = cpi->oxcf.cq_level;
+ {
+ int i;
- if (!cm->use_bilinear_mc_filter)
+ for (i = 0; i < MAX_MB_SEGMENTS; i++)
+ cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
+ }
+
+ // At the moment the first order values may not be > MAXQ
+ if (cpi->oxcf.fixed_q > MAXQ)
+ cpi->oxcf.fixed_q = MAXQ;
+
+ // local file playback mode == really big buffer
+ if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
+ cpi->oxcf.starting_buffer_level = 60000;
+ cpi->oxcf.optimal_buffer_level = 60000;
+ cpi->oxcf.maximum_buffer_size = 240000;
+ }
+
+ // Convert target bandwidth from Kbit/s to Bit/s
+ cpi->oxcf.target_bandwidth *= 1000;
+
+ cpi->oxcf.starting_buffer_level =
+ rescale(cpi->oxcf.starting_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set or reset optimal and maximum buffer levels.
+ if (cpi->oxcf.optimal_buffer_level == 0)
+ cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.optimal_buffer_level =
+ rescale(cpi->oxcf.optimal_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ if (cpi->oxcf.maximum_buffer_size == 0)
+ cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.maximum_buffer_size =
+ rescale(cpi->oxcf.maximum_buffer_size,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set up frame rate and related parameters rate control values.
+ vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate);
+
+ // Set absolute upper and lower quality limits
+ cpi->worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->best_quality = cpi->oxcf.best_allowed_q;
+
+ // active values should only be modified if out of new range
+ if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ }
+ // less likely
+ else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
+ cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
+ }
+ if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ }
+ // less likely
+ else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
+ cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
+ }
+
+ cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? TRUE : FALSE;
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+
+ if (!cm->use_bilinear_mc_filter)
#if CONFIG_ENHANCED_INTERP
- cm->mcomp_filter_type = EIGHTTAP;
+ cm->mcomp_filter_type = EIGHTTAP;
#else
- cm->mcomp_filter_type = SIXTAP;
+ cm->mcomp_filter_type = SIXTAP;
#endif
- else
- cm->mcomp_filter_type = BILINEAR;
+ else
+ cm->mcomp_filter_type = BILINEAR;
- cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+ cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
- cm->Width = cpi->oxcf.Width ;
- cm->Height = cpi->oxcf.Height ;
+ cm->Width = cpi->oxcf.Width;
+ cm->Height = cpi->oxcf.Height;
- cm->horiz_scale = cpi->horiz_scale;
- cm->vert_scale = cpi->vert_scale ;
+ cm->horiz_scale = cpi->horiz_scale;
+ cm->vert_scale = cpi->vert_scale;
- // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
- if (cpi->oxcf.Sharpness > 7)
- cpi->oxcf.Sharpness = 7;
+ // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
+ if (cpi->oxcf.Sharpness > 7)
+ cpi->oxcf.Sharpness = 7;
- cm->sharpness_level = cpi->oxcf.Sharpness;
+ cm->sharpness_level = cpi->oxcf.Sharpness;
- if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
- {
- int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
- int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
+ if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
+ int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
+ int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
- Scale2Ratio(cm->horiz_scale, &hr, &hs);
- Scale2Ratio(cm->vert_scale, &vr, &vs);
+ Scale2Ratio(cm->horiz_scale, &hr, &hs);
+ Scale2Ratio(cm->vert_scale, &vr, &vs);
- // always go to the next whole number
- cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
- cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
- }
+ // always go to the next whole number
+ cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
+ cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
+ }
- if (((cm->Width + 15) & 0xfffffff0) !=
- cm->yv12_fb[cm->lst_fb_idx].y_width ||
- ((cm->Height + 15) & 0xfffffff0) !=
- cm->yv12_fb[cm->lst_fb_idx].y_height ||
- cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
- {
- alloc_raw_frame_buffers(cpi);
- vp8_alloc_compressor_data(cpi);
- }
+ if (((cm->Width + 15) & 0xfffffff0) !=
+ cm->yv12_fb[cm->lst_fb_idx].y_width ||
+ ((cm->Height + 15) & 0xfffffff0) !=
+ cm->yv12_fb[cm->lst_fb_idx].y_height ||
+ cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
+ alloc_raw_frame_buffers(cpi);
+ vp8_alloc_compressor_data(cpi);
+ }
- if (cpi->oxcf.fixed_q >= 0)
- {
- cpi->last_q[0] = cpi->oxcf.fixed_q;
- cpi->last_q[1] = cpi->oxcf.fixed_q;
- cpi->last_boosted_qindex = cpi->oxcf.fixed_q;
- }
+ if (cpi->oxcf.fixed_q >= 0) {
+ cpi->last_q[0] = cpi->oxcf.fixed_q;
+ cpi->last_q[1] = cpi->oxcf.fixed_q;
+ cpi->last_boosted_qindex = cpi->oxcf.fixed_q;
+ }
- cpi->Speed = cpi->oxcf.cpu_used;
+ cpi->Speed = cpi->oxcf.cpu_used;
- // force to allowlag to 0 if lag_in_frames is 0;
- if (cpi->oxcf.lag_in_frames == 0)
- {
- cpi->oxcf.allow_lag = 0;
- }
- // Limit on lag buffers as these are not currently dynamically allocated
- else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
- cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
+  // Force allow_lag to 0 if lag_in_frames is 0
+ if (cpi->oxcf.lag_in_frames == 0) {
+ cpi->oxcf.allow_lag = 0;
+ }
+ // Limit on lag buffers as these are not currently dynamically allocated
+ else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
+ cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
- // YX Temp
- cpi->alt_ref_source = NULL;
- cpi->is_src_frame_alt_ref = 0;
+ // YX Temp
+ cpi->alt_ref_source = NULL;
+ cpi->is_src_frame_alt_ref = 0;
#if 0
- // Experimental RD Code
- cpi->frame_distortion = 0;
- cpi->last_frame_distortion = 0;
+ // Experimental RD Code
+ cpi->frame_distortion = 0;
+ cpi->last_frame_distortion = 0;
#endif
}
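
In the configuration code above, starting_buffer_level, optimal_buffer_level and maximum_buffer_size arrive as milliseconds of data at the target rate and are converted to bits once target_bandwidth has been scaled to bit/s, so rescale(level, bandwidth, 1000) works out to roughly level * bandwidth / 1000. A minimal sketch of that conversion, assuming rescale behaves as its call sites here suggest (the real helper is defined elsewhere in this file):

    #include <stdint.h>

    /* Milliseconds of buffering at 'bandwidth_bps' bits per second,
     * expressed in bits, using a 64-bit intermediate to avoid overflow. */
    static int64_t buffer_level_ms_to_bits(int64_t level_ms,
                                           int64_t bandwidth_bps) {
      return level_ms * bandwidth_bps / 1000;
    }
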
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)
-static void cal_mvsadcosts(int *mvsadcost[2])
-{
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do
- {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- }
- while (++i <= mvfp_max);
+static void cal_mvsadcosts(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost [0] [0] = 300;
+ mvsadcost [1] [0] = 300;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost [0][i] = (int) z;
+ mvsadcost [1][i] = (int) z;
+ mvsadcost [0][-i] = (int) z;
+ mvsadcost [1][-i] = (int) z;
+ } while (++i <= mvfp_max);
}
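
The table above gives each motion-vector component magnitude an approximate rate cost in the encoder's fixed-point units: z = 256 * 2 * (log2(8 * i) + 0.6) estimates the bits needed to code a component of magnitude i at eighth-pel resolution (its log2 plus a 0.6 constant), doubled and scaled by 256. For example, i = 1 yields z = 256 * 2 * (log2(8) + 0.6) = 256 * 7.2 = 1843 after truncation, while the zero component is pinned at 300; the high-precision variant below fills the same shape of table out to mvfp_max_hp.
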
#if CONFIG_HIGH_PRECISION_MV
-static void cal_mvsadcosts_hp(int *mvsadcost[2])
-{
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do
- {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- }
- while (++i <= mvfp_max_hp);
+static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost [0] [0] = 300;
+ mvsadcost [1] [0] = 300;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost [0][i] = (int) z;
+ mvsadcost [1][i] = (int) z;
+ mvsadcost [0][-i] = (int) z;
+ mvsadcost [1][-i] = (int) z;
+ } while (++i <= mvfp_max_hp);
}
#endif
-VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
-{
- int i;
- volatile union
- {
- VP8_COMP *cpi;
- VP8_PTR ptr;
- } ctx;
-
+VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
+ int i;
+ volatile union {
VP8_COMP *cpi;
- VP8_COMMON *cm;
-
- cpi = ctx.cpi = vpx_memalign(32, sizeof(VP8_COMP));
- // Check that the CPI instance is valid
- if (!cpi)
- return 0;
-
- cm = &cpi->common;
-
- vpx_memset(cpi, 0, sizeof(VP8_COMP));
-
- if (setjmp(cm->error.jmp))
- {
- VP8_PTR ptr = ctx.ptr;
-
- ctx.cpi->common.error.setjmp = 0;
- vp8_remove_compressor(&ptr);
- return 0;
- }
-
- cpi->common.error.setjmp = 1;
+ VP8_PTR ptr;
+ } ctx;
- CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
+ VP8_COMP *cpi;
+ VP8_COMMON *cm;
- vp8_create_common(&cpi->common);
- vp8_cmachine_specific_config(cpi);
-
- init_config((VP8_PTR)cpi, oxcf);
-
- memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
- cpi->common.current_video_frame = 0;
- cpi->kf_overspend_bits = 0;
- cpi->kf_bitrate_adjustment = 0;
- cpi->frames_till_gf_update_due = 0;
- cpi->gf_overspend_bits = 0;
- cpi->non_gf_bitrate_adjustment = 0;
- cm->prob_last_coded = 128;
- cm->prob_gf_coded = 128;
- cm->prob_intra_coded = 63;
- for ( i = 0; i < COMP_PRED_CONTEXTS; i++ )
- cm->prob_comppred[i] = 128;
-
- // Prime the recent reference frame useage counters.
- // Hereafter they will be maintained as a sort of moving average
- cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
- cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
- cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
- cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
-
- // Set reference frame sign bias for ALTREF frame to 1 (for now)
- cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
-
- cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
-
- cpi->gold_is_last = 0 ;
- cpi->alt_is_last = 0 ;
- cpi->gold_is_alt = 0 ;
-
- // allocate memory for storing last frame's MVs for MV prediction.
- CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int_mv)));
- CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int)));
- CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows+2) * (cpi->common.mb_cols+2), sizeof(int)));
-
- // Create the encoder segmentation map and set all entries to 0
- CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+ cpi = ctx.cpi = vpx_memalign(32, sizeof(VP8_COMP));
+ // Check that the CPI instance is valid
+ if (!cpi)
+ return 0;
- // And a copy in common for temporal coding
- CHECK_MEM_ERROR(cm->last_frame_seg_map,
- vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+ cm = &cpi->common;
- // And a place holder structure is the coding context
- // for use if we want to save and restore it
- CHECK_MEM_ERROR(cpi->coding_context.last_frame_seg_map_copy,
- vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+ vpx_memset(cpi, 0, sizeof(VP8_COMP));
- CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
- vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols));
- cpi->active_map_enabled = 0;
+ if (setjmp(cm->error.jmp)) {
+ VP8_PTR ptr = ctx.ptr;
- for (i = 0; i < ( sizeof(cpi->mbgraph_stats) /
- sizeof(cpi->mbgraph_stats[0]) ); i++)
- {
- CHECK_MEM_ERROR(cpi->mbgraph_stats[i].mb_stats,
- vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols *
- sizeof(*cpi->mbgraph_stats[i].mb_stats),
- 1));
- }
+ ctx.cpi->common.error.setjmp = 0;
+ vp8_remove_compressor(&ptr);
+ return 0;
+ }
+
+ cpi->common.error.setjmp = 1;
+
+ CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
+
+ vp8_create_common(&cpi->common);
+ vp8_cmachine_specific_config(cpi);
+
+ init_config((VP8_PTR)cpi, oxcf);
+
+ memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
+ cpi->common.current_video_frame = 0;
+ cpi->kf_overspend_bits = 0;
+ cpi->kf_bitrate_adjustment = 0;
+ cpi->frames_till_gf_update_due = 0;
+ cpi->gf_overspend_bits = 0;
+ cpi->non_gf_bitrate_adjustment = 0;
+ cm->prob_last_coded = 128;
+ cm->prob_gf_coded = 128;
+ cm->prob_intra_coded = 63;
+ for (i = 0; i < COMP_PRED_CONTEXTS; i++)
+ cm->prob_comppred[i] = 128;
+
+  // Prime the recent reference frame usage counters.
+ // Hereafter they will be maintained as a sort of moving average
+ cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
+ cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
+
+ // Set reference frame sign bias for ALTREF frame to 1 (for now)
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
+
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->gold_is_last = 0;
+ cpi->alt_is_last = 0;
+ cpi->gold_is_alt = 0;
+
+ // allocate memory for storing last frame's MVs for MV prediction.
+ CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cpi->common.mb_rows + 2) * (cpi->common.mb_cols + 2), sizeof(int_mv)));
+ CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, vpx_calloc((cpi->common.mb_rows + 2) * (cpi->common.mb_cols + 2), sizeof(int)));
+ CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows + 2) * (cpi->common.mb_cols + 2), sizeof(int)));
+
+ // Create the encoder segmentation map and set all entries to 0
+ CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+
+ // And a copy in common for temporal coding
+ CHECK_MEM_ERROR(cm->last_frame_seg_map,
+ vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+
+  // And a placeholder structure in the coding context
+ // for use if we want to save and restore it
+ CHECK_MEM_ERROR(cpi->coding_context.last_frame_seg_map_copy,
+ vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
+
+ CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ vpx_memset(cpi->active_map, 1, (cpi->common.mb_rows * cpi->common.mb_cols));
+ cpi->active_map_enabled = 0;
+
+ for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
+ sizeof(cpi->mbgraph_stats[0])); i++) {
+ CHECK_MEM_ERROR(cpi->mbgraph_stats[i].mb_stats,
+ vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols *
+ sizeof(*cpi->mbgraph_stats[i].mb_stats),
+ 1));
+ }
#ifdef ENTROPY_STATS
- if(cpi->pass != 1)
- init_context_counters();
+ if (cpi->pass != 1)
+ init_context_counters();
#endif
#ifdef MODE_STATS
- vp8_zero(y_modes);
- vp8_zero(i8x8_modes);
- vp8_zero(uv_modes);
- vp8_zero(uv_modes_y);
- vp8_zero(b_modes);
- vp8_zero(inter_y_modes);
- vp8_zero(inter_uv_modes);
- vp8_zero(inter_b_modes);
+ vp8_zero(y_modes);
+ vp8_zero(i8x8_modes);
+ vp8_zero(uv_modes);
+ vp8_zero(uv_modes_y);
+ vp8_zero(b_modes);
+ vp8_zero(inter_y_modes);
+ vp8_zero(inter_uv_modes);
+ vp8_zero(inter_b_modes);
#endif
- /*Initialize the feed-forward activity masking.*/
- cpi->activity_avg = 90<<12;
+  /* Initialize the feed-forward activity masking. */
+ cpi->activity_avg = 90 << 12;
- cpi->frames_since_key = 8; // Give a sensible default for the first frame.
- cpi->key_frame_frequency = cpi->oxcf.key_freq;
- cpi->this_key_frame_forced = FALSE;
- cpi->next_key_frame_forced = FALSE;
+ cpi->frames_since_key = 8; // Give a sensible default for the first frame.
+ cpi->key_frame_frequency = cpi->oxcf.key_freq;
+ cpi->this_key_frame_forced = FALSE;
+ cpi->next_key_frame_forced = FALSE;
- cpi->source_alt_ref_pending = FALSE;
- cpi->source_alt_ref_active = FALSE;
- cpi->common.refresh_alt_ref_frame = 0;
+ cpi->source_alt_ref_pending = FALSE;
+ cpi->source_alt_ref_active = FALSE;
+ cpi->common.refresh_alt_ref_frame = 0;
- cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
#if CONFIG_INTERNAL_STATS
- cpi->b_calculate_ssimg = 0;
-
- cpi->count = 0;
- cpi->bytes = 0;
-
- if (cpi->b_calculate_psnr)
- {
- cpi->total_sq_error = 0.0;
- cpi->total_sq_error2 = 0.0;
- cpi->total_y = 0.0;
- cpi->total_u = 0.0;
- cpi->total_v = 0.0;
- cpi->total = 0.0;
- cpi->totalp_y = 0.0;
- cpi->totalp_u = 0.0;
- cpi->totalp_v = 0.0;
- cpi->totalp = 0.0;
- cpi->tot_recode_hits = 0;
- cpi->summed_quality = 0;
- cpi->summed_weights = 0;
- }
-
- if (cpi->b_calculate_ssimg)
- {
- cpi->total_ssimg_y = 0;
- cpi->total_ssimg_u = 0;
- cpi->total_ssimg_v = 0;
- cpi->total_ssimg_all = 0;
- }
+ cpi->b_calculate_ssimg = 0;
+
+ cpi->count = 0;
+ cpi->bytes = 0;
+
+ if (cpi->b_calculate_psnr) {
+ cpi->total_sq_error = 0.0;
+ cpi->total_sq_error2 = 0.0;
+ cpi->total_y = 0.0;
+ cpi->total_u = 0.0;
+ cpi->total_v = 0.0;
+ cpi->total = 0.0;
+ cpi->totalp_y = 0.0;
+ cpi->totalp_u = 0.0;
+ cpi->totalp_v = 0.0;
+ cpi->totalp = 0.0;
+ cpi->tot_recode_hits = 0;
+ cpi->summed_quality = 0;
+ cpi->summed_weights = 0;
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ cpi->total_ssimg_y = 0;
+ cpi->total_ssimg_u = 0;
+ cpi->total_ssimg_v = 0;
+ cpi->total_ssimg_all = 0;
+ }
#endif
#ifndef LLONG_MAX
#define LLONG_MAX 9223372036854775807LL
#endif
- cpi->first_time_stamp_ever = LLONG_MAX;
+ cpi->first_time_stamp_ever = LLONG_MAX;
- cpi->frames_till_gf_update_due = 0;
- cpi->key_frame_count = 1;
+ cpi->frames_till_gf_update_due = 0;
+ cpi->key_frame_count = 1;
- cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
- cpi->ni_tot_qi = 0;
- cpi->ni_frames = 0;
- cpi->tot_q = 0.0;
- cpi->avg_q = vp8_convert_qindex_to_q( cpi->oxcf.worst_allowed_q );
- cpi->total_byte_count = 0;
+ cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
+ cpi->ni_tot_qi = 0;
+ cpi->ni_frames = 0;
+ cpi->tot_q = 0.0;
+ cpi->avg_q = vp8_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
+ cpi->total_byte_count = 0;
- cpi->rate_correction_factor = 1.0;
- cpi->key_frame_rate_correction_factor = 1.0;
- cpi->gf_rate_correction_factor = 1.0;
- cpi->twopass.est_max_qcorrection_factor = 1.0;
+ cpi->rate_correction_factor = 1.0;
+ cpi->key_frame_rate_correction_factor = 1.0;
+ cpi->gf_rate_correction_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
- cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max+1];
- cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max+1];
- cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max+1];
- cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max+1];
+ cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max + 1];
+ cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max + 1];
+ cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max + 1];
+ cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max + 1];
- cal_mvsadcosts(cpi->mb.mvsadcost);
+ cal_mvsadcosts(cpi->mb.mvsadcost);
#if CONFIG_HIGH_PRECISION_MV
- cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp+1];
- cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp+1];
- cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp+1];
- cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp+1];
+ cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
+ cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
+ cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
+ cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
- cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
+ cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
#endif
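
The mvcost/mvsadcost assignments above deliberately point into the middle of their backing arrays so a signed motion-vector component, negative values included, can be used as an index directly. A minimal sketch of the idiom (the bound is illustrative, not the source's mv_max):

    #define COST_MAX 1023                     /* illustrative bound */
    static int storage[2 * COST_MAX + 1];
    static int *cost = &storage[COST_MAX];    /* cost[-COST_MAX..COST_MAX] valid */
    /* cost[-5] and cost[5] now address symmetric entries with no offsetting. */
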
- for (i = 0; i < KEY_FRAME_CONTEXT; i++)
- {
- cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
- }
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
+ cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
+ }
#ifdef OUTPUT_YUV_SRC
- yuv_file = fopen("bd.yuv", "ab");
+ yuv_file = fopen("bd.yuv", "ab");
#endif
#ifdef OUTPUT_YUV_REC
- yuv_rec_file = fopen("rec.yuv", "wb");
+ yuv_rec_file = fopen("rec.yuv", "wb");
#endif
#if 0
- framepsnr = fopen("framepsnr.stt", "a");
- kf_list = fopen("kf_list.stt", "w");
+ framepsnr = fopen("framepsnr.stt", "a");
+ kf_list = fopen("kf_list.stt", "w");
#endif
- cpi->output_pkt_list = oxcf->output_pkt_list;
+ cpi->output_pkt_list = oxcf->output_pkt_list;
- if (cpi->pass == 1)
- {
- vp8_init_first_pass(cpi);
- }
- else if (cpi->pass == 2)
- {
- size_t packet_sz = sizeof(FIRSTPASS_STATS);
- int packets = oxcf->two_pass_stats_in.sz / packet_sz;
-
- cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
- cpi->twopass.stats_in = cpi->twopass.stats_in_start;
- cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
- + (packets - 1) * packet_sz);
- vp8_init_second_pass(cpi);
- }
+ if (cpi->pass == 1) {
+ vp8_init_first_pass(cpi);
+ } else if (cpi->pass == 2) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int packets = oxcf->two_pass_stats_in.sz / packet_sz;
- vp8_set_speed_features(cpi);
+ cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
+ cpi->twopass.stats_in = cpi->twopass.stats_in_start;
+ cpi->twopass.stats_in_end = (void *)((char *)cpi->twopass.stats_in
+ + (packets - 1) * packet_sz);
+ vp8_init_second_pass(cpi);
+ }
- // Set starting values of RD threshold multipliers (128 = *1)
- for (i = 0; i < MAX_MODES; i++)
- {
- cpi->rd_thresh_mult[i] = 128;
- }
+ vp8_set_speed_features(cpi);
+
+ // Set starting values of RD threshold multipliers (128 = *1)
+ for (i = 0; i < MAX_MODES; i++) {
+ cpi->rd_thresh_mult[i] = 128;
+ }
#ifdef ENTROPY_STATS
- init_mv_ref_counts();
+ init_mv_ref_counts();
#endif
- cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
- cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
- cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
- cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
- cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
- cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-
- cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
- cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
- cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
- cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
- cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
-
- cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
- cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
- cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
- cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
- cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
-
- cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
- cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
- cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
- cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
- cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
-
- cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
- cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
- cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
- cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
- cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
+ cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
+ cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
+ cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
+ cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
+ cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
+ cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
+ cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
+
+ cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
+ cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
+ cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
+ cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
+ cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
+ cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
+ cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
+ cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
+ cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
+
+ cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
+ cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
+ cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
+ cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
+ cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
+ cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
+ cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
+ cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
+ cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
+
+ cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
+ cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
+ cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
+ cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
+ cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
+ cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
+ cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
+ cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
+ cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
+
+ cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
+ cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
+ cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
+ cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
+ cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
+ cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
+ cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
+ cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
+ cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
#if ARCH_X86 || ARCH_X86_64
- cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
#endif
- cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
- cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
- cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
+ cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
+ cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);
+ cpi->refining_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, refining_search);
- // make sure frame 1 is okay
- cpi->error_bins[0] = cpi->common.MBs;
+ // make sure frame 1 is okay
+ cpi->error_bins[0] = cpi->common.MBs;
- //vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later
- //when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame.
- vp8cx_init_quantizer(cpi);
+  // vp8cx_init_quantizer() is first called here. A check in vp8cx_frame_init_quantizer()
+  // ensures it is only called again when needed, avoiding a redundant call per frame.
+ vp8cx_init_quantizer(cpi);
- vp8_loop_filter_init(cm);
+ vp8_loop_filter_init(cm);
- cpi->common.error.setjmp = 0;
+ cpi->common.error.setjmp = 0;
- vp8_zero(cpi->y_uv_mode_count)
+ vp8_zero(cpi->y_uv_mode_count)
- return (VP8_PTR) cpi;
+ return (VP8_PTR) cpi;
}
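
vp8_create_compressor() allocates through CHECK_MEM_ERROR, which on failure longjmps back to the setjmp() above so every allocation error funnels into one cleanup path; the compressor pointer is kept in a volatile union because non-volatile locals modified around a setjmp()/longjmp() pair have indeterminate values when control returns. A minimal sketch of the pattern with the error machinery simplified (only setjmp/longjmp are real names here):

    #include <setjmp.h>
    #include <stdlib.h>

    struct codec { jmp_buf jmp; };

    static void fail(struct codec *c) { longjmp(c->jmp, 1); }

    static struct codec *create(void) {
      struct codec *volatile c = malloc(sizeof(*c)); /* volatile survives longjmp */
      if (!c) return NULL;
      if (setjmp(c->jmp)) { /* re-entered when fail() is called */
        free(c);            /* single cleanup path */
        return NULL;
      }
      /* ... initialization that calls fail(c) on any allocation error ... */
      return c;
    }
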
-void vp8_remove_compressor(VP8_PTR *ptr)
-{
- VP8_COMP *cpi = (VP8_COMP *)(*ptr);
- int i;
+void vp8_remove_compressor(VP8_PTR *ptr) {
+ VP8_COMP *cpi = (VP8_COMP *)(*ptr);
+ int i;
- if (!cpi)
- return;
+ if (!cpi)
+ return;
- if (cpi && (cpi->common.current_video_frame > 0))
- {
- if (cpi->pass == 2)
- {
- vp8_end_second_pass(cpi);
- }
+ if (cpi && (cpi->common.current_video_frame > 0)) {
+ if (cpi->pass == 2) {
+ vp8_end_second_pass(cpi);
+ }
#ifdef ENTROPY_STATS
- if(cpi->pass != 1)
- {
- print_context_counters();
- print_tree_update_probs();
- print_mode_context();
+ if (cpi->pass != 1) {
+ print_context_counters();
+ print_tree_update_probs();
+ print_mode_context();
}
#endif
#if CONFIG_INTERNAL_STATS
- vp8_clear_system_state();
+ vp8_clear_system_state();
- //printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
- if (cpi->pass != 1)
- {
- FILE *f = fopen("opsnr.stt", "a");
- double time_encoded = (cpi->last_end_time_stamp_seen
- - cpi->first_time_stamp_ever) / 10000000.000;
- double total_encode_time = (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
- double dr = (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
+ // printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
+ if (cpi->pass != 1) {
+ FILE *f = fopen("opsnr.stt", "a");
+ double time_encoded = (cpi->last_end_time_stamp_seen
+ - cpi->first_time_stamp_ever) / 10000000.000;
+ double total_encode_time = (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
+ double dr = (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
#if defined(MODE_STATS)
- print_mode_contexts(&cpi->common);
+ print_mode_contexts(&cpi->common);
#endif
- if (cpi->b_calculate_psnr)
- {
- YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
- double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height;
- double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error);
- double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2);
- double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
-
- fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\tVPXSSIM\t Time(us)\n");
- fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f\n",
- dr, cpi->total / cpi->count, total_psnr, cpi->totalp / cpi->count, total_psnr2, total_ssim,
- total_encode_time);
+ if (cpi->b_calculate_psnr) {
+ YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+ double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height;
+ double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error);
+ double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2);
+ double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+
+ fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\tVPXSSIM\t Time(us)\n");
+ fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f\n",
+ dr, cpi->total / cpi->count, total_psnr, cpi->totalp / cpi->count, total_psnr2, total_ssim,
+ total_encode_time);
// fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f %10ld\n",
// dr, cpi->total / cpi->count, total_psnr, cpi->totalp / cpi->count, total_psnr2, total_ssim,
// total_encode_time, cpi->tot_recode_hits);
- }
+ }
- if (cpi->b_calculate_ssimg)
- {
- fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(us)\n");
- fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
- cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
- cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time);
+ if (cpi->b_calculate_ssimg) {
+ fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(us)\n");
+ fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
+ cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
+ cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time);
// fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f %10ld\n", dr,
// cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
// cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time, cpi->tot_recode_hits);
- }
+ }
- fclose(f);
- }
+ fclose(f);
+ }
#endif
#ifdef MODE_STATS
- {
- extern int count_mb_seg[4];
- char modes_stats_file[250];
- FILE *f;
- double dr = (double)cpi->oxcf.frame_rate * (double)cpi->bytes * (double)8 / (double)cpi->count / (double)1000 ;
- sprintf(modes_stats_file, "modes_q%03d.stt",cpi->common.base_qindex);
- f = fopen(modes_stats_file, "w");
- fprintf(f, "intra_mode in Intra Frames:\n");
- {
- int i;
- fprintf(f, "Y: ");
- for (i=0;i<VP8_YMODES;i++) fprintf(f, " %8d,", y_modes[i]);
- fprintf(f, "\n");
- }
- {
- int i;
- fprintf(f, "I8: ");
- for (i=0;i<VP8_I8X8_MODES;i++) fprintf(f, " %8d,", i8x8_modes[i]);
- fprintf(f, "\n");
- }
- {
- int i;
- fprintf(f, "UV: ");
- for (i=0;i<VP8_UV_MODES;i++) fprintf(f, " %8d,", uv_modes[i]);
- fprintf(f, "\n");
- }
- {
- int i, j;
- fprintf(f, "KeyFrame Y-UV:\n");
- for(i=0;i<VP8_YMODES;i++)
- {
- fprintf(f, "%2d:", i);
- for (j=0; j<VP8_UV_MODES;j++) fprintf(f, "%8d, ",uv_modes_y[i][j]);
- fprintf(f, "\n");
- }
- }
- {
- int i, j;
- fprintf(f, "Inter Y-UV:\n");
- for(i=0;i<VP8_YMODES;i++)
- {
- fprintf(f, "%2d:", i);
- for (j=0; j<VP8_UV_MODES;j++) fprintf(f, "%8d, ",cpi->y_uv_mode_count[i][j]);
- fprintf(f, "\n");
- }
- }
- {
- int i;
+ {
+ extern int count_mb_seg[4];
+ char modes_stats_file[250];
+ FILE *f;
+ double dr = (double)cpi->oxcf.frame_rate * (double)cpi->bytes * (double)8 / (double)cpi->count / (double)1000;
+ sprintf(modes_stats_file, "modes_q%03d.stt", cpi->common.base_qindex);
+ f = fopen(modes_stats_file, "w");
+ fprintf(f, "intra_mode in Intra Frames:\n");
+ {
+ int i;
+ fprintf(f, "Y: ");
+ for (i = 0; i < VP8_YMODES; i++) fprintf(f, " %8d,", y_modes[i]);
+ fprintf(f, "\n");
+ }
+ {
+ int i;
+ fprintf(f, "I8: ");
+ for (i = 0; i < VP8_I8X8_MODES; i++) fprintf(f, " %8d,", i8x8_modes[i]);
+ fprintf(f, "\n");
+ }
+ {
+ int i;
+ fprintf(f, "UV: ");
+ for (i = 0; i < VP8_UV_MODES; i++) fprintf(f, " %8d,", uv_modes[i]);
+ fprintf(f, "\n");
+ }
+ {
+ int i, j;
+ fprintf(f, "KeyFrame Y-UV:\n");
+ for (i = 0; i < VP8_YMODES; i++) {
+ fprintf(f, "%2d:", i);
+ for (j = 0; j < VP8_UV_MODES; j++) fprintf(f, "%8d, ", uv_modes_y[i][j]);
+ fprintf(f, "\n");
+ }
+ }
+ {
+ int i, j;
+ fprintf(f, "Inter Y-UV:\n");
+ for (i = 0; i < VP8_YMODES; i++) {
+ fprintf(f, "%2d:", i);
+ for (j = 0; j < VP8_UV_MODES; j++) fprintf(f, "%8d, ", cpi->y_uv_mode_count[i][j]);
+ fprintf(f, "\n");
+ }
+ }
+ {
+ int i;
- fprintf(f, "B: ");
- for (i = 0; i < VP8_BINTRAMODES; i++)
- fprintf(f, "%8d, ", b_modes[i]);
+ fprintf(f, "B: ");
+ for (i = 0; i < VP8_BINTRAMODES; i++)
+ fprintf(f, "%8d, ", b_modes[i]);
- fprintf(f, "\n");
+ fprintf(f, "\n");
- }
+ }
- fprintf(f, "Modes in Inter Frames:\n");
- {
- int i;
- fprintf(f, "Y: ");
- for (i=0;i<MB_MODE_COUNT;i++) fprintf(f, " %8d,", inter_y_modes[i]);
- fprintf(f, "\n");
- }
- {
- int i;
- fprintf(f, "UV: ");
- for (i=0;i<VP8_UV_MODES;i++) fprintf(f, " %8d,", inter_uv_modes[i]);
- fprintf(f, "\n");
- }
- {
- int i;
- fprintf(f, "B: ");
- for (i = 0; i < B_MODE_COUNT; i++) fprintf(f, "%8d, ", inter_b_modes[i]);
- fprintf(f, "\n");
- }
- fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
- fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);
- fclose(f);
- }
+ fprintf(f, "Modes in Inter Frames:\n");
+ {
+ int i;
+ fprintf(f, "Y: ");
+ for (i = 0; i < MB_MODE_COUNT; i++) fprintf(f, " %8d,", inter_y_modes[i]);
+ fprintf(f, "\n");
+ }
+ {
+ int i;
+ fprintf(f, "UV: ");
+ for (i = 0; i < VP8_UV_MODES; i++) fprintf(f, " %8d,", inter_uv_modes[i]);
+ fprintf(f, "\n");
+ }
+ {
+ int i;
+ fprintf(f, "B: ");
+ for (i = 0; i < B_MODE_COUNT; i++) fprintf(f, "%8d, ", inter_b_modes[i]);
+ fprintf(f, "\n");
+ }
+ fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
+ fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);
+ fclose(f);
+ }
#endif
#ifdef ENTROPY_STATS
- {
- int i, j, k;
- FILE *fmode = fopen("modecontext.c", "w");
+ {
+ int i, j, k;
+ FILE *fmode = fopen("modecontext.c", "w");
- fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
- fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
- fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
+ fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
+ fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
+ fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
- for (i = 0; i < 10; i++)
- {
+ for (i = 0; i < 10; i++) {
- fprintf(fmode, " { //Above Mode : %d\n", i);
+ fprintf(fmode, " { // Above Mode : %d\n", i);
- for (j = 0; j < 10; j++)
- {
+ for (j = 0; j < 10; j++) {
- fprintf(fmode, " {");
+ fprintf(fmode, " {");
- for (k = 0; k < VP8_BINTRAMODES; k++)
- {
- if (!intra_mode_stats[i][j][k])
- fprintf(fmode, " %5d, ", 1);
- else
- fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
- }
+ for (k = 0; k < VP8_BINTRAMODES; k++) {
+ if (!intra_mode_stats[i][j][k])
+ fprintf(fmode, " %5d, ", 1);
+ else
+ fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
+ }
- fprintf(fmode, "}, // left_mode %d\n", j);
+ fprintf(fmode, "}, // left_mode %d\n", j);
- }
+ }
- fprintf(fmode, " },\n");
+ fprintf(fmode, " },\n");
- }
+ }
- fprintf(fmode, "};\n");
- fclose(fmode);
- }
+ fprintf(fmode, "};\n");
+ fclose(fmode);
+ }
#endif
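
In the modecontext.c dump above, any zero entry of intra_mode_stats is written out as 1: the emitted table is consumed as vp8_kf_default_bmode_counts, and flooring every cell at one observation presumably keeps probabilities derived from the counts from collapsing to zero.
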
#if defined(SECTIONBITS_OUTPUT)
- if (0)
- {
- int i;
- FILE *f = fopen("tokenbits.stt", "a");
+ if (0) {
+ int i;
+ FILE *f = fopen("tokenbits.stt", "a");
- for (i = 0; i < 28; i++)
- fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
+ for (i = 0; i < 28; i++)
+ fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
- fprintf(f, "\n");
- fclose(f);
- }
+ fprintf(f, "\n");
+ fclose(f);
+ }
#endif
#if 0
- {
- printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
- printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
- printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
- }
+ {
+ printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
+ printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
+ printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
+ }
#endif
- }
+ }
- dealloc_compressor_data(cpi);
- vpx_free(cpi->mb.ss);
- vpx_free(cpi->tok);
+ dealloc_compressor_data(cpi);
+ vpx_free(cpi->mb.ss);
+ vpx_free(cpi->tok);
- for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]); i++)
- {
- vpx_free(cpi->mbgraph_stats[i].mb_stats);
- }
+ for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]); i++) {
+ vpx_free(cpi->mbgraph_stats[i].mb_stats);
+ }
- vp8_remove_common(&cpi->common);
- vpx_free(cpi);
- *ptr = 0;
+ vp8_remove_common(&cpi->common);
+ vpx_free(cpi);
+ *ptr = 0;
#ifdef OUTPUT_YUV_SRC
- fclose(yuv_file);
+ fclose(yuv_file);
#endif
#ifdef OUTPUT_YUV_REC
- fclose(yuv_rec_file);
+ fclose(yuv_rec_file);
#endif
#if 0
- if (keyfile)
- fclose(keyfile);
+ if (keyfile)
+ fclose(keyfile);
- if (framepsnr)
- fclose(framepsnr);
+ if (framepsnr)
+ fclose(framepsnr);
- if (kf_list)
- fclose(kf_list);
+ if (kf_list)
+ fclose(kf_list);
#endif
@@ -2393,2461 +2273,2233 @@ void vp8_remove_compressor(VP8_PTR *ptr)
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
unsigned char *recon, int recon_stride,
unsigned int cols, unsigned int rows,
- vp8_variance_rtcd_vtable_t *rtcd)
-{
- unsigned int row, col;
- uint64_t total_sse = 0;
- int diff;
-
- for (row = 0; row + 16 <= rows; row += 16)
- {
- for (col = 0; col + 16 <= cols; col += 16)
- {
- unsigned int sse;
-
- VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride,
- recon + col, recon_stride,
- &sse);
- total_sse += sse;
+ vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned int row, col;
+ uint64_t total_sse = 0;
+ int diff;
+
+ for (row = 0; row + 16 <= rows; row += 16) {
+ for (col = 0; col + 16 <= cols; col += 16) {
+ unsigned int sse;
+
+ VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride,
+ recon + col, recon_stride,
+ &sse);
+ total_sse += sse;
+ }
+
+ /* Handle odd-sized width */
+ if (col < cols) {
+ unsigned int border_row, border_col;
+ unsigned char *border_orig = orig;
+ unsigned char *border_recon = recon;
+
+ for (border_row = 0; border_row < 16; border_row++) {
+ for (border_col = col; border_col < cols; border_col++) {
+ diff = border_orig[border_col] - border_recon[border_col];
+ total_sse += diff * diff;
}
- /* Handle odd-sized width */
- if (col < cols)
- {
- unsigned int border_row, border_col;
- unsigned char *border_orig = orig;
- unsigned char *border_recon = recon;
-
- for (border_row = 0; border_row < 16; border_row++)
- {
- for (border_col = col; border_col < cols; border_col++)
- {
- diff = border_orig[border_col] - border_recon[border_col];
- total_sse += diff * diff;
- }
-
- border_orig += orig_stride;
- border_recon += recon_stride;
- }
- }
-
- orig += orig_stride * 16;
- recon += recon_stride * 16;
+ border_orig += orig_stride;
+ border_recon += recon_stride;
+ }
}
- /* Handle odd-sized height */
- for (; row < rows; row++)
- {
- for (col = 0; col < cols; col++)
- {
- diff = orig[col] - recon[col];
- total_sse += diff * diff;
- }
+ orig += orig_stride * 16;
+ recon += recon_stride * 16;
+ }
- orig += orig_stride;
- recon += recon_stride;
+ /* Handle odd-sized height */
+ for (; row < rows; row++) {
+ for (col = 0; col < cols; col++) {
+ diff = orig[col] - recon[col];
+ total_sse += diff * diff;
}
- return total_sse;
+ orig += orig_stride;
+ recon += recon_stride;
+ }
+
+ return total_sse;
}
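
calc_plane_error() covers the largest 16-pixel-aligned region of the plane with the optimized mse16x16 kernel and accumulates the leftover right border and bottom rows with plain scalar differences. For a 100x70 plane, for instance, the kernel handles the 96x64 interior while the 4-pixel-wide right strip and the final 6 rows go through the scalar loops.
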
-static void generate_psnr_packet(VP8_COMP *cpi)
-{
- YV12_BUFFER_CONFIG *orig = cpi->Source;
- YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
- struct vpx_codec_cx_pkt pkt;
- uint64_t sse;
- int i;
- unsigned int width = cpi->common.Width;
- unsigned int height = cpi->common.Height;
-
- pkt.kind = VPX_CODEC_PSNR_PKT;
- sse = calc_plane_error(orig->y_buffer, orig->y_stride,
- recon->y_buffer, recon->y_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
- pkt.data.psnr.sse[0] = sse;
- pkt.data.psnr.sse[1] = sse;
- pkt.data.psnr.samples[0] = width * height;
- pkt.data.psnr.samples[1] = width * height;
-
- width = (width + 1) / 2;
- height = (height + 1) / 2;
-
- sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
- recon->u_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
- pkt.data.psnr.sse[0] += sse;
- pkt.data.psnr.sse[2] = sse;
- pkt.data.psnr.samples[0] += width * height;
- pkt.data.psnr.samples[2] = width * height;
-
- sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
- recon->v_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
- pkt.data.psnr.sse[0] += sse;
- pkt.data.psnr.sse[3] = sse;
- pkt.data.psnr.samples[0] += width * height;
- pkt.data.psnr.samples[3] = width * height;
-
- for (i = 0; i < 4; i++)
- pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0,
- pkt.data.psnr.sse[i]);
-
- vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+static void generate_psnr_packet(VP8_COMP *cpi) {
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ struct vpx_codec_cx_pkt pkt;
+ uint64_t sse;
+ int i;
+ unsigned int width = cpi->common.Width;
+ unsigned int height = cpi->common.Height;
+
+ pkt.kind = VPX_CODEC_PSNR_PKT;
+ sse = calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride,
+ width, height,
+ IF_RTCD(&cpi->rtcd.variance));
+ pkt.data.psnr.sse[0] = sse;
+ pkt.data.psnr.sse[1] = sse;
+ pkt.data.psnr.samples[0] = width * height;
+ pkt.data.psnr.samples[1] = width * height;
+
+ width = (width + 1) / 2;
+ height = (height + 1) / 2;
+
+ sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride,
+ width, height,
+ IF_RTCD(&cpi->rtcd.variance));
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[2] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[2] = width * height;
+
+ sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride,
+ width, height,
+ IF_RTCD(&cpi->rtcd.variance));
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[3] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[3] = width * height;
+
+ for (i = 0; i < 4; i++)
+ pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0,
+ pkt.data.psnr.sse[i]);
+
+ vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
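
generate_psnr_packet() sums per-plane squared error and converts it with vp8_mse2psnr(), which for 8-bit video presumably evaluates the usual 10 * log10(peak^2 * samples / sse) with peak = 255. A minimal sketch under that assumption (the library version may clamp differently):

    #include <math.h>

    /* PSNR in dB from total squared error over 'samples' pixels. */
    static double psnr_from_sse(double samples, double peak, double sse) {
      if (sse <= 0.0)
        return 100.0; /* conventional cap when the planes are identical */
      return 10.0 * log10(peak * peak * samples / sse);
    }
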
-int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- if (ref_frame_flags > 7)
- return -1 ;
+ if (ref_frame_flags > 7)
+ return -1;
- cpi->ref_frame_flags = ref_frame_flags;
- return 0;
+ cpi->ref_frame_flags = ref_frame_flags;
+ return 0;
}
-int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- if (ref_frame_flags > 7)
- return -1 ;
+ if (ref_frame_flags > 7)
+ return -1;
- cpi->common.refresh_golden_frame = 0;
- cpi->common.refresh_alt_ref_frame = 0;
- cpi->common.refresh_last_frame = 0;
+ cpi->common.refresh_golden_frame = 0;
+ cpi->common.refresh_alt_ref_frame = 0;
+ cpi->common.refresh_last_frame = 0;
- if (ref_frame_flags & VP8_LAST_FLAG)
- cpi->common.refresh_last_frame = 1;
+ if (ref_frame_flags & VP8_LAST_FLAG)
+ cpi->common.refresh_last_frame = 1;
- if (ref_frame_flags & VP8_GOLD_FLAG)
- cpi->common.refresh_golden_frame = 1;
+ if (ref_frame_flags & VP8_GOLD_FLAG)
+ cpi->common.refresh_golden_frame = 1;
- if (ref_frame_flags & VP8_ALT_FLAG)
- cpi->common.refresh_alt_ref_frame = 1;
+ if (ref_frame_flags & VP8_ALT_FLAG)
+ cpi->common.refresh_alt_ref_frame = 1;
- return 0;
+ return 0;
}
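
Both entry points above treat ref_frame_flags as a three-bit mask over VP8_LAST_FLAG, VP8_GOLD_FLAG and VP8_ALT_FLAG, rejecting any value above 7 (all three bits set). A usage sketch, with 'encoder' standing in for a VP8_PTR obtained from vp8_create_compressor():

    /* Refresh last and golden, but not alt-ref, with the next frame.
     * Returns 0 on success, -1 if the mask has unknown bits set. */
    int ret = vp8_update_reference(encoder, VP8_LAST_FLAG | VP8_GOLD_FLAG);
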
-int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
- VP8_COMMON *cm = &cpi->common;
- int ref_fb_idx;
-
- if (ref_frame_flag == VP8_LAST_FLAG)
- ref_fb_idx = cm->lst_fb_idx;
- else if (ref_frame_flag == VP8_GOLD_FLAG)
- ref_fb_idx = cm->gld_fb_idx;
- else if (ref_frame_flag == VP8_ALT_FLAG)
- ref_fb_idx = cm->alt_fb_idx;
- else
- return -1;
-
- vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd);
-
- return 0;
-}
-int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
- VP8_COMMON *cm = &cpi->common;
-
- int ref_fb_idx;
-
- if (ref_frame_flag == VP8_LAST_FLAG)
- ref_fb_idx = cm->lst_fb_idx;
- else if (ref_frame_flag == VP8_GOLD_FLAG)
- ref_fb_idx = cm->gld_fb_idx;
- else if (ref_frame_flag == VP8_ALT_FLAG)
- ref_fb_idx = cm->alt_fb_idx;
- else
- return -1;
+int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ VP8_COMMON *cm = &cpi->common;
+ int ref_fb_idx;
- vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]);
+ if (ref_frame_flag == VP8_LAST_FLAG)
+ ref_fb_idx = cm->lst_fb_idx;
+ else if (ref_frame_flag == VP8_GOLD_FLAG)
+ ref_fb_idx = cm->gld_fb_idx;
+ else if (ref_frame_flag == VP8_ALT_FLAG)
+ ref_fb_idx = cm->alt_fb_idx;
+ else
+ return -1;
- return 0;
-}
-int vp8_update_entropy(VP8_PTR comp, int update)
-{
- VP8_COMP *cpi = (VP8_COMP *) comp;
- VP8_COMMON *cm = &cpi->common;
- cm->refresh_entropy_probs = update;
+ vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd);
- return 0;
+ return 0;
}
+int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ VP8_COMMON *cm = &cpi->common;
+ int ref_fb_idx;
-#ifdef OUTPUT_YUV_SRC
-void vp8_write_yuv_frame(YV12_BUFFER_CONFIG *s)
-{
- unsigned char *src = s->y_buffer;
- int h = s->y_height;
+ if (ref_frame_flag == VP8_LAST_FLAG)
+ ref_fb_idx = cm->lst_fb_idx;
+ else if (ref_frame_flag == VP8_GOLD_FLAG)
+ ref_fb_idx = cm->gld_fb_idx;
+ else if (ref_frame_flag == VP8_ALT_FLAG)
+ ref_fb_idx = cm->alt_fb_idx;
+ else
+ return -1;
- do
- {
- fwrite(src, s->y_width, 1, yuv_file);
- src += s->y_stride;
- }
- while (--h);
+ vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]);
- src = s->u_buffer;
- h = s->uv_height;
+ return 0;
+}
+int vp8_update_entropy(VP8_PTR comp, int update) {
+ VP8_COMP *cpi = (VP8_COMP *) comp;
+ VP8_COMMON *cm = &cpi->common;
+ cm->refresh_entropy_probs = update;
- do
- {
- fwrite(src, s->uv_width, 1, yuv_file);
- src += s->uv_stride;
- }
- while (--h);
+ return 0;
+}
- src = s->v_buffer;
- h = s->uv_height;
- do
- {
- fwrite(src, s->uv_width, 1, yuv_file);
- src += s->uv_stride;
- }
- while (--h);
+#ifdef OUTPUT_YUV_SRC
+void vp8_write_yuv_frame(YV12_BUFFER_CONFIG *s) {
+ unsigned char *src = s->y_buffer;
+ int h = s->y_height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
}
#endif
#ifdef OUTPUT_YUV_REC
-void vp8_write_yuv_rec_frame(VP8_COMMON *cm)
-{
- YV12_BUFFER_CONFIG *s = cm->frame_to_show;
- unsigned char *src = s->y_buffer;
- int h = cm->Height;
+void vp8_write_yuv_rec_frame(VP8_COMMON *cm) {
+ YV12_BUFFER_CONFIG *s = cm->frame_to_show;
+ unsigned char *src = s->y_buffer;
+ int h = cm->Height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_rec_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = (cm->Height + 1) / 2;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = (cm->Height + 1) / 2;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+}
+#endif
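
Both debug writers above emit planar 4:2:0 frames, one appended per call: full-resolution Y rows followed by half-resolution U and V, each row copied at its plane width so the stride padding is stripped. Note the source writer loops over the buffer's own y_height/uv_height while the reconstruction writer loops over cm->Height and (cm->Height + 1) / 2, so the two dumps are sized off different dimensions.
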
- do
- {
- fwrite(src, s->y_width, 1, yuv_rec_file);
- src += s->y_stride;
- }
- while (--h);
+static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- src = s->u_buffer;
- h = (cm->Height+1)/2;
+ // Update data structure that monitors level of reference to last GF
+ vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- do
- {
- fwrite(src, s->uv_width, 1, yuv_rec_file);
- src += s->uv_stride;
- }
- while (--h);
+  // A refresh on this frame means later frames don't refresh unless the user specifies otherwise
+ cpi->common.frames_since_golden = 0;
- src = s->v_buffer;
- h = (cm->Height+1)/2;
+ // Clear the alternate reference update pending flag.
+ cpi->source_alt_ref_pending = FALSE;
- do
- {
- fwrite(src, s->uv_width, 1, yuv_rec_file);
- src += s->uv_stride;
- }
- while (--h);
-}
-#endif
+  // Set the alternate reference frame active flag
+ cpi->source_alt_ref_active = TRUE;
-static void update_alt_ref_frame_stats(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
+}
+static void update_golden_frame_stats(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ // Update the Golden frame usage counts.
+ if (cm->refresh_golden_frame) {
// Update data structure that monitors level of reference to last GF
vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
     // This frame's refresh means subsequent frames don't refresh unless the user requests it
+ cm->refresh_golden_frame = 0;
cpi->common.frames_since_golden = 0;
- // Clear the alternate reference update pending flag.
- cpi->source_alt_ref_pending = FALSE;
-
- // Set the alternate refernce frame active flag
- cpi->source_alt_ref_active = TRUE;
-
-
-}
-static void update_golden_frame_stats(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
-
- // Update the Golden frame usage counts.
- if (cm->refresh_golden_frame)
- {
- // Update data structure that monitors level of reference to last GF
- vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
-
- // this frame refreshes means next frames don't unless specified by user
- cm->refresh_golden_frame = 0;
- cpi->common.frames_since_golden = 0;
-
- //if ( cm->frame_type == KEY_FRAME )
- //{
- cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
- cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
- cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
- cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
- //}
- //else
- //{
- // // Carry a potrtion of count over to begining of next gf sequence
- // cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
- // cpi->recent_ref_frame_usage[ALTREF_FRAME] >>= 5;
- //}
-
- // ******** Fixed Q test code only ************
- // If we are going to use the ALT reference for the next group of frames set a flag to say so.
- if (cpi->oxcf.fixed_q >= 0 &&
- cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
- {
- cpi->source_alt_ref_pending = TRUE;
- cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
- }
-
- if (!cpi->source_alt_ref_pending)
- cpi->source_alt_ref_active = FALSE;
-
- // Decrement count down till next gf
- if (cpi->frames_till_gf_update_due > 0)
- cpi->frames_till_gf_update_due--;
-
- }
- else if (!cpi->common.refresh_alt_ref_frame)
- {
- // Decrement count down till next gf
- if (cpi->frames_till_gf_update_due > 0)
- cpi->frames_till_gf_update_due--;
-
- if (cpi->common.frames_till_alt_ref_frame)
- cpi->common.frames_till_alt_ref_frame --;
-
- cpi->common.frames_since_golden ++;
-
- if (cpi->common.frames_since_golden > 1)
- {
- cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
- cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
- cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
- cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
- }
- }
+ // if ( cm->frame_type == KEY_FRAME )
+ // {
+ cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
+ cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
+ // }
+ // else
+ // {
+  //  // Carry a portion of the count over to the beginning of the next gf sequence
+ // cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
+ // cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
+ // cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
+ // cpi->recent_ref_frame_usage[ALTREF_FRAME] >>= 5;
+ // }
+
+ // ******** Fixed Q test code only ************
+    // If we are going to use the ALT reference for the next group of frames, set a flag to say so.
+ if (cpi->oxcf.fixed_q >= 0 &&
+ cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame) {
+ cpi->source_alt_ref_pending = TRUE;
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ }
+
+ if (!cpi->source_alt_ref_pending)
+ cpi->source_alt_ref_active = FALSE;
+
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ } else if (!cpi->common.refresh_alt_ref_frame) {
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ if (cpi->common.frames_till_alt_ref_frame)
+ cpi->common.frames_till_alt_ref_frame--;
+
+ cpi->common.frames_since_golden++;
+
+ if (cpi->common.frames_since_golden > 1) {
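+      // Accumulate the per-frame reference counts over the life of this
+      // golden frame group (the frame immediately after the refresh is
+      // skipped).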
+ cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
+ cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
+ }
+ }
}
-int find_fp_qindex()
-{
- int i;
+int find_fp_qindex() {
+ int i;
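+
+  // The first pass runs at a fixed quantizer: pick the lowest qindex
+  // whose effective Q value reaches 30.0, clamping to the top of the
+  // range if none does.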
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- if ( vp8_convert_qindex_to_q(i) >= 30.0 )
- {
- break;
- }
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (vp8_convert_qindex_to_q(i) >= 30.0) {
+ break;
}
+ }
- if ( i == QINDEX_RANGE )
- i--;
+ if (i == QINDEX_RANGE)
+ i--;
- return i;
+ return i;
}
-static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
-{
- (void) size;
- (void) dest;
- (void) frame_flags;
+static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) {
+ (void) size;
+ (void) dest;
+ (void) frame_flags;
- vp8_set_quantizer(cpi, find_fp_qindex());
- vp8_first_pass(cpi);
+ vp8_set_quantizer(cpi, find_fp_qindex());
+ vp8_first_pass(cpi);
}
#if 1
-void write_yuv_frame_to_file(YV12_BUFFER_CONFIG *frame)
-{
-
- // write the frame
- int i;
- FILE *fp = fopen("encode_recon.yuv", "a");
-
- for (i = 0; i < frame->y_height; i++)
- fwrite(frame->y_buffer + i * frame->y_stride,
- frame->y_width, 1, fp);
- for (i = 0; i < frame->uv_height; i++)
- fwrite(frame->u_buffer + i * frame->uv_stride,
- frame->uv_width, 1, fp);
- for (i = 0; i < frame->uv_height; i++)
- fwrite(frame->v_buffer + i * frame->uv_stride,
- frame->uv_width, 1, fp);
-
- fclose(fp);
+void write_yuv_frame_to_file(YV12_BUFFER_CONFIG *frame) {
+
+ // write the frame
+ int i;
+ FILE *fp = fopen("encode_recon.yuv", "a");
+
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, fp);
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, fp);
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, fp);
+
+ fclose(fp);
}
#endif
-//#define WRITE_RECON_BUFFER 1
+// #define WRITE_RECON_BUFFER 1
#if WRITE_RECON_BUFFER
-void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
-{
+void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
- // write the frame
- FILE *yframe;
- int i;
- char filename[255];
+ // write the frame
+ FILE *yframe;
+ int i;
+ char filename[255];
- sprintf(filename, "cx\\y%04d.raw", this_frame);
- yframe = fopen(filename, "wb");
+ sprintf(filename, "cx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
- for (i = 0; i < frame->y_height; i++)
- fwrite(frame->y_buffer + i * frame->y_stride,
- frame->y_width, 1, yframe);
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, yframe);
- fclose(yframe);
- sprintf(filename, "cx\\u%04d.raw", this_frame);
- yframe = fopen(filename, "wb");
+ fclose(yframe);
+ sprintf(filename, "cx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
- for (i = 0; i < frame->uv_height; i++)
- fwrite(frame->u_buffer + i * frame->uv_stride,
- frame->uv_width, 1, yframe);
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
- fclose(yframe);
- sprintf(filename, "cx\\v%04d.raw", this_frame);
- yframe = fopen(filename, "wb");
+ fclose(yframe);
+ sprintf(filename, "cx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
- for (i = 0; i < frame->uv_height; i++)
- fwrite(frame->v_buffer + i * frame->uv_stride,
- frame->uv_width, 1, yframe);
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
- fclose(yframe);
+ fclose(yframe);
}
#endif
-static double compute_edge_pixel_proportion(YV12_BUFFER_CONFIG *frame)
-{
+static double compute_edge_pixel_proportion(YV12_BUFFER_CONFIG *frame) {
#define EDGE_THRESH 128
- int i, j;
- int num_edge_pels = 0;
- int num_pels = (frame->y_height - 2) * (frame->y_width - 2);
- unsigned char *prev = frame->y_buffer + 1;
- unsigned char *curr = frame->y_buffer + 1 + frame->y_stride;
- unsigned char *next = frame->y_buffer + 1 + 2*frame->y_stride;
- for (i = 1; i < frame->y_height - 1; i++)
- {
- for (j = 1; j < frame->y_width - 1; j++)
- {
- /* Sobel hor and ver gradients */
- int v = 2*(curr[1] - curr[-1]) + (prev[1] - prev[-1]) + (next[1] - next[-1]);
- int h = 2*(prev[0] - next[0]) + (prev[1] - next[1]) + (prev[-1] - next[-1]);
- h = (h < 0 ? -h : h);
- v = (v < 0 ? -v : v);
- if (h > EDGE_THRESH || v > EDGE_THRESH) num_edge_pels++;
- curr++;
- prev++;
- next++;
- }
- curr += frame->y_stride - frame->y_width + 2;
- prev += frame->y_stride - frame->y_width + 2;
- next += frame->y_stride - frame->y_width + 2;
- }
- return (double)num_edge_pels/(double)num_pels;
+ int i, j;
+ int num_edge_pels = 0;
+ int num_pels = (frame->y_height - 2) * (frame->y_width - 2);
+ unsigned char *prev = frame->y_buffer + 1;
+ unsigned char *curr = frame->y_buffer + 1 + frame->y_stride;
+ unsigned char *next = frame->y_buffer + 1 + 2 * frame->y_stride;
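+
+  // Scan the interior of the luma plane with 3x3 Sobel operators
+  // (v: [-1 0 1; -2 0 2; -1 0 1], h: [1 2 1; 0 0 0; -1 -2 -1]); a pixel
+  // counts as an edge when either gradient magnitude exceeds EDGE_THRESH.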
+ for (i = 1; i < frame->y_height - 1; i++) {
+ for (j = 1; j < frame->y_width - 1; j++) {
+ /* Sobel hor and ver gradients */
+ int v = 2 * (curr[1] - curr[-1]) + (prev[1] - prev[-1]) + (next[1] - next[-1]);
+ int h = 2 * (prev[0] - next[0]) + (prev[1] - next[1]) + (prev[-1] - next[-1]);
+ h = (h < 0 ? -h : h);
+ v = (v < 0 ? -v : v);
+ if (h > EDGE_THRESH || v > EDGE_THRESH) num_edge_pels++;
+ curr++;
+ prev++;
+ next++;
+ }
+ curr += frame->y_stride - frame->y_width + 2;
+ prev += frame->y_stride - frame->y_width + 2;
+ next += frame->y_stride - frame->y_width + 2;
+ }
+ return (double)num_edge_pels / (double)num_pels;
}
// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static BOOL recode_loop_test( VP8_COMP *cpi,
- int high_limit, int low_limit,
- int q, int maxq, int minq )
-{
- BOOL force_recode = FALSE;
- VP8_COMMON *cm = &cpi->common;
-
- // Is frame recode allowed at all
- // Yes if either recode mode 1 is selected or mode two is selcted
- // and the frame is a key frame. golden frame or alt_ref_frame
- if ( (cpi->sf.recode_loop == 1) ||
- ( (cpi->sf.recode_loop == 2) &&
- ( (cm->frame_type == KEY_FRAME) ||
- cm->refresh_golden_frame ||
- cm->refresh_alt_ref_frame ) ) )
- {
- // General over and under shoot tests
- if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
- ((cpi->projected_frame_size < low_limit) && (q > minq)) )
- {
- force_recode = TRUE;
- }
- // Special Constrained quality tests
- else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
- {
- // Undershoot and below auto cq level
- if ( (q > cpi->cq_target_quality) &&
- (cpi->projected_frame_size <
- ((cpi->this_frame_target * 7) >> 3)))
- {
- force_recode = TRUE;
- }
- // Severe undershoot and between auto and user cq level
- else if ( (q > cpi->oxcf.cq_level) &&
- (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
- (cpi->active_best_quality > cpi->oxcf.cq_level))
- {
- force_recode = TRUE;
- cpi->active_best_quality = cpi->oxcf.cq_level;
- }
- }
- }
-
- return force_recode;
+static BOOL recode_loop_test(VP8_COMP *cpi,
+ int high_limit, int low_limit,
+ int q, int maxq, int minq) {
+ BOOL force_recode = FALSE;
+ VP8_COMMON *cm = &cpi->common;
+
+  // Is frame recode allowed at all?
+  // Yes if either recode mode 1 is selected, or mode 2 is selected
+  // and the frame is a key frame, golden frame or alt_ref_frame
+ if ((cpi->sf.recode_loop == 1) ||
+ ((cpi->sf.recode_loop == 2) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cm->refresh_golden_frame ||
+ cm->refresh_alt_ref_frame))) {
+ // General over and under shoot tests
+ if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
+ ((cpi->projected_frame_size < low_limit) && (q > minq))) {
+ force_recode = TRUE;
+ }
+ // Special Constrained quality tests
+ else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ // Undershoot and below auto cq level
+ if ((q > cpi->cq_target_quality) &&
+ (cpi->projected_frame_size <
+ ((cpi->this_frame_target * 7) >> 3))) {
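+        // i.e. the frame came in below 7/8 of its bit target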
+ force_recode = TRUE;
+ }
+ // Severe undershoot and between auto and user cq level
+ else if ((q > cpi->oxcf.cq_level) &&
+ (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
+ (cpi->active_best_quality > cpi->oxcf.cq_level)) {
+ force_recode = TRUE;
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ }
+ }
+ }
+
+ return force_recode;
}
-void update_reference_frames(VP8_COMMON *cm)
-{
- YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
+void update_reference_frames(VP8_COMMON *cm) {
+ YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
- // At this point the new frame has been encoded.
- // If any buffer copy / swapping is signaled it should be done here.
+ // At this point the new frame has been encoded.
+ // If any buffer copy / swapping is signaled it should be done here.
- if (cm->frame_type == KEY_FRAME)
- {
- yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FLAG | VP8_ALT_FLAG ;
+ if (cm->frame_type == KEY_FRAME) {
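+    // A key frame refreshes every reference: flag the new buffer as both
+    // golden and altref and point both indices at it (the last-frame
+    // reference is updated by the refresh_last_frame case below).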
+ yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FLAG | VP8_ALT_FLAG;
- yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
- yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
- cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
- }
- else /* For non key frames */
- {
- if (cm->refresh_alt_ref_frame)
- {
- assert(!cm->copy_buffer_to_arf);
+ cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
+ } else { /* For non key frames */
+ if (cm->refresh_alt_ref_frame) {
+ assert(!cm->copy_buffer_to_arf);
- cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALT_FLAG;
- cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
- cm->alt_fb_idx = cm->new_fb_idx;
- }
- else if (cm->copy_buffer_to_arf)
- {
- assert(!(cm->copy_buffer_to_arf & ~0x3));
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALT_FLAG;
+ cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
+ cm->alt_fb_idx = cm->new_fb_idx;
+ } else if (cm->copy_buffer_to_arf) {
+ assert(!(cm->copy_buffer_to_arf & ~0x3));
- if (cm->copy_buffer_to_arf == 1)
- {
- if(cm->alt_fb_idx != cm->lst_fb_idx)
- {
- yv12_fb[cm->lst_fb_idx].flags |= VP8_ALT_FLAG;
- yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
- cm->alt_fb_idx = cm->lst_fb_idx;
- }
- }
- else /* if (cm->copy_buffer_to_arf == 2) */
- {
- if(cm->alt_fb_idx != cm->gld_fb_idx)
- {
- yv12_fb[cm->gld_fb_idx].flags |= VP8_ALT_FLAG;
- yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
- cm->alt_fb_idx = cm->gld_fb_idx;
- }
- }
+ if (cm->copy_buffer_to_arf == 1) {
+ if (cm->alt_fb_idx != cm->lst_fb_idx) {
+ yv12_fb[cm->lst_fb_idx].flags |= VP8_ALT_FLAG;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
+ cm->alt_fb_idx = cm->lst_fb_idx;
}
+ } else { /* if (cm->copy_buffer_to_arf == 2) */
+ if (cm->alt_fb_idx != cm->gld_fb_idx) {
+ yv12_fb[cm->gld_fb_idx].flags |= VP8_ALT_FLAG;
+ yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALT_FLAG;
+ cm->alt_fb_idx = cm->gld_fb_idx;
+ }
+ }
+ }
- if (cm->refresh_golden_frame)
- {
- assert(!cm->copy_buffer_to_gf);
+ if (cm->refresh_golden_frame) {
+ assert(!cm->copy_buffer_to_gf);
- cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FLAG;
- cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
- cm->gld_fb_idx = cm->new_fb_idx;
- }
- else if (cm->copy_buffer_to_gf)
- {
- assert(!(cm->copy_buffer_to_arf & ~0x3));
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FLAG;
+ cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
+ cm->gld_fb_idx = cm->new_fb_idx;
+ } else if (cm->copy_buffer_to_gf) {
+ assert(!(cm->copy_buffer_to_arf & ~0x3));
- if (cm->copy_buffer_to_gf == 1)
- {
- if(cm->gld_fb_idx != cm->lst_fb_idx)
- {
- yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FLAG;
- yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
- cm->gld_fb_idx = cm->lst_fb_idx;
- }
- }
- else /* if (cm->copy_buffer_to_gf == 2) */
- {
- if(cm->alt_fb_idx != cm->gld_fb_idx)
- {
- yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FLAG;
- yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
- cm->gld_fb_idx = cm->alt_fb_idx;
- }
- }
+ if (cm->copy_buffer_to_gf == 1) {
+ if (cm->gld_fb_idx != cm->lst_fb_idx) {
+ yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FLAG;
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
+ cm->gld_fb_idx = cm->lst_fb_idx;
+ }
+ } else { /* if (cm->copy_buffer_to_gf == 2) */
+ if (cm->alt_fb_idx != cm->gld_fb_idx) {
+ yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FLAG;
+ yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FLAG;
+ cm->gld_fb_idx = cm->alt_fb_idx;
}
+ }
}
+ }
- if (cm->refresh_last_frame)
- {
- cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FLAG;
- cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FLAG;
- cm->lst_fb_idx = cm->new_fb_idx;
- }
+ if (cm->refresh_last_frame) {
+ cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FLAG;
+ cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FLAG;
+ cm->lst_fb_idx = cm->new_fb_idx;
+ }
}
-void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
-{
- if (cm->no_lpf)
- {
- cm->filter_level = 0;
- }
+void loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
+ if (cm->no_lpf) {
+ cm->filter_level = 0;
+ }
#if CONFIG_LOSSLESS
- else if(cpi->oxcf.lossless)
- {
- cm->filter_level = 0;
- }
+ else if (cpi->oxcf.lossless) {
+ cm->filter_level = 0;
+ }
#endif
- else
- {
- struct vpx_usec_timer timer;
+ else {
+ struct vpx_usec_timer timer;
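+
+    // Time the filter level search; the elapsed time is accumulated in
+    // cpi->time_pick_lpf.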
- vp8_clear_system_state();
+ vp8_clear_system_state();
- vpx_usec_timer_start(&timer);
- if (cpi->sf.auto_filter == 0)
- vp8cx_pick_filter_level_fast(cpi->Source, cpi);
+ vpx_usec_timer_start(&timer);
+ if (cpi->sf.auto_filter == 0)
+ vp8cx_pick_filter_level_fast(cpi->Source, cpi);
- else
- vp8cx_pick_filter_level(cpi->Source, cpi);
+ else
+ vp8cx_pick_filter_level(cpi->Source, cpi);
- vpx_usec_timer_mark(&timer);
- cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
- }
+ vpx_usec_timer_mark(&timer);
+ cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+ }
- if (cm->filter_level > 0)
- {
- vp8cx_set_alt_lf_level(cpi, cm->filter_level);
- vp8_loop_filter_frame(cm, &cpi->mb.e_mbd);
- }
+ if (cm->filter_level > 0) {
+ vp8cx_set_alt_lf_level(cpi, cm->filter_level);
+ vp8_loop_filter_frame(cm, &cpi->mb.e_mbd);
+ }
- vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
+ vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);
}
#if CONFIG_PRED_FILTER
-void select_pred_filter_mode(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
-
- int prob_pred_filter_off = cm->prob_pred_filter_off;
+void select_pred_filter_mode(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+
+ int prob_pred_filter_off = cm->prob_pred_filter_off;
+
+ // Force filter on/off if probability is extreme
+ if (prob_pred_filter_off >= 255 * 0.95)
+ cm->pred_filter_mode = 0; // Off at the frame level
+ else if (prob_pred_filter_off <= 255 * 0.05)
+ cm->pred_filter_mode = 1; // On at the frame level
+ else
+ cm->pred_filter_mode = 2; // Selectable at the MB level
+}
- // Force filter on/off if probability is extreme
- if (prob_pred_filter_off >= 255 * 0.95)
- cm->pred_filter_mode = 0; // Off at the frame level
- else if (prob_pred_filter_off <= 255 * 0.05)
- cm->pred_filter_mode = 1; // On at the frame level
- else
- cm->pred_filter_mode = 2; // Selectable at the MB level
+void update_pred_filt_prob(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
+ int prob_pred_filter_off;
+
+ // Based on the selection in the previous frame determine what mode
+ // to use for the current frame and work out the signaling probability
+ if (cpi->pred_filter_on_count + cpi->pred_filter_off_count) {
+ prob_pred_filter_off = cpi->pred_filter_off_count * 256 /
+ (cpi->pred_filter_on_count + cpi->pred_filter_off_count);
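+
+    // prob_pred_filter_off is the observed "off" fraction in 1/256 units
+    // (e.g. 192 off / 64 on -> 192 * 256 / 256 = 192); it is clamped to
+    // [1, 255] below so that both outcomes remain codable.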
+
+ if (prob_pred_filter_off < 1)
+ prob_pred_filter_off = 1;
+
+ if (prob_pred_filter_off > 255)
+ prob_pred_filter_off = 255;
+
+ cm->prob_pred_filter_off = prob_pred_filter_off;
+ } else
+ cm->prob_pred_filter_off = 128;
+ /*
+ {
+ FILE *fp = fopen("filt_use.txt", "a");
+ fprintf (fp, "%d %d prob=%d\n", cpi->pred_filter_off_count,
+ cpi->pred_filter_on_count, cm->prob_pred_filter_off);
+ fclose(fp);
+ }
+ */
}
+#endif
-void update_pred_filt_prob(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
- int prob_pred_filter_off;
+static void encode_frame_to_data_rate
+(
+ VP8_COMP *cpi,
+ unsigned long *size,
+ unsigned char *dest,
+ unsigned int *frame_flags
+) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
- // Based on the selection in the previous frame determine what mode
- // to use for the current frame and work out the signaling probability
- if ( cpi->pred_filter_on_count + cpi->pred_filter_off_count )
- {
- prob_pred_filter_off = cpi->pred_filter_off_count * 256 /
- ( cpi->pred_filter_on_count + cpi->pred_filter_off_count);
+ int Q;
+ int frame_over_shoot_limit;
+ int frame_under_shoot_limit;
- if (prob_pred_filter_off < 1)
- prob_pred_filter_off = 1;
+ int Loop = FALSE;
+ int loop_count;
+ int this_q;
+ int last_zbin_oq;
- if (prob_pred_filter_off > 255)
- prob_pred_filter_off = 255;
+ int q_low;
+ int q_high;
+ int zbin_oq_high;
+ int zbin_oq_low = 0;
- cm->prob_pred_filter_off = prob_pred_filter_off;
- }
- else
- cm->prob_pred_filter_off = 128;
-/*
- {
- FILE *fp = fopen("filt_use.txt", "a");
- fprintf (fp, "%d %d prob=%d\n", cpi->pred_filter_off_count,
- cpi->pred_filter_on_count, cm->prob_pred_filter_off);
- fclose(fp);
- }
-*/
-}
-#endif
+ int top_index;
+ int bottom_index;
+ int active_worst_qchanged = FALSE;
-static void encode_frame_to_data_rate
-(
- VP8_COMP *cpi,
- unsigned long *size,
- unsigned char *dest,
- unsigned int *frame_flags
-)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
-
- int Q;
- int frame_over_shoot_limit;
- int frame_under_shoot_limit;
-
- int Loop = FALSE;
- int loop_count;
- int this_q;
- int last_zbin_oq;
-
- int q_low;
- int q_high;
- int zbin_oq_high;
- int zbin_oq_low = 0;
-
- int top_index;
- int bottom_index;
- int active_worst_qchanged = FALSE;
-
- int overshoot_seen = FALSE;
- int undershoot_seen = FALSE;
-
- int loop_size_estimate = 0;
+ int overshoot_seen = FALSE;
+ int undershoot_seen = FALSE;
+
+ int loop_size_estimate = 0;
#if CONFIG_ENHANCED_INTERP
- SPEED_FEATURES *sf = &cpi->sf;
+ SPEED_FEATURES *sf = &cpi->sf;
#if RESET_FOREACH_FILTER
- int q_low0;
- int q_high0;
- int zbin_oq_high0;
- int zbin_oq_low0 = 0;
- int Q0;
- int last_zbin_oq0;
- int active_best_quality0;
- int active_worst_quality0;
- double rate_correction_factor0;
- double gf_rate_correction_factor0;
+ int q_low0;
+ int q_high0;
+ int zbin_oq_high0;
+ int zbin_oq_low0 = 0;
+ int Q0;
+ int last_zbin_oq0;
+ int active_best_quality0;
+ int active_worst_quality0;
+ double rate_correction_factor0;
+ double gf_rate_correction_factor0;
#endif
- /* list of filters to search over */
- int mcomp_filters_to_search[] = {EIGHTTAP, EIGHTTAP_SHARP, SIXTAP};
- int mcomp_filters = sizeof(mcomp_filters_to_search)/sizeof(*mcomp_filters_to_search);
- int mcomp_filter_index = 0;
- INT64 mcomp_filter_cost[4];
+ /* list of filters to search over */
+ int mcomp_filters_to_search[] = {EIGHTTAP, EIGHTTAP_SHARP, SIXTAP};
+ int mcomp_filters = sizeof(mcomp_filters_to_search) / sizeof(*mcomp_filters_to_search);
+ int mcomp_filter_index = 0;
+ INT64 mcomp_filter_cost[4];
#endif
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state();
+ // Clear down mmx registers to allow floating point in what follows
+ vp8_clear_system_state();
- // For an alt ref frame in 2 pass we skip the call to the second
- // pass function that sets the target bandwidth so must set it here
- if (cpi->common.refresh_alt_ref_frame)
- {
- cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
- cpi->target_bandwidth = cpi->twopass.gf_bits * cpi->output_frame_rate; // per second target bitrate
- }
+  // For an alt ref frame in 2 pass we skip the call to the second
+  // pass function that sets the target bandwidth, so we must set it here
+ if (cpi->common.refresh_alt_ref_frame) {
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits; // Per frame bit target for the alt ref frame
+ cpi->target_bandwidth = cpi->twopass.gf_bits * cpi->output_frame_rate; // per second target bitrate
+ }
- // Default turn off buffer to buffer copying
- cm->copy_buffer_to_gf = 0;
- cm->copy_buffer_to_arf = 0;
+ // Default turn off buffer to buffer copying
+ cm->copy_buffer_to_gf = 0;
+ cm->copy_buffer_to_arf = 0;
- // Clear zbin over-quant value and mode boost values.
- cpi->zbin_over_quant = 0;
- cpi->zbin_mode_boost = 0;
+ // Clear zbin over-quant value and mode boost values.
+ cpi->zbin_over_quant = 0;
+ cpi->zbin_mode_boost = 0;
- // Enable or disable mode based tweaking of the zbin
- // For 2 Pass Only used where GF/ARF prediction quality
- // is above a threshold
- cpi->zbin_mode_boost = 0;
+ // Enable or disable mode based tweaking of the zbin
+  // For 2 pass only; used where GF/ARF prediction quality
+  // is above a threshold
+ cpi->zbin_mode_boost = 0;
#if CONFIG_LOSSLESS
- cpi->zbin_mode_boost_enabled = FALSE;
+ cpi->zbin_mode_boost_enabled = FALSE;
#else
- cpi->zbin_mode_boost_enabled = TRUE;
+ cpi->zbin_mode_boost_enabled = TRUE;
#endif
- if ( cpi->gfu_boost <= 400 )
- {
- cpi->zbin_mode_boost_enabled = FALSE;
- }
-
- // Current default encoder behaviour for the altref sign bias
- if (cpi->source_alt_ref_active)
- cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
- else
- cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
-
- // Check to see if a key frame is signalled
- // For two pass with auto key frame enabled cm->frame_type may already be set, but not for one pass.
- if ((cm->current_video_frame == 0) ||
- (cm->frame_flags & FRAMEFLAGS_KEY) ||
- (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
- {
- // Key frame from VFW/auto-keyframe/first frame
- cm->frame_type = KEY_FRAME;
- }
-
- // Set default state for segment based loop filter update flags
- xd->mode_ref_lf_delta_update = 0;
-
- // Set various flags etc to special state if it is a key frame
- if (cm->frame_type == KEY_FRAME)
- {
- int i;
-
- // Reset the loop filter deltas and segmentation map
- setup_features(cpi);
+ if (cpi->gfu_boost <= 400) {
+ cpi->zbin_mode_boost_enabled = FALSE;
+ }
- // If segmentation is enabled force a map update for key frames
- if (xd->segmentation_enabled)
- {
- xd->update_mb_segmentation_map = 1;
- xd->update_mb_segmentation_data = 1;
- }
+ // Current default encoder behaviour for the altref sign bias
+ if (cpi->source_alt_ref_active)
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
+ else
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
+
+ // Check to see if a key frame is signalled
+  // For two pass with auto key frame enabled, cm->frame_type may already be set, but not for one pass.
+ if ((cm->current_video_frame == 0) ||
+ (cm->frame_flags & FRAMEFLAGS_KEY) ||
+ (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
+ // Key frame from VFW/auto-keyframe/first frame
+ cm->frame_type = KEY_FRAME;
+ }
+
+ // Set default state for segment based loop filter update flags
+ xd->mode_ref_lf_delta_update = 0;
+
+ // Set various flags etc to special state if it is a key frame
+ if (cm->frame_type == KEY_FRAME) {
+ int i;
- // The alternate reference frame cannot be active for a key frame
- cpi->source_alt_ref_active = FALSE;
+ // Reset the loop filter deltas and segmentation map
+ setup_features(cpi);
- // Reset the RD threshold multipliers to default of * 1 (128)
- for (i = 0; i < MAX_MODES; i++)
- {
- cpi->rd_thresh_mult[i] = 128;
- }
+ // If segmentation is enabled force a map update for key frames
+ if (xd->segmentation_enabled) {
+ xd->update_mb_segmentation_map = 1;
+ xd->update_mb_segmentation_data = 1;
}
-//#if !CONFIG_COMPRED
- // This function has been deprecated for now but we may want to do
- // something here at a late date
- //update_rd_ref_frame_probs(cpi);
-//#endif
-
- // Test code for new segment features
- init_seg_features( cpi );
+ // The alternate reference frame cannot be active for a key frame
+ cpi->source_alt_ref_active = FALSE;
- // Decide how big to make the frame
- vp8_pick_frame_size(cpi);
+ // Reset the RD threshold multipliers to default of * 1 (128)
+ for (i = 0; i < MAX_MODES; i++) {
+ cpi->rd_thresh_mult[i] = 128;
+ }
+ }
- vp8_clear_system_state();
+// #if !CONFIG_COMPRED
+ // This function has been deprecated for now but we may want to do
+ // something here at a late date
+ // update_rd_ref_frame_probs(cpi);
+// #endif
- // Set an active best quality and if necessary active worst quality
- Q = cpi->active_worst_quality;
+ // Test code for new segment features
+ init_seg_features(cpi);
- if ( cm->frame_type == KEY_FRAME )
- {
- int high = 2000;
- int low = 400;
-
- if ( cpi->kf_boost > high )
- cpi->active_best_quality = kf_low_motion_minq[Q];
- else if ( cpi->kf_boost < low )
- cpi->active_best_quality = kf_high_motion_minq[Q];
- else
- {
- int gap = high - low;
- int offset = high - cpi->kf_boost;
- int qdiff = kf_high_motion_minq[Q] - kf_low_motion_minq[Q];
- int adjustment = ((offset * qdiff) + (gap>>1)) / gap;
+ // Decide how big to make the frame
+ vp8_pick_frame_size(cpi);
- cpi->active_best_quality = kf_low_motion_minq[Q] + adjustment;
- }
+ vp8_clear_system_state();
- // Make an adjustment based on the %s static
- // The main impact of this is at lower Q to prevent overly large key
- // frames unless a lot of the image is static.
- if (cpi->kf_zeromotion_pct < 64 )
- cpi->active_best_quality += 4 - (cpi->kf_zeromotion_pct >> 4);
+ // Set an active best quality and if necessary active worst quality
+ Q = cpi->active_worst_quality;
- // Special case for key frames forced because we have reached
- // the maximum key frame interval. Here force the Q to a range
- // based on the ambient Q to reduce the risk of popping
- if ( cpi->this_key_frame_forced )
- {
- int delta_qindex;
- int qindex = cpi->last_boosted_qindex;
+ if (cm->frame_type == KEY_FRAME) {
+ int high = 2000;
+ int low = 400;
- delta_qindex = compute_qdelta( cpi, qindex,
- (qindex * 0.75) );
+ if (cpi->kf_boost > high)
+ cpi->active_best_quality = kf_low_motion_minq[Q];
+ else if (cpi->kf_boost < low)
+ cpi->active_best_quality = kf_high_motion_minq[Q];
+ else {
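+      // Linearly interpolate (with rounding) between the low- and
+      // high-motion minq tables according to where kf_boost falls in
+      // [low, high].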
+ int gap = high - low;
+ int offset = high - cpi->kf_boost;
+ int qdiff = kf_high_motion_minq[Q] - kf_low_motion_minq[Q];
+ int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
- cpi->active_best_quality = qindex + delta_qindex;
- if (cpi->active_best_quality < cpi->best_quality)
- cpi->active_best_quality = cpi->best_quality;
- }
+ cpi->active_best_quality = kf_low_motion_minq[Q] + adjustment;
}
- else if (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
- {
- int high = 2000;
- int low = 400;
-
- // Use the lower of cpi->active_worst_quality and recent
- // average Q as basis for GF/ARF Q limit unless last frame was
- // a key frame.
- if ( (cpi->frames_since_key > 1) &&
- (cpi->avg_frame_qindex < cpi->active_worst_quality) )
- {
- Q = cpi->avg_frame_qindex;
- }
-
- // For constrained quality dont allow Q less than the cq level
- if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
- (Q < cpi->cq_target_quality) )
- {
- Q = cpi->cq_target_quality;
- }
-
- if ( cpi->gfu_boost > high )
- cpi->active_best_quality = gf_low_motion_minq[Q];
- else if ( cpi->gfu_boost < low )
- cpi->active_best_quality = gf_high_motion_minq[Q];
- else
- {
- int gap = high - low;
- int offset = high - cpi->gfu_boost;
- int qdiff = gf_high_motion_minq[Q] - gf_low_motion_minq[Q];
- int adjustment = ((offset * qdiff) + (gap>>1)) / gap;
-
- cpi->active_best_quality = gf_low_motion_minq[Q] + adjustment;
- }
-
- // Constrained quality use slightly lower active best.
- if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
- {
- cpi->active_best_quality =
- cpi->active_best_quality * 15/16;
- }
- }
- else
- {
- cpi->active_best_quality = inter_minq[Q];
+    // Make an adjustment based on the % of the frame that is static
+ // The main impact of this is at lower Q to prevent overly large key
+ // frames unless a lot of the image is static.
+ if (cpi->kf_zeromotion_pct < 64)
+ cpi->active_best_quality += 4 - (cpi->kf_zeromotion_pct >> 4);
- // For the constant/constrained quality mode we dont want
- // q to fall below the cq level.
- if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
- (cpi->active_best_quality < cpi->cq_target_quality) )
- {
- // If we are strongly undershooting the target rate in the last
- // frames then use the user passed in cq value not the auto
- // cq value.
- if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
- cpi->active_best_quality = cpi->oxcf.cq_level;
- else
- cpi->active_best_quality = cpi->cq_target_quality;
- }
- }
+ // Special case for key frames forced because we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping
+ if (cpi->this_key_frame_forced) {
+ int delta_qindex;
+ int qindex = cpi->last_boosted_qindex;
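+
+      // Ask compute_qdelta() for the qindex offset that takes the real Q
+      // down to roughly 75% of its last boosted value.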
- // Clip the active best and worst quality values to limits
- if (cpi->active_worst_quality > cpi->worst_quality)
- cpi->active_worst_quality = cpi->worst_quality;
+ delta_qindex = compute_qdelta(cpi, qindex,
+ (qindex * 0.75));
- if (cpi->active_best_quality < cpi->best_quality)
+ cpi->active_best_quality = qindex + delta_qindex;
+ if (cpi->active_best_quality < cpi->best_quality)
cpi->active_best_quality = cpi->best_quality;
-
- if (cpi->active_best_quality > cpi->worst_quality)
- cpi->active_best_quality = cpi->worst_quality;
-
- if ( cpi->active_worst_quality < cpi->active_best_quality )
- cpi->active_worst_quality = cpi->active_best_quality;
-
- // Specuial case code to try and match quality with forced key frames
- if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
- {
- Q = cpi->last_boosted_qindex;
}
- else
- {
- // Determine initial Q to try
- Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- }
- last_zbin_oq = cpi->zbin_over_quant;
-
- // Set highest allowed value for Zbin over quant
- if (cm->frame_type == KEY_FRAME)
- zbin_oq_high = 0; //ZBIN_OQ_MAX/16
- else if (cm->refresh_alt_ref_frame || (cm->refresh_golden_frame && !cpi->source_alt_ref_active))
- zbin_oq_high = 16;
- else
- zbin_oq_high = ZBIN_OQ_MAX;
-
- vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
-
- // Limit Q range for the adaptive loop.
- bottom_index = cpi->active_best_quality;
- top_index = cpi->active_worst_quality;
- q_low = cpi->active_best_quality;
- q_high = cpi->active_worst_quality;
-
- loop_count = 0;
+ }
+
+ else if (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame) {
+ int high = 2000;
+ int low = 400;
+
+ // Use the lower of cpi->active_worst_quality and recent
+ // average Q as basis for GF/ARF Q limit unless last frame was
+ // a key frame.
+ if ((cpi->frames_since_key > 1) &&
+ (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
+ Q = cpi->avg_frame_qindex;
+ }
+
+    // For constrained quality, don't allow Q less than the cq level
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < cpi->cq_target_quality)) {
+ Q = cpi->cq_target_quality;
+ }
+
+ if (cpi->gfu_boost > high)
+ cpi->active_best_quality = gf_low_motion_minq[Q];
+ else if (cpi->gfu_boost < low)
+ cpi->active_best_quality = gf_high_motion_minq[Q];
+ else {
+ int gap = high - low;
+ int offset = high - cpi->gfu_boost;
+ int qdiff = gf_high_motion_minq[Q] - gf_low_motion_minq[Q];
+ int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = gf_low_motion_minq[Q] + adjustment;
+ }
+
+    // Constrained quality uses a slightly lower active best quality.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ cpi->active_best_quality =
+ cpi->active_best_quality * 15 / 16;
+ }
+ } else {
+ cpi->active_best_quality = inter_minq[Q];
+
+    // For the constant/constrained quality mode we don't want
+    // q to fall below the cq level.
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (cpi->active_best_quality < cpi->cq_target_quality)) {
+ // If we are strongly undershooting the target rate in the last
+ // frames then use the user passed in cq value not the auto
+ // cq value.
+ if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth)
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ else
+ cpi->active_best_quality = cpi->cq_target_quality;
+ }
+ }
+
+ // Clip the active best and worst quality values to limits
+ if (cpi->active_worst_quality > cpi->worst_quality)
+ cpi->active_worst_quality = cpi->worst_quality;
+
+ if (cpi->active_best_quality < cpi->best_quality)
+ cpi->active_best_quality = cpi->best_quality;
+
+ if (cpi->active_best_quality > cpi->worst_quality)
+ cpi->active_best_quality = cpi->worst_quality;
+
+ if (cpi->active_worst_quality < cpi->active_best_quality)
+ cpi->active_worst_quality = cpi->active_best_quality;
+
+  // Special case code to try to match quality with forced key frames
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ Q = cpi->last_boosted_qindex;
+ } else {
+ // Determine initial Q to try
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ }
+ last_zbin_oq = cpi->zbin_over_quant;
+
+ // Set highest allowed value for Zbin over quant
+ if (cm->frame_type == KEY_FRAME)
+ zbin_oq_high = 0; // ZBIN_OQ_MAX/16
+ else if (cm->refresh_alt_ref_frame || (cm->refresh_golden_frame && !cpi->source_alt_ref_active))
+ zbin_oq_high = 16;
+ else
+ zbin_oq_high = ZBIN_OQ_MAX;
+
+ vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
+
+ // Limit Q range for the adaptive loop.
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+
+ loop_count = 0;
#if CONFIG_HIGH_PRECISION_MV || CONFIG_ENHANCED_INTERP
- if (cm->frame_type != KEY_FRAME)
- {
+ if (cm->frame_type != KEY_FRAME) {
#if CONFIG_ENHANCED_INTERP
- /* TODO: Decide this more intelligently */
- if (sf->search_best_filter)
- {
- cm->mcomp_filter_type = mcomp_filters_to_search[0];
- mcomp_filter_index = 0;
- }
- else
- cm->mcomp_filter_type = EIGHTTAP;
+ /* TODO: Decide this more intelligently */
+ if (sf->search_best_filter) {
+ cm->mcomp_filter_type = mcomp_filters_to_search[0];
+ mcomp_filter_index = 0;
+ } else
+ cm->mcomp_filter_type = EIGHTTAP;
#endif
#if CONFIG_HIGH_PRECISION_MV
- /* TODO: Decide this more intelligently */
- xd->allow_high_precision_mv = (Q < HIGH_PRECISION_MV_QTHRESH);
+ /* TODO: Decide this more intelligently */
+ xd->allow_high_precision_mv = (Q < HIGH_PRECISION_MV_QTHRESH);
#endif
- }
+ }
#endif
#if CONFIG_POSTPROC
- if (cpi->oxcf.noise_sensitivity > 0)
- {
- unsigned char *src;
- int l = 0;
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ unsigned char *src;
+ int l = 0;
- switch (cpi->oxcf.noise_sensitivity)
- {
- case 1:
- l = 20;
- break;
- case 2:
- l = 40;
- break;
- case 3:
- l = 60;
- break;
- case 4:
-
- case 5:
- l = 100;
- break;
- case 6:
- l = 150;
- break;
- }
+ switch (cpi->oxcf.noise_sensitivity) {
+ case 1:
+ l = 20;
+ break;
+ case 2:
+ l = 40;
+ break;
+ case 3:
+ l = 60;
+ break;
+ case 4:
+ case 5:
+ l = 100;
+ break;
+ case 6:
+ l = 150;
+ break;
+ }
- if (cm->frame_type == KEY_FRAME)
- {
- vp8_de_noise(cpi->Source, cpi->Source, l , 1, 0, RTCD(postproc));
- }
- else
- {
- vp8_de_noise(cpi->Source, cpi->Source, l , 1, 0, RTCD(postproc));
- src = cpi->Source->y_buffer;
+ if (cm->frame_type == KEY_FRAME) {
+ vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
+ } else {
+ vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
- if (cpi->Source->y_stride < 0)
- {
- src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
- }
- }
+ src = cpi->Source->y_buffer;
+
+ if (cpi->Source->y_stride < 0) {
+ src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
+ }
}
+ }
#endif
#ifdef OUTPUT_YUV_SRC
- vp8_write_yuv_frame(cpi->Source);
+ vp8_write_yuv_frame(cpi->Source);
#endif
#if CONFIG_ENHANCED_INTERP && RESET_FOREACH_FILTER
- if (sf->search_best_filter)
- {
- q_low0 = q_low;
- q_high0 = q_high;
- Q0 = Q;
- zbin_oq_low0 = zbin_oq_low;
- zbin_oq_high0 = zbin_oq_high;
- last_zbin_oq0 = last_zbin_oq;
- rate_correction_factor0 = cpi->rate_correction_factor;
- gf_rate_correction_factor0 = cpi->gf_rate_correction_factor;
- active_best_quality0 = cpi->active_best_quality;
- active_worst_quality0 = cpi->active_worst_quality;
- }
+ if (sf->search_best_filter) {
+ q_low0 = q_low;
+ q_high0 = q_high;
+ Q0 = Q;
+ zbin_oq_low0 = zbin_oq_low;
+ zbin_oq_high0 = zbin_oq_high;
+ last_zbin_oq0 = last_zbin_oq;
+ rate_correction_factor0 = cpi->rate_correction_factor;
+ gf_rate_correction_factor0 = cpi->gf_rate_correction_factor;
+ active_best_quality0 = cpi->active_best_quality;
+ active_worst_quality0 = cpi->active_worst_quality;
+ }
#endif
- do
- {
- vp8_clear_system_state(); //__asm emms;
+ do {
+ vp8_clear_system_state(); // __asm emms;
- vp8_set_quantizer(cpi, Q);
- this_q = Q;
+ vp8_set_quantizer(cpi, Q);
+ this_q = Q;
- if ( loop_count == 0 )
- {
+ if (loop_count == 0) {
- // setup skip prob for costing in mode/mv decision
- if (cpi->common.mb_no_coeff_skip)
- {
+ // setup skip prob for costing in mode/mv decision
+ if (cpi->common.mb_no_coeff_skip) {
#if CONFIG_NEWENTROPY
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; k++)
- cm->mbskip_pred_probs[k] = cpi->base_skip_false_prob[Q][k];
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; k++)
+ cm->mbskip_pred_probs[k] = cpi->base_skip_false_prob[Q][k];
#else
- cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
+ cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
#endif
- if (cm->frame_type != KEY_FRAME)
- {
- if (cpi->common.refresh_alt_ref_frame)
- {
+ if (cm->frame_type != KEY_FRAME) {
+ if (cpi->common.refresh_alt_ref_frame) {
#if CONFIG_NEWENTROPY
- for (k=0; k<MBSKIP_CONTEXTS; k++)
- {
- if (cpi->last_skip_false_probs[2][k] != 0)
- cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[2][k];
- }
+ for (k = 0; k < MBSKIP_CONTEXTS; k++) {
+ if (cpi->last_skip_false_probs[2][k] != 0)
+ cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[2][k];
+ }
#else
- if (cpi->last_skip_false_probs[2] != 0)
- cpi->prob_skip_false = cpi->last_skip_false_probs[2];
+ if (cpi->last_skip_false_probs[2] != 0)
+ cpi->prob_skip_false = cpi->last_skip_false_probs[2];
#endif
- }
- else if (cpi->common.refresh_golden_frame)
- {
+ } else if (cpi->common.refresh_golden_frame) {
#if CONFIG_NEWENTROPY
- for (k=0; k<MBSKIP_CONTEXTS; k++)
- {
- if (cpi->last_skip_false_probs[1][k] != 0)
- cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[1][k];
- }
+ for (k = 0; k < MBSKIP_CONTEXTS; k++) {
+ if (cpi->last_skip_false_probs[1][k] != 0)
+ cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[1][k];
+ }
#else
- if (cpi->last_skip_false_probs[1] != 0)
- cpi->prob_skip_false = cpi->last_skip_false_probs[1];
+ if (cpi->last_skip_false_probs[1] != 0)
+ cpi->prob_skip_false = cpi->last_skip_false_probs[1];
#endif
- }
- else
- {
+ } else {
#if CONFIG_NEWENTROPY
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; k++)
- {
- if (cpi->last_skip_false_probs[0][k] != 0)
- cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[0][k];
- }
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; k++) {
+ if (cpi->last_skip_false_probs[0][k] != 0)
+ cm->mbskip_pred_probs[k] = cpi->last_skip_false_probs[0][k];
+ }
#else
- if (cpi->last_skip_false_probs[0] != 0)
- cpi->prob_skip_false = cpi->last_skip_false_probs[0];
+ if (cpi->last_skip_false_probs[0] != 0)
+ cpi->prob_skip_false = cpi->last_skip_false_probs[0];
#endif
- }
+ }
- // as this is for cost estimate, let's make sure it does not
- // get extreme either way
+      // As this is only for the cost estimate, make sure it does not
+      // get extreme either way
#if CONFIG_NEWENTROPY
- {
- int k;
- for (k=0; k<MBSKIP_CONTEXTS; ++k)
- {
- if (cm->mbskip_pred_probs[k] < 5)
- cm->mbskip_pred_probs[k] = 5;
-
- if (cm->mbskip_pred_probs[k] > 250)
- cm->mbskip_pred_probs[k] = 250;
-
- if (cpi->is_src_frame_alt_ref)
- cm->mbskip_pred_probs[k] = 1;
- }
- }
+ {
+ int k;
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
+ if (cm->mbskip_pred_probs[k] < 5)
+ cm->mbskip_pred_probs[k] = 5;
+
+ if (cm->mbskip_pred_probs[k] > 250)
+ cm->mbskip_pred_probs[k] = 250;
+
+ if (cpi->is_src_frame_alt_ref)
+ cm->mbskip_pred_probs[k] = 1;
+ }
+ }
#else
- if (cpi->prob_skip_false < 5)
- cpi->prob_skip_false = 5;
+ if (cpi->prob_skip_false < 5)
+ cpi->prob_skip_false = 5;
- if (cpi->prob_skip_false > 250)
- cpi->prob_skip_false = 250;
+ if (cpi->prob_skip_false > 250)
+ cpi->prob_skip_false = 250;
- if (cpi->is_src_frame_alt_ref)
- cpi->prob_skip_false = 1;
+ if (cpi->is_src_frame_alt_ref)
+ cpi->prob_skip_false = 1;
#endif
- }
- }
-
- // Set up entropy depending on frame type.
- if (cm->frame_type == KEY_FRAME)
- vp8_setup_key_frame(cpi);
- else
- vp8_setup_inter_frame(cpi);
}
+ }
+
+ // Set up entropy depending on frame type.
+ if (cm->frame_type == KEY_FRAME)
+ vp8_setup_key_frame(cpi);
+ else
+ vp8_setup_inter_frame(cpi);
+ }
- // transform / motion compensation build reconstruction frame
+ // transform / motion compensation build reconstruction frame
- vp8_encode_frame(cpi);
+ vp8_encode_frame(cpi);
- // Update the skip mb flag probabilities based on the distribution
- // seen in the last encoder iteration.
- update_base_skip_probs( cpi );
+ // Update the skip mb flag probabilities based on the distribution
+ // seen in the last encoder iteration.
+ update_base_skip_probs(cpi);
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state(); // __asm emms;
#if CONFIG_PRED_FILTER
- // Update prediction filter on/off probability based on
- // selection made for the current frame
- if (cm->frame_type != KEY_FRAME)
- update_pred_filt_prob( cpi );
+ // Update prediction filter on/off probability based on
+ // selection made for the current frame
+ if (cm->frame_type != KEY_FRAME)
+ update_pred_filt_prob(cpi);
#endif
- // Dummy pack of the bitstream using up to date stats to get an
- // accurate estimate of output frame size to determine if we need
- // to recode.
- vp8_save_coding_context(cpi);
- cpi->dummy_packing = 1;
- vp8_pack_bitstream(cpi, dest, size);
- cpi->projected_frame_size = (*size) << 3;
- vp8_restore_coding_context(cpi);
-
- if (frame_over_shoot_limit == 0)
- frame_over_shoot_limit = 1;
- active_worst_qchanged = FALSE;
-
- // Special case handling for forced key frames
- if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
- {
- int last_q = Q;
- int kf_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
-
- int high_err_target = cpi->ambient_err;
- int low_err_target = (cpi->ambient_err >> 1);
-
- // Prevent possible divide by zero error below for perfect KF
- kf_err += (!kf_err);
-
- // The key frame is not good enough or we can afford
- // to make it better without undue risk of popping.
- if ( ( (kf_err > high_err_target) &&
- (cpi->projected_frame_size <= frame_over_shoot_limit) ) ||
- ( (kf_err > low_err_target) &&
- (cpi->projected_frame_size <= frame_under_shoot_limit) ) )
- {
- // Lower q_high
- q_high = (Q > q_low) ? (Q - 1) : q_low;
-
- // Adjust Q
- Q = (Q * high_err_target) / kf_err;
- if ( Q < ((q_high + q_low) >> 1))
- Q = (q_high + q_low) >> 1;
- }
- // The key frame is much better than the previous frame
- else if ( (kf_err < low_err_target) &&
- (cpi->projected_frame_size >= frame_under_shoot_limit) )
- {
- // Raise q_low
- q_low = (Q < q_high) ? (Q + 1) : q_high;
-
- // Adjust Q
- Q = (Q * low_err_target) / kf_err;
- if ( Q > ((q_high + q_low + 1) >> 1))
- Q = (q_high + q_low + 1) >> 1;
- }
-
- // Clamp Q to upper and lower limits:
- if (Q > q_high)
- Q = q_high;
- else if (Q < q_low)
- Q = q_low;
-
- Loop = ((Q != last_q)) ? TRUE : FALSE;
+ // Dummy pack of the bitstream using up to date stats to get an
+ // accurate estimate of output frame size to determine if we need
+ // to recode.
+ vp8_save_coding_context(cpi);
+ cpi->dummy_packing = 1;
+ vp8_pack_bitstream(cpi, dest, size);
+ cpi->projected_frame_size = (*size) << 3;
+ vp8_restore_coding_context(cpi);
+
+ if (frame_over_shoot_limit == 0)
+ frame_over_shoot_limit = 1;
+ active_worst_qchanged = FALSE;
+
+ // Special case handling for forced key frames
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ int last_q = Q;
+ int kf_err = vp8_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx],
+ IF_RTCD(&cpi->rtcd.variance));
+
+ int high_err_target = cpi->ambient_err;
+ int low_err_target = (cpi->ambient_err >> 1);
+
+ // Prevent possible divide by zero error below for perfect KF
+ kf_err += (!kf_err);
+
+ // The key frame is not good enough or we can afford
+ // to make it better without undue risk of popping.
+ if (((kf_err > high_err_target) &&
+ (cpi->projected_frame_size <= frame_over_shoot_limit)) ||
+ ((kf_err > low_err_target) &&
+ (cpi->projected_frame_size <= frame_under_shoot_limit))) {
+ // Lower q_high
+ q_high = (Q > q_low) ? (Q - 1) : q_low;
+
+ // Adjust Q
+ Q = (Q * high_err_target) / kf_err;
+ if (Q < ((q_high + q_low) >> 1))
+ Q = (q_high + q_low) >> 1;
+ }
+ // The key frame is much better than the previous frame
+ else if ((kf_err < low_err_target) &&
+ (cpi->projected_frame_size >= frame_under_shoot_limit)) {
+ // Raise q_low
+ q_low = (Q < q_high) ? (Q + 1) : q_high;
+
+ // Adjust Q
+ Q = (Q * low_err_target) / kf_err;
+ if (Q > ((q_high + q_low + 1) >> 1))
+ Q = (q_high + q_low + 1) >> 1;
+ }
+
+ // Clamp Q to upper and lower limits:
+ if (Q > q_high)
+ Q = q_high;
+ else if (Q < q_low)
+ Q = q_low;
+
+ Loop = ((Q != last_q)) ? TRUE : FALSE;
+ }
+
+    // Is the projected frame size out of range, and are we allowed to attempt a recode?
+ else if (recode_loop_test(cpi,
+ frame_over_shoot_limit, frame_under_shoot_limit,
+ Q, top_index, bottom_index)) {
+ int last_q = Q;
+ int Retries = 0;
+
+ // Frame size out of permitted range:
+ // Update correction factor & compute new Q to try...
+
+ // Frame is too large
+ if (cpi->projected_frame_size > cpi->this_frame_target) {
+        q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise q_low to at least the current value
+
+ if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low
+ zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
+
+ if (undershoot_seen || (loop_count > 1)) {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp8_update_rate_correction_factors(cpi, 1);
+
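+          // Once an undershoot has already been seen (or after the first
+          // recode iteration) the [q_low, q_high] bracket is simply
+          // bisected rather than re-derived from the rate model.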
+ Q = (q_high + q_low + 1) / 2;
+
+ // Adjust cpi->zbin_over_quant (only allowed when Q is max)
+ if (Q < MAXQ)
+ cpi->zbin_over_quant = 0;
+ else {
+ zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
+ cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ }
+ } else {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp8_update_rate_correction_factors(cpi, 0);
+
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+ while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10)) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ Retries++;
+ }
}
- // Is the projected frame size out of range and are we allowed to attempt to recode.
- else if ( recode_loop_test( cpi,
- frame_over_shoot_limit, frame_under_shoot_limit,
- Q, top_index, bottom_index ) )
- {
- int last_q = Q;
- int Retries = 0;
-
- // Frame size out of permitted range:
- // Update correction factor & compute new Q to try...
-
- // Frame is too large
- if (cpi->projected_frame_size > cpi->this_frame_target)
- {
- q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise Qlow as to at least the current value
-
- if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
+ overshoot_seen = TRUE;
+ }
+ // Frame is too small
+ else {
+ if (cpi->zbin_over_quant == 0)
+ q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant
+ else // else lower zbin_oq_high
+ zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low;
+
+ if (overshoot_seen || (loop_count > 1)) {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp8_update_rate_correction_factors(cpi, 1);
+
+ Q = (q_high + q_low) / 2;
+
+ // Adjust cpi->zbin_over_quant (only allowed when Q is max)
+ if (Q < MAXQ)
+ cpi->zbin_over_quant = 0;
+ else
+ cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ } else {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp8_update_rate_correction_factors(cpi, 0);
+
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+
+ // Special case reset for qlow for constrained quality.
+ // This should only trigger where there is very substantial
+ // undershoot on a frame and the auto cq level is above
+          // the user passed in value.
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (Q < q_low)) {
+ q_low = Q;
+ }
+
+ while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10)) {
+ vp8_update_rate_correction_factors(cpi, 0);
+ Q = vp8_regulate_q(cpi, cpi->this_frame_target);
+ Retries++;
+ }
+ }
- if ( undershoot_seen || (loop_count > 1) )
- {
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
- if (!active_worst_qchanged)
- vp8_update_rate_correction_factors(cpi, 1);
-
- Q = (q_high + q_low + 1) / 2;
-
- // Adjust cpi->zbin_over_quant (only allowed when Q is max)
- if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
- else
- {
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
- }
- }
- else
- {
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
- if (!active_worst_qchanged)
- vp8_update_rate_correction_factors(cpi, 0);
-
- Q = vp8_regulate_q(cpi, cpi->this_frame_target);
-
- while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10))
- {
- vp8_update_rate_correction_factors(cpi, 0);
- Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- Retries ++;
- }
- }
+ undershoot_seen = TRUE;
+ }
- overshoot_seen = TRUE;
- }
- // Frame is too small
- else
- {
- if (cpi->zbin_over_quant == 0)
- q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant
- else // else lower zbin_oq_high
- zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low;
+ // Clamp Q to upper and lower limits:
+ if (Q > q_high)
+ Q = q_high;
+ else if (Q < q_low)
+ Q = q_low;
- if ( overshoot_seen || (loop_count > 1) )
- {
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
- if (!active_worst_qchanged)
- vp8_update_rate_correction_factors(cpi, 1);
+ // Clamp cpi->zbin_over_quant
+ cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant;
- Q = (q_high + q_low) / 2;
+ // Loop = ((Q != last_q) || (last_zbin_oq != cpi->zbin_over_quant)) ? TRUE : FALSE;
+ Loop = ((Q != last_q)) ? TRUE : FALSE;
+ last_zbin_oq = cpi->zbin_over_quant;
+ } else
+ Loop = FALSE;
- // Adjust cpi->zbin_over_quant (only allowed when Q is max)
- if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
- else
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
- }
- else
- {
- // Update rate_correction_factor unless cpi->active_worst_quality has changed.
- if (!active_worst_qchanged)
- vp8_update_rate_correction_factors(cpi, 0);
-
- Q = vp8_regulate_q(cpi, cpi->this_frame_target);
-
- // Special case reset for qlow for constrained quality.
- // This should only trigger where there is very substantial
- // undershoot on a frame and the auto cq level is above
- // the user passsed in value.
- if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
- (Q < q_low) )
- {
- q_low = Q;
- }
-
- while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10))
- {
- vp8_update_rate_correction_factors(cpi, 0);
- Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- Retries ++;
- }
- }
+ if (cpi->is_src_frame_alt_ref)
+ Loop = FALSE;
- undershoot_seen = TRUE;
+#if CONFIG_ENHANCED_INTERP
+ if (Loop == FALSE && cm->frame_type != KEY_FRAME && sf->search_best_filter) {
+ if (mcomp_filter_index < mcomp_filters) {
+ INT64 err = vp8_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx],
+ IF_RTCD(&cpi->rtcd.variance));
+ INT64 rate = cpi->projected_frame_size << 8;
+ mcomp_filter_cost[mcomp_filter_index] =
+ (RDCOST(cpi->RDMULT, cpi->RDDIV, rate, err));
+ mcomp_filter_index++;
+ if (mcomp_filter_index < mcomp_filters) {
+ cm->mcomp_filter_type = mcomp_filters_to_search[mcomp_filter_index];
+ loop_count = -1;
+ Loop = TRUE;
+ } else {
+ int f;
+ INT64 best_cost = mcomp_filter_cost[0];
+ int mcomp_best_filter = mcomp_filters_to_search[0];
+ for (f = 1; f < mcomp_filters; f++) {
+ if (mcomp_filter_cost[f] < best_cost) {
+ mcomp_best_filter = mcomp_filters_to_search[f];
+ best_cost = mcomp_filter_cost[f];
}
-
- // Clamp Q to upper and lower limits:
- if (Q > q_high)
- Q = q_high;
- else if (Q < q_low)
- Q = q_low;
-
- // Clamp cpi->zbin_over_quant
- cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant;
-
- //Loop = ((Q != last_q) || (last_zbin_oq != cpi->zbin_over_quant)) ? TRUE : FALSE;
- Loop = ((Q != last_q)) ? TRUE : FALSE;
- last_zbin_oq = cpi->zbin_over_quant;
+ }
+ if (mcomp_best_filter != mcomp_filters_to_search[mcomp_filters - 1]) {
+ loop_count = -1;
+ Loop = TRUE;
+ cm->mcomp_filter_type = mcomp_best_filter;
+ }
+ /*
+ printf(" best filter = %d, ( ", mcomp_best_filter);
+ for (f=0;f<mcomp_filters; f++) printf("%d ", mcomp_filter_cost[f]);
+ printf(")\n");
+ */
}
- else
- Loop = FALSE;
-
- if (cpi->is_src_frame_alt_ref)
- Loop = FALSE;
-
-#if CONFIG_ENHANCED_INTERP
- if (Loop == FALSE && cm->frame_type != KEY_FRAME && sf->search_best_filter)
- {
- if (mcomp_filter_index < mcomp_filters)
- {
- INT64 err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
- INT64 rate = cpi->projected_frame_size << 8;
- mcomp_filter_cost[mcomp_filter_index] =
- (RDCOST(cpi->RDMULT, cpi->RDDIV, rate, err));
- mcomp_filter_index++;
- if (mcomp_filter_index < mcomp_filters)
- {
- cm->mcomp_filter_type = mcomp_filters_to_search[mcomp_filter_index];
- loop_count = -1;
- Loop = TRUE;
- }
- else
- {
- int f;
- INT64 best_cost = mcomp_filter_cost[0];
- int mcomp_best_filter = mcomp_filters_to_search[0];
- for (f = 1; f < mcomp_filters; f++)
- {
- if (mcomp_filter_cost[f] < best_cost)
- {
- mcomp_best_filter = mcomp_filters_to_search[f];
- best_cost = mcomp_filter_cost[f];
- }
- }
- if (mcomp_best_filter != mcomp_filters_to_search[mcomp_filters-1])
- {
- loop_count = -1;
- Loop = TRUE;
- cm->mcomp_filter_type = mcomp_best_filter;
- }
- /*
- printf(" best filter = %d, ( ", mcomp_best_filter);
- for (f=0;f<mcomp_filters; f++) printf("%d ", mcomp_filter_cost[f]);
- printf(")\n");
- */
- }
#if RESET_FOREACH_FILTER
- if (Loop == TRUE)
- {
- overshoot_seen = FALSE;
- undershoot_seen = FALSE;
- zbin_oq_low = zbin_oq_low0;
- zbin_oq_high = zbin_oq_high0;
- q_low = q_low0;
- q_high = q_high0;
- Q = Q0;
- cpi->zbin_over_quant = last_zbin_oq = last_zbin_oq0;
- cpi->rate_correction_factor = rate_correction_factor0;
- cpi->gf_rate_correction_factor = gf_rate_correction_factor0;
- cpi->active_best_quality = active_best_quality0;
- cpi->active_worst_quality = active_worst_quality0;
- }
-#endif
- }
+ if (Loop == TRUE) {
+ overshoot_seen = FALSE;
+ undershoot_seen = FALSE;
+ zbin_oq_low = zbin_oq_low0;
+ zbin_oq_high = zbin_oq_high0;
+ q_low = q_low0;
+ q_high = q_high0;
+ Q = Q0;
+ cpi->zbin_over_quant = last_zbin_oq = last_zbin_oq0;
+ cpi->rate_correction_factor = rate_correction_factor0;
+ cpi->gf_rate_correction_factor = gf_rate_correction_factor0;
+ cpi->active_best_quality = active_best_quality0;
+ cpi->active_worst_quality = active_worst_quality0;
}
#endif
+ }
+ }
+#endif
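
For reference, the CONFIG_ENHANCED_INTERP block above re-encodes the frame once per candidate interpolation filter (restarting the loop with loop_count = -1), records a rate-distortion cost per filter -- rate being the packed size shifted left by 8 for precision, distortion the summed squared reconstruction error -- and finally keeps the cheapest filter. A hedged sketch of that argmin, with a made-up rd_cost() standing in for the encoder's RDCOST macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for RDCOST(RDMULT, RDDIV, rate, dist). */
    static int64_t rd_cost(int64_t rdmult, int64_t rddiv,
                           int64_t rate, int64_t dist) {
      return (rate * rdmult) / rddiv + dist;   /* illustrative weighting only */
    }

    int main(void) {
      /* Invented (rate, distortion) pairs for three candidate filters. */
      const int64_t rate[3] = {1200 << 8, 1100 << 8, 1150 << 8};
      const int64_t dist[3] = {50000, 56000, 51000};
      const int filters[3] = {0, 1, 2};
      int64_t best_cost = rd_cost(300, 100, rate[0], dist[0]);
      int best = filters[0], f;

      for (f = 1; f < 3; f++) {
        int64_t c = rd_cost(300, 100, rate[f], dist[f]);
        if (c < best_cost) { best_cost = c; best = filters[f]; }
      }
      printf("best filter = %d (cost %lld)\n", best, (long long)best_cost);
      return 0;
    }
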
- if (Loop == TRUE)
- {
- loop_count++;
+ if (Loop == TRUE) {
+ loop_count++;
#if CONFIG_INTERNAL_STATS
- cpi->tot_recode_hits++;
+ cpi->tot_recode_hits++;
#endif
- }
- }
- while (Loop == TRUE);
-
- // Special case code to reduce pulsing when key frames are forced at a
- // fixed interval. Note the reconstruction error if it is the frame before
- // the force key frame
- if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
- {
- cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
}
-
- // This frame's MVs are saved and will be used in next frame's MV
- // prediction. Last frame has one more line(add to bottom) and one
- // more column(add to right) than cm->mip. The edge elements are
- // initialized to 0.
- if(cm->show_frame) //do not save for altref frame
- {
- int mb_row;
- int mb_col;
- MODE_INFO *tmp = cm->mip;
-
- if(cm->frame_type != KEY_FRAME)
- {
- for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
- {
- for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
- {
- if(tmp->mbmi.ref_frame != INTRA_FRAME)
- cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int;
-
- cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
- cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame;
- tmp++;
- }
- }
+ } while (Loop == TRUE);
+
+ // Special case code to reduce pulsing when key frames are forced at a
+ // fixed interval. Record the reconstruction error if this is the frame
+ // before the forced key frame.
+ if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
+ cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx],
+ IF_RTCD(&cpi->rtcd.variance));
+ }
+
+ // This frame's MVs are saved and will be used in the next frame's MV
+ // prediction. The last-frame grid has one more line (added at the bottom)
+ // and one more column (added at the right) than cm->mip. The edge elements
+ // are initialized to 0.
+ if (cm->show_frame) { // do not save for altref frame
+ int mb_row;
+ int mb_col;
+ MODE_INFO *tmp = cm->mip;
+
+ if (cm->frame_type != KEY_FRAME) {
+ for (mb_row = 0; mb_row < cm->mb_rows + 1; mb_row ++) {
+ for (mb_col = 0; mb_col < cm->mb_cols + 1; mb_col ++) {
+ if (tmp->mbmi.ref_frame != INTRA_FRAME)
+ cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int = tmp->mbmi.mv.as_int;
+
+ cpi->lf_ref_frame_sign_bias[mb_col + mb_row * (cm->mode_info_stride + 1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
+ cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] = tmp->mbmi.ref_frame;
+ tmp++;
}
+ }
}
+ }
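
The MV-saving block above writes into a grid that is one row and one column larger than the macroblock grid, with stride cm->mode_info_stride + 1, so the next frame's MV prediction can read neighbours without bounds checks. A small, self-contained sketch of that index arithmetic, with invented dimensions and values:

    #include <stdio.h>

    int main(void) {
      const int mb_rows = 2, mb_cols = 3;
      const int stride = mb_cols + 1;          /* one extra column on the right */
      int lfmv[(2 + 1) * (3 + 1)] = {0};       /* one extra row at the bottom */
      int r, c;

      for (r = 0; r < mb_rows + 1; r++)
        for (c = 0; c < mb_cols + 1; c++)
          lfmv[c + r * stride] = r * 100 + c;  /* stand-in for tmp->mbmi.mv */

      for (r = 0; r < mb_rows + 1; r++) {
        for (c = 0; c < mb_cols + 1; c++)
          printf("%4d", lfmv[c + r * stride]);
        printf("\n");
      }
      return 0;
    }
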
- // Update the GF useage maps.
- // This is done after completing the compression of a frame when all modes
- // etc. are finalized but before loop filter
- vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
+ // Update the GF usage maps.
+ // This is done after completing the compression of a frame, when all modes
+ // etc. are finalized, but before the loop filter is picked.
+ vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
- if (cm->frame_type == KEY_FRAME)
- cm->refresh_last_frame = 1;
+ if (cm->frame_type == KEY_FRAME)
+ cm->refresh_last_frame = 1;
#if 0
- {
- FILE *f = fopen("gfactive.stt", "a");
- fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
- fclose(f);
- }
+ {
+ FILE *f = fopen("gfactive.stt", "a");
+ fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
+ fclose(f);
+ }
#endif
- cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
#if WRITE_RECON_BUFFER
- if(cm->show_frame)
- write_cx_frame_to_file(cm->frame_to_show,
- cm->current_video_frame);
- else
- write_cx_frame_to_file(cm->frame_to_show,
- cm->current_video_frame+1000);
+ if (cm->show_frame)
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame);
+ else
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 1000);
#endif
- // Pick the loop filter level for the frame.
- loopfilter_frame(cpi, cm);
+ // Pick the loop filter level for the frame.
+ loopfilter_frame(cpi, cm);
- // build the bitstream
- cpi->dummy_packing = 0;
- vp8_pack_bitstream(cpi, dest, size);
+ // build the bitstream
+ cpi->dummy_packing = 0;
+ vp8_pack_bitstream(cpi, dest, size);
#if CONFIG_PRED_FILTER
- // Select the prediction filtering mode to use for the
- // next frame based on the current frame selections
- if(cm->frame_type != KEY_FRAME)
- select_pred_filter_mode (cpi);
+ // Select the prediction filtering mode to use for the
+ // next frame based on the current frame selections
+ if (cm->frame_type != KEY_FRAME)
+ select_pred_filter_mode(cpi);
#endif
- update_reference_frames(cm);
+ update_reference_frames(cm);
#if CONFIG_ADAPTIVE_ENTROPY
- vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
- vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
- vp8_adapt_coef_probs(&cpi->common);
- if (cpi->common.frame_type != KEY_FRAME)
- {
- vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
- vp8_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
- vp8_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
- vp8_copy(cpi->common.fc.i8x8_mode_counts, cpi->i8x8_mode_count);
- vp8_copy(cpi->common.fc.sub_mv_ref_counts, cpi->sub_mv_ref_count);
- vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
- vp8_adapt_mode_probs(&cpi->common);
-
- vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
+ vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
+ vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
+ vp8_adapt_coef_probs(&cpi->common);
+ if (cpi->common.frame_type != KEY_FRAME) {
+ vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
+ vp8_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
+ vp8_copy(cpi->common.fc.bmode_counts, cpi->bmode_count);
+ vp8_copy(cpi->common.fc.i8x8_mode_counts, cpi->i8x8_mode_count);
+ vp8_copy(cpi->common.fc.sub_mv_ref_counts, cpi->sub_mv_ref_count);
+ vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
+ vp8_adapt_mode_probs(&cpi->common);
+
+ vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
#if CONFIG_HIGH_PRECISION_MV
- vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
+ vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
#endif
- vp8_adapt_mv_probs(&cpi->common);
- vp8_update_mode_context(&cpi->common);
- }
+ vp8_adapt_mv_probs(&cpi->common);
+ vp8_update_mode_context(&cpi->common);
+ }
#endif /* CONFIG_ADAPTIVE_ENTROPY */
- /* Move storing frame_type out of the above loop since it is also
- * needed in motion search besides loopfilter */
- cm->last_frame_type = cm->frame_type;
-
- // Keep a copy of the size estimate used in the loop
- loop_size_estimate = cpi->projected_frame_size;
-
- // Update rate control heuristics
- cpi->total_byte_count += (*size);
- cpi->projected_frame_size = (*size) << 3;
-
- if (!active_worst_qchanged)
- vp8_update_rate_correction_factors(cpi, 2);
-
- cpi->last_q[cm->frame_type] = cm->base_qindex;
-
- // Keep record of last boosted (KF/KF/ARF) Q value.
- // If the current frame is coded at a lower Q then we also update it.
- // If all mbs in this group are skipped only update if the Q value is
- // better than that already stored.
- // This is used to help set quality in forced key frames to reduce popping
- if ( (cm->base_qindex < cpi->last_boosted_qindex) ||
- ( (cpi->static_mb_pct < 100) &&
- ( (cm->frame_type == KEY_FRAME) ||
- cm->refresh_alt_ref_frame ||
- (cm->refresh_golden_frame && !cpi->is_src_frame_alt_ref) ) ) )
- {
- cpi->last_boosted_qindex = cm->base_qindex;
- }
-
- if (cm->frame_type == KEY_FRAME)
- {
- vp8_adjust_key_frame_context(cpi);
- }
-
- // Keep a record of ambient average Q.
- if (cm->frame_type != KEY_FRAME)
- cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
-
- // Keep a record from which we can calculate the average Q excluding GF updates and key frames
- if ((cm->frame_type != KEY_FRAME) && !cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)
- {
- cpi->ni_frames++;
- cpi->tot_q += vp8_convert_qindex_to_q(Q);
- cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames;
-
- // Calculate the average Q for normal inter frames (not key or GFU
- // frames).
- cpi->ni_tot_qi += Q;
- cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
- }
-
- // Update the buffer level variable.
- // Non-viewable frames are a special case and are treated as pure overhead.
- if ( !cm->show_frame )
- cpi->bits_off_target -= cpi->projected_frame_size;
+ /* Move storing frame_type out of the above loop since it is also
+  * needed by motion search as well as the loop filter */
+ cm->last_frame_type = cm->frame_type;
+
+ // Keep a copy of the size estimate used in the loop
+ loop_size_estimate = cpi->projected_frame_size;
+
+ // Update rate control heuristics
+ cpi->total_byte_count += (*size);
+ cpi->projected_frame_size = (*size) << 3;
+
+ if (!active_worst_qchanged)
+ vp8_update_rate_correction_factors(cpi, 2);
+
+ cpi->last_q[cm->frame_type] = cm->base_qindex;
+
+ // Keep a record of the last boosted (KF/GF/ARF) Q value.
+ // If the current frame is coded at a lower Q, then we also update it.
+ // If all MBs in this group are skipped, only update if the Q value is
+ // better than that already stored.
+ // This is used to help set quality in forced key frames to reduce popping.
+ if ((cm->base_qindex < cpi->last_boosted_qindex) ||
+ ((cpi->static_mb_pct < 100) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cm->refresh_alt_ref_frame ||
+ (cm->refresh_golden_frame && !cpi->is_src_frame_alt_ref)))) {
+ cpi->last_boosted_qindex = cm->base_qindex;
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp8_adjust_key_frame_context(cpi);
+ }
+
+ // Keep a record of ambient average Q.
+ if (cm->frame_type != KEY_FRAME)
+ cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
+
+ // Keep a record from which we can calculate the average Q excluding GF updates and key frames
+ if ((cm->frame_type != KEY_FRAME) && !cm->refresh_golden_frame && !cm->refresh_alt_ref_frame) {
+ cpi->ni_frames++;
+ cpi->tot_q += vp8_convert_qindex_to_q(Q);
+ cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames;
+
+ // Calculate the average Q for normal inter frames (not key or GFU
+ // frames).
+ cpi->ni_tot_qi += Q;
+ cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
+ }
+
+ // Update the buffer level variable.
+ // Non-viewable frames are a special case and are treated as pure overhead.
+ if (!cm->show_frame)
+ cpi->bits_off_target -= cpi->projected_frame_size;
+ else
+ cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
+
+ // Clip the buffer level at the maximum buffer size
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+
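
bits_off_target above behaves like a leaky-bucket buffer model: each shown frame deposits the per-frame bandwidth allocation and withdraws the bits actually produced, non-shown altref frames only withdraw (pure overhead), and the balance is clipped at the configured maximum buffer size. A compact sketch of that accounting, with invented numbers:

    #include <stdio.h>

    int main(void) {
      long long bits_off_target = 0;
      const long long per_frame_bandwidth = 40000;   /* hypothetical allocation */
      const long long max_buffer = 200000;           /* hypothetical maximum_buffer_size */
      const long long frame_bits[4] = {55000, 30000, 38000, 20000};
      const int shown[4] = {1, 1, 0, 1};             /* third frame: non-shown altref */
      int i;

      for (i = 0; i < 4; i++) {
        if (!shown[i])
          bits_off_target -= frame_bits[i];          /* pure overhead */
        else
          bits_off_target += per_frame_bandwidth - frame_bits[i];
        if (bits_off_target > max_buffer)            /* clip at the bucket size */
          bits_off_target = max_buffer;
        printf("frame %d: buffer level %lld\n", i, bits_off_target);
      }
      return 0;
    }
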
+ // Rolling monitors of whether we are over- or under-spending, used to help regulate min and max Q in two pass.
+ cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
+ cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
+ cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
+ cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
+
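
The rolling monitors above are rounded integer exponential moving averages: new = (3*old + x + 2) / 4 keeps 3/4 of the history and new = (31*old + x + 16) / 32 keeps 31/32, the +2 and +16 being half-denominator rounding terms; the avg_frame_qindex update earlier, (2 + 3*old + x) >> 2, is the same 3/4 form. A tiny check of the recurrence:

    #include <stdio.h>

    /* EMA keeping (n-1)/n of history, rounded: new = (old*(n-1) + x + n/2) / n */
    static int ema(int old, int x, int n) { return (old * (n - 1) + x + n / 2) / n; }

    int main(void) {
      int fast = 1000, slow = 1000, i;
      for (i = 0; i < 8; i++) {       /* feed a constant 2000-bit signal */
        fast = ema(fast, 2000, 4);    /* the 3/4-weight monitor */
        slow = ema(slow, 2000, 32);   /* the 31/32-weight monitor */
        printf("step %d: fast=%d slow=%d\n", i, fast, slow);
      }
      return 0;
    }
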
+ // Actual bits spent
+ cpi->total_actual_bits += cpi->projected_frame_size;
+
+ // Debug stats
+ cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
+
+ cpi->buffer_level = cpi->bits_off_target;
+
+ // Update the bits left for the KF and GF groups to account for overshoot or undershoot on these frames
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0;
+ } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
+ cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+ }
+
+ // Update the skip mb flag probabilities based on the distribution seen
+ // in this frame.
+ update_base_skip_probs(cpi);
+
+#if 0  // 1 && CONFIG_INTERNAL_STATS
+ {
+ FILE *f = fopen("tmp.stt", "a");
+ int recon_err;
+
+ vp8_clear_system_state(); // __asm emms;
+
+ recon_err = vp8_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx],
+ IF_RTCD(&cpi->rtcd.variance));
+
+ if (cpi->twopass.total_left_stats->coded_error != 0.0)
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%6d %5d %5d %5d %8d %8.2f %10d %10.3f"
+ "%10.3f %8d %10d %10d %10d\n",
+ cpi->common.current_video_frame, cpi->this_frame_target,
+ cpi->projected_frame_size, loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ vp8_convert_qindex_to_q(cm->base_qindex),
+ (double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp8_convert_qindex_to_q(cpi->active_best_quality),
+ vp8_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp8_convert_qindex_to_q(cpi->ni_av_qi),
+ vp8_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->zbin_over_quant,
+ // cpi->avg_frame_qindex, cpi->zbin_over_quant,
+ cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats->coded_error,
+ (double)cpi->twopass.bits_left /
+ cpi->twopass.total_left_stats->coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
else
- cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
-
- // Clip the buffer level at the maximum buffer size
- if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
- cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
-
- // Rolling monitors of whether we are over or underspending used to help regulate min and Max Q in two pass.
- cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
- cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
- cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
- cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
-
- // Actual bits spent
- cpi->total_actual_bits += cpi->projected_frame_size;
-
- // Debug stats
- cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
-
- cpi->buffer_level = cpi->bits_off_target;
-
- // Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames
- if (cm->frame_type == KEY_FRAME)
- {
- cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
-
- if (cpi->twopass.kf_group_bits < 0)
- cpi->twopass.kf_group_bits = 0 ;
- }
- else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
- {
- cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
-
- if (cpi->twopass.gf_group_bits < 0)
- cpi->twopass.gf_group_bits = 0 ;
- }
-
- // Update the skip mb flag probabilities based on the distribution seen
- // in this frame.
- update_base_skip_probs( cpi );
-
-#if 0//1 && CONFIG_INTERNAL_STATS
- {
- FILE *f = fopen("tmp.stt", "a");
- int recon_err;
-
- vp8_clear_system_state(); //__asm emms;
-
- recon_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
-
- if (cpi->twopass.total_left_stats->coded_error != 0.0)
- fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
- "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
- "%6d %5d %5d %5d %8d %8.2f %10d %10.3f"
- "%10.3f %8d %10d %10d %10d\n",
- cpi->common.current_video_frame, cpi->this_frame_target,
- cpi->projected_frame_size, loop_size_estimate,
- (cpi->projected_frame_size - cpi->this_frame_target),
- (int)cpi->total_target_vs_actual,
- (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
- (int)cpi->total_actual_bits,
- vp8_convert_qindex_to_q(cm->base_qindex),
- (double)vp8_dc_quant(cm->base_qindex,0)/4.0,
- vp8_convert_qindex_to_q(cpi->active_best_quality),
- vp8_convert_qindex_to_q(cpi->active_worst_quality),
- cpi->avg_q,
- vp8_convert_qindex_to_q(cpi->ni_av_qi),
- vp8_convert_qindex_to_q(cpi->cq_target_quality),
- cpi->zbin_over_quant,
- //cpi->avg_frame_qindex, cpi->zbin_over_quant,
- cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
- cm->frame_type, cpi->gfu_boost,
- cpi->twopass.est_max_qcorrection_factor,
- (int)cpi->twopass.bits_left,
- cpi->twopass.total_left_stats->coded_error,
- (double)cpi->twopass.bits_left /
- cpi->twopass.total_left_stats->coded_error,
- cpi->tot_recode_hits, recon_err, cpi->kf_boost,
- cpi->kf_zeromotion_pct);
- else
- fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
- "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
- "%6d %5d %5d %5d %8d %8.2f %10d %10.3f"
- "%8d %10d %10d %10d\n",
- cpi->common.current_video_frame,
- cpi->this_frame_target, cpi->projected_frame_size,
- loop_size_estimate,
- (cpi->projected_frame_size - cpi->this_frame_target),
- (int)cpi->total_target_vs_actual,
- (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
- (int)cpi->total_actual_bits,
- vp8_convert_qindex_to_q(cm->base_qindex),
- (double)vp8_dc_quant(cm->base_qindex,0)/4.0,
- vp8_convert_qindex_to_q(cpi->active_best_quality),
- vp8_convert_qindex_to_q(cpi->active_worst_quality),
- cpi->avg_q,
- vp8_convert_qindex_to_q(cpi->ni_av_qi),
- vp8_convert_qindex_to_q(cpi->cq_target_quality),
- cpi->zbin_over_quant,
- //cpi->avg_frame_qindex, cpi->zbin_over_quant,
- cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
- cm->frame_type, cpi->gfu_boost,
- cpi->twopass.est_max_qcorrection_factor,
- (int)cpi->twopass.bits_left,
- cpi->twopass.total_left_stats->coded_error,
- cpi->tot_recode_hits, recon_err, cpi->kf_boost,
- cpi->kf_zeromotion_pct);
-
- fclose(f);
-
- if ( 0 )
- {
- FILE *fmodes = fopen("Modes.stt", "a");
- int i;
-
- fprintf(fmodes, "%6d:%1d:%1d:%1d ",
- cpi->common.current_video_frame,
- cm->frame_type, cm->refresh_golden_frame,
- cm->refresh_alt_ref_frame);
-
- for (i = 0; i < MAX_MODES; i++)
- fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
-
- fprintf(fmodes, "\n");
-
- fclose(fmodes);
- }
- }
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%6d %5d %5d %5d %8d %8.2f %10d %10.3f"
+ "%8d %10d %10d %10d\n",
+ cpi->common.current_video_frame,
+ cpi->this_frame_target, cpi->projected_frame_size,
+ loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ vp8_convert_qindex_to_q(cm->base_qindex),
+ (double)vp8_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp8_convert_qindex_to_q(cpi->active_best_quality),
+ vp8_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp8_convert_qindex_to_q(cpi->ni_av_qi),
+ vp8_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->zbin_over_quant,
+ // cpi->avg_frame_qindex, cpi->zbin_over_quant,
+ cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats->coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
+
+ fclose(f);
+
+ if (0) {
+ FILE *fmodes = fopen("Modes.stt", "a");
+ int i;
+
+ fprintf(fmodes, "%6d:%1d:%1d:%1d ",
+ cpi->common.current_video_frame,
+ cm->frame_type, cm->refresh_golden_frame,
+ cm->refresh_alt_ref_frame);
+
+ for (i = 0; i < MAX_MODES; i++)
+ fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
+
+ fprintf(fmodes, "\n");
+
+ fclose(fmodes);
+ }
+ }
#endif
#if 0
- // Debug stats for segment feature experiments.
- print_seg_map(cpi);
+ // Debug stats for segment feature experiments.
+ print_seg_map(cpi);
#endif
- // If this was a kf or Gf note the Q
- if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
- cm->last_kf_gf_q = cm->base_qindex;
+ // If this was a KF or GF, note the Q
+ if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
+ cm->last_kf_gf_q = cm->base_qindex;
- if (cm->refresh_golden_frame == 1)
- cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
- else
- cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
+ if (cm->refresh_golden_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
+ else
+ cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
- if (cm->refresh_alt_ref_frame == 1)
- cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
- else
- cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
+ if (cm->refresh_alt_ref_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
+ else
+ cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
- if (cm->refresh_last_frame & cm->refresh_golden_frame) // both refreshed
- cpi->gold_is_last = 1;
- else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
- cpi->gold_is_last = 0;
+ if (cm->refresh_last_frame & cm->refresh_golden_frame) // both refreshed
+ cpi->gold_is_last = 1;
+ else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
+ cpi->gold_is_last = 0;
- if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) // both refreshed
- cpi->alt_is_last = 1;
- else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) // 1 refreshed but not the other
- cpi->alt_is_last = 0;
+ if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) // both refreshed
+ cpi->alt_is_last = 1;
+ else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) // 1 refreshed but not the other
+ cpi->alt_is_last = 0;
- if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) // both refreshed
- cpi->gold_is_alt = 1;
- else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
- cpi->gold_is_alt = 0;
+ if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) // both refreshed
+ cpi->gold_is_alt = 1;
+ else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other
+ cpi->gold_is_alt = 0;
- cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;
+ cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;
- if (cpi->gold_is_last)
- cpi->ref_frame_flags &= ~VP8_GOLD_FLAG;
+ if (cpi->gold_is_last)
+ cpi->ref_frame_flags &= ~VP8_GOLD_FLAG;
- if (cpi->alt_is_last)
- cpi->ref_frame_flags &= ~VP8_ALT_FLAG;
+ if (cpi->alt_is_last)
+ cpi->ref_frame_flags &= ~VP8_ALT_FLAG;
- if (cpi->gold_is_alt)
- cpi->ref_frame_flags &= ~VP8_ALT_FLAG;
+ if (cpi->gold_is_alt)
+ cpi->ref_frame_flags &= ~VP8_ALT_FLAG;
- if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
- // Update the alternate reference frame stats as appropriate.
- update_alt_ref_frame_stats(cpi);
- else
- // Update the Golden frame stats as appropriate.
- update_golden_frame_stats(cpi);
+ if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
+ // Update the alternate reference frame stats as appropriate.
+ update_alt_ref_frame_stats(cpi);
+ else
+ // Update the Golden frame stats as appropriate.
+ update_golden_frame_stats(cpi);
- if (cm->frame_type == KEY_FRAME)
- {
- // Tell the caller that the frame was coded as a key frame
- *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
+ if (cm->frame_type == KEY_FRAME) {
+ // Tell the caller that the frame was coded as a key frame
+ *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
- // As this frame is a key frame the next defaults to an inter frame.
- cm->frame_type = INTER_FRAME;
- }
- else
- {
- *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
- }
+ // As this frame is a key frame, the next defaults to an inter frame.
+ cm->frame_type = INTER_FRAME;
+ } else {
+ *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
+ }
- // Clear the one shot update flags for segmentation map and mode/ref loop filter deltas.
- xd->update_mb_segmentation_map = 0;
- xd->update_mb_segmentation_data = 0;
- xd->mode_ref_lf_delta_update = 0;
+ // Clear the one-shot update flags for segmentation map and mode/ref loop filter deltas.
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ xd->mode_ref_lf_delta_update = 0;
- // Dont increment frame counters if this was an altref buffer update not a real frame
- if (cm->show_frame)
- {
- cm->current_video_frame++;
- cpi->frames_since_key++;
- }
+ // Don't increment frame counters if this was an altref buffer update, not a real frame
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ cpi->frames_since_key++;
+ }
- // reset to normal state now that we are done.
+ // reset to normal state now that we are done.
#if 0
- {
- char filename[512];
- FILE *recon_file;
- sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
- recon_file = fopen(filename, "wb");
- fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
- cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
- fclose(recon_file);
- }
+ {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
+ recon_file = fopen(filename, "wb");
+ fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
+ cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
+ fclose(recon_file);
+ }
#endif
#ifdef OUTPUT_YUV_REC
- vp8_write_yuv_rec_frame(cm);
+ vp8_write_yuv_rec_frame(cm);
#endif
- if(cm->show_frame)
- {
- vpx_memcpy(cm->prev_mip, cm->mip,
- (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
- }
- else
- {
- vpx_memset(cm->prev_mip, 0,
- (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
- }
+ if (cm->show_frame) {
+ vpx_memcpy(cm->prev_mip, cm->mip,
+ (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
+ } else {
+ vpx_memset(cm->prev_mip, 0,
+ (cm->mb_cols + 1) * (cm->mb_rows + 1)* sizeof(MODE_INFO));
+ }
}
-static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
-{
+static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) {
- if (!cpi->common.refresh_alt_ref_frame)
- vp8_second_pass(cpi);
+ if (!cpi->common.refresh_alt_ref_frame)
+ vp8_second_pass(cpi);
- encode_frame_to_data_rate(cpi, size, dest, frame_flags);
- cpi->twopass.bits_left -= 8 * *size;
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+ cpi->twopass.bits_left -= 8 * *size;
- if (!cpi->common.refresh_alt_ref_frame)
- {
- double lower_bounds_min_rate = FRAME_OVERHEAD_BITS*cpi->oxcf.frame_rate;
- double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
- *cpi->oxcf.two_pass_vbrmin_section / 100);
+ if (!cpi->common.refresh_alt_ref_frame) {
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.frame_rate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
- if (two_pass_min_rate < lower_bounds_min_rate)
- two_pass_min_rate = lower_bounds_min_rate;
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
- cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.frame_rate);
- }
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.frame_rate);
+ }
}
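
Pass2Encode above returns a small credit to the two-pass bit pool on every non-altref frame: the per-second floor is two_pass_vbrmin_section percent of the target bandwidth, but never less than the fixed per-frame overhead times the frame rate, and one frame's share of that floor goes back into bits_left. A sketch of the floor computation (FRAME_OVERHEAD_BITS here and all values are invented stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_OVERHEAD_BITS 150   /* hypothetical stand-in for the real constant */

    int main(void) {
      const double frame_rate = 30.0;
      const double target_bandwidth = 400000.0;   /* bits per second, invented */
      const double vbrmin_section = 2.0;          /* percent, invented */
      double lower_bound, min_rate;
      int64_t per_frame_credit;

      lower_bound = FRAME_OVERHEAD_BITS * frame_rate;
      min_rate = target_bandwidth * vbrmin_section / 100.0;
      if (min_rate < lower_bound)
        min_rate = lower_bound;

      /* One frame's share of the floor is credited back to the pool. */
      per_frame_credit = (int64_t)(min_rate / frame_rate);
      printf("min rate %.0f bps -> %lld bits back per frame\n",
             min_rate, (long long)per_frame_credit);
      return 0;
    }
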
-//For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
+// For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
#if HAVE_ARMV7
extern void vp8_push_neon(int64_t *store);
extern void vp8_pop_neon(int64_t *store);
#endif
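
The push/pop pair declared above exists because the 32-bit ARM procedure call standard (AAPCS) makes VFP/NEON registers d8-d15 callee-saved: hand-written NEON kernels that clobber them must spill all eight 64-bit registers at the C boundary and restore them on exit, which is what the int64_t store_reg[8] arrays below are for. A sketch of the wrapper pattern, with the assembly bodies stubbed out:

    #include <stdint.h>

    /* The real vp8_push_neon/vp8_pop_neon are assembly (store/load of d8-d15);
     * these stubs only illustrate the contract: eight 64-bit slots, d8..d15. */
    static void push_neon_stub(int64_t store[8]) { (void)store; }
    static void pop_neon_stub(int64_t store[8])  { (void)store; }

    static void neon_kernel_that_clobbers_d8_d15(void) { /* work happens here */ }

    void guarded_call(void) {
      int64_t store_reg[8];            /* one slot per callee-saved d register */
      push_neon_stub(store_reg);
      neon_kernel_that_clobbers_d8_d15();
      pop_neon_stub(store_reg);
    }
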
-int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
-{
+int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) {
#if HAVE_ARMV7
- int64_t store_reg[8];
+ int64_t store_reg[8];
#endif
- VP8_COMP *cpi = (VP8_COMP *) ptr;
- VP8_COMMON *cm = &cpi->common;
- struct vpx_usec_timer timer;
- int res = 0;
+ VP8_COMP *cpi = (VP8_COMP *) ptr;
+ VP8_COMMON *cm = &cpi->common;
+ struct vpx_usec_timer timer;
+ int res = 0;
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_push_neon(store_reg);
- }
+ {
+ vp8_push_neon(store_reg);
+ }
#endif
- vpx_usec_timer_start(&timer);
- if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
- frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
- res = -1;
- cm->clr_type = sd->clrtype;
- vpx_usec_timer_mark(&timer);
- cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+ vpx_usec_timer_start(&timer);
+ if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+ frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
+ res = -1;
+ cm->clr_type = sd->clrtype;
+ vpx_usec_timer_mark(&timer);
+ cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_pop_neon(store_reg);
- }
+ {
+ vp8_pop_neon(store_reg);
+ }
#endif
- return res;
+ return res;
}
-static int frame_is_reference(const VP8_COMP *cpi)
-{
- const VP8_COMMON *cm = &cpi->common;
- const MACROBLOCKD *xd = &cpi->mb.e_mbd;
+static int frame_is_reference(const VP8_COMP *cpi) {
+ const VP8_COMMON *cm = &cpi->common;
+ const MACROBLOCKD *xd = &cpi->mb.e_mbd;
- return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
- || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
- || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
- || cm->refresh_entropy_probs
- || xd->mode_ref_lf_delta_update
- || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
+ return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
+ || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
+ || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
+ || cm->refresh_entropy_probs
+ || xd->mode_ref_lf_delta_update
+ || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
}
-int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush)
-{
+int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, int64_t *time_stamp, int64_t *time_end, int flush) {
#if HAVE_ARMV7
- int64_t store_reg[8];
+ int64_t store_reg[8];
#endif
- VP8_COMP *cpi = (VP8_COMP *) ptr;
- VP8_COMMON *cm = &cpi->common;
- struct vpx_usec_timer cmptimer;
- YV12_BUFFER_CONFIG *force_src_buffer = NULL;
+ VP8_COMP *cpi = (VP8_COMP *) ptr;
+ VP8_COMMON *cm = &cpi->common;
+ struct vpx_usec_timer cmptimer;
+ YV12_BUFFER_CONFIG *force_src_buffer = NULL;
- if (!cpi)
- return -1;
+ if (!cpi)
+ return -1;
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_push_neon(store_reg);
- }
+ {
+ vp8_push_neon(store_reg);
+ }
#endif
- vpx_usec_timer_start(&cmptimer);
+ vpx_usec_timer_start(&cmptimer);
- cpi->source = NULL;
+ cpi->source = NULL;
#if CONFIG_HIGH_PRECISION_MV
- cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
+ cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
#endif
- // Should we code an alternate reference frame
- if (cpi->oxcf.play_alternate &&
- cpi->source_alt_ref_pending)
- {
- if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
- cpi->frames_till_gf_update_due)))
- {
- cpi->alt_ref_source = cpi->source;
- if (cpi->oxcf.arnr_max_frames > 0)
- {
- vp8_temporal_filter_prepare_c(cpi,
- cpi->frames_till_gf_update_due);
- force_src_buffer = &cpi->alt_ref_buffer;
- }
- cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
- cm->refresh_alt_ref_frame = 1;
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 0;
- cm->show_frame = 0;
- cpi->source_alt_ref_pending = FALSE; // Clear Pending altf Ref flag.
- cpi->is_src_frame_alt_ref = 0;
- }
- }
-
- if (!cpi->source)
- {
- if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush)))
- {
- cm->show_frame = 1;
-
- cpi->is_src_frame_alt_ref = cpi->alt_ref_source
- && (cpi->source == cpi->alt_ref_source);
-
- if(cpi->is_src_frame_alt_ref)
- cpi->alt_ref_source = NULL;
- }
- }
-
- if (cpi->source)
- {
- cpi->un_scaled_source =
- cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
- *time_stamp = cpi->source->ts_start;
- *time_end = cpi->source->ts_end;
- *frame_flags = cpi->source->flags;
+ // Should we code an alternate reference frame?
+ if (cpi->oxcf.play_alternate &&
+ cpi->source_alt_ref_pending) {
+ if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
+ cpi->frames_till_gf_update_due))) {
+ cpi->alt_ref_source = cpi->source;
+ if (cpi->oxcf.arnr_max_frames > 0) {
+ vp8_temporal_filter_prepare_c(cpi,
+ cpi->frames_till_gf_update_due);
+ force_src_buffer = &cpi->alt_ref_buffer;
+ }
+ cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
+ cm->refresh_alt_ref_frame = 1;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 0;
+ cm->show_frame = 0;
+ cpi->source_alt_ref_pending = FALSE; // Clear pending alt ref flag.
+ cpi->is_src_frame_alt_ref = 0;
+ }
+ }
+
+ if (!cpi->source) {
+ if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
+ cm->show_frame = 1;
+
+ cpi->is_src_frame_alt_ref = cpi->alt_ref_source
+ && (cpi->source == cpi->alt_ref_source);
+
+ if (cpi->is_src_frame_alt_ref)
+ cpi->alt_ref_source = NULL;
+ }
+ }
+
+ if (cpi->source) {
+ cpi->un_scaled_source =
+ cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
+ *time_stamp = cpi->source->ts_start;
+ *time_end = cpi->source->ts_end;
+ *frame_flags = cpi->source->flags;
+ } else {
+ *size = 0;
+ if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
+ vp8_end_first_pass(cpi); /* get last stats packet */
+ cpi->twopass.first_pass_done = 1;
}
- else
- {
- *size = 0;
- if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
- {
- vp8_end_first_pass(cpi); /* get last stats packet */
- cpi->twopass.first_pass_done = 1;
- }
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
-#endif
- {
- vp8_pop_neon(store_reg);
- }
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- return -1;
- }
-
- if (cpi->source->ts_start < cpi->first_time_stamp_ever)
{
- cpi->first_time_stamp_ever = cpi->source->ts_start;
- cpi->last_end_time_stamp_seen = cpi->source->ts_start;
+ vp8_pop_neon(store_reg);
}
+#endif
+ return -1;
+ }
- // adjust frame rates based on timestamps given
- if (!cm->refresh_alt_ref_frame)
- {
- int64_t this_duration;
- int step = 0;
+ if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
+ cpi->first_time_stamp_ever = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_start;
+ }
- if (cpi->source->ts_start == cpi->first_time_stamp_ever)
- {
- this_duration = cpi->source->ts_end - cpi->source->ts_start;
- step = 1;
- }
- else
- {
- int64_t last_duration;
-
- this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
- last_duration = cpi->last_end_time_stamp_seen
- - cpi->last_time_stamp_seen;
- // do a step update if the duration changes by 10%
- if (last_duration)
- step = ((this_duration - last_duration) * 10 / last_duration);
- }
+ // Adjust the frame rate based on the timestamps given
+ if (!cm->refresh_alt_ref_frame) {
+ int64_t this_duration;
+ int step = 0;
- if (this_duration)
- {
- if (step)
- vp8_new_frame_rate(cpi, 10000000.0 / this_duration);
- else
- {
- double avg_duration, interval;
+ if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
+ this_duration = cpi->source->ts_end - cpi->source->ts_start;
+ step = 1;
+ } else {
+ int64_t last_duration;
- /* Average this frame's rate into the last second's average
- * frame rate. If we haven't seen 1 second yet, then average
- * over the whole interval seen.
- */
- interval = cpi->source->ts_end - cpi->first_time_stamp_ever;
- if(interval > 10000000.0)
- interval = 10000000;
+ this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
+ last_duration = cpi->last_end_time_stamp_seen
+ - cpi->last_time_stamp_seen;
+ // do a step update if the duration changes by 10% or more
+ if (last_duration)
+ step = ((this_duration - last_duration) * 10 / last_duration);
+ }
- avg_duration = 10000000.0 / cpi->oxcf.frame_rate;
- avg_duration *= (interval - avg_duration + this_duration);
- avg_duration /= interval;
+ if (this_duration) {
+ if (step)
+ vp8_new_frame_rate(cpi, 10000000.0 / this_duration);
+ else {
+ double avg_duration, interval;
- vp8_new_frame_rate(cpi, 10000000.0 / avg_duration);
- }
- }
+ /* Average this frame's rate into the last second's average
+ * frame rate. If we haven't seen 1 second yet, then average
+ * over the whole interval seen.
+ */
+ interval = cpi->source->ts_end - cpi->first_time_stamp_ever;
+ if (interval > 10000000.0)
+ interval = 10000000;
- cpi->last_time_stamp_seen = cpi->source->ts_start;
- cpi->last_end_time_stamp_seen = cpi->source->ts_end;
+ avg_duration = 10000000.0 / cpi->oxcf.frame_rate;
+ avg_duration *= (interval - avg_duration + this_duration);
+ avg_duration /= interval;
+
+ vp8_new_frame_rate(cpi, 10000000.0 / avg_duration);
+ }
}
- // start with a 0 size frame
- *size = 0;
+ cpi->last_time_stamp_seen = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_end;
+ }
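
The timestamp handling above works in an assumed 10,000,000-ticks-per-second timebase: a step change (frame duration moving by 10% or more) resets the frame rate outright, otherwise the new duration is blended into an average taken over at most the last second of input. A self-contained sketch of the same arithmetic, with invented timestamps:

    #include <stdint.h>
    #include <stdio.h>

    #define TICKS 10000000.0   /* timebase assumed from the code above */

    int main(void) {
      /* Invented (start, end) timestamps: three frames at 30fps, one at 15fps. */
      int64_t ts[4][2] = {{0, 333333}, {333333, 666667},
                          {666667, 1000000}, {1000000, 1666667}};
      int64_t first_ever = ts[0][0], last_start = 0, last_end = 0;
      double frame_rate = 30.0;
      int i;

      for (i = 0; i < 4; i++) {
        int64_t this_duration;
        int step = 0;
        if (i == 0) {                          /* first frame ever seen */
          this_duration = ts[i][1] - ts[i][0];
          step = 1;
        } else {
          int64_t last_duration = last_end - last_start;
          this_duration = ts[i][1] - last_end;
          if (last_duration)                   /* step if moved by >= 10% */
            step = (int)((this_duration - last_duration) * 10 / last_duration);
        }
        if (this_duration) {
          if (step) {
            frame_rate = TICKS / this_duration;
          } else {                             /* blend over at most 1 second */
            double interval = (double)(ts[i][1] - first_ever);
            double avg_duration = TICKS / frame_rate;
            if (interval > TICKS) interval = TICKS;
            avg_duration *= (interval - avg_duration + this_duration);
            avg_duration /= interval;
            frame_rate = TICKS / avg_duration;
          }
        }
        last_start = ts[i][0];
        last_end = ts[i][1];
        printf("frame %d: estimated %.2f fps\n", i, frame_rate);
      }
      return 0;
    }
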
- // Clear down mmx registers
- vp8_clear_system_state(); //__asm emms;
+ // Start with a zero-size frame
+ *size = 0;
- cm->frame_type = INTER_FRAME;
- cm->frame_flags = *frame_flags;
+ // Clear down MMX registers
+ vp8_clear_system_state(); // __asm emms;
+
+ cm->frame_type = INTER_FRAME;
+ cm->frame_flags = *frame_flags;
#if 0
- if (cm->refresh_alt_ref_frame)
- {
- //cm->refresh_golden_frame = 1;
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 0;
- }
- else
- {
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 1;
- }
+ if (cm->refresh_alt_ref_frame) {
+ // cm->refresh_golden_frame = 1;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 0;
+ } else {
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ }
#endif
- /* find a free buffer for the new frame */
- {
- int i = 0;
- for(; i < NUM_YV12_BUFFERS; i++)
- {
- if(!cm->yv12_fb[i].flags)
- {
- cm->new_fb_idx = i;
- break;
- }
- }
-
- assert(i < NUM_YV12_BUFFERS );
- }
- if (cpi->pass == 1)
- {
- Pass1Encode(cpi, size, dest, frame_flags);
- }
- else if (cpi->pass == 2)
- {
- Pass2Encode(cpi, size, dest, frame_flags);
+ /* find a free buffer for the new frame */
+ {
+ int i = 0;
+ for (; i < NUM_YV12_BUFFERS; i++) {
+ if (!cm->yv12_fb[i].flags) {
+ cm->new_fb_idx = i;
+ break;
+ }
}
- else
- encode_frame_to_data_rate(cpi, size, dest, frame_flags);
- if(cm->refresh_entropy_probs)
- {
- if(cm->refresh_alt_ref_frame)
- vpx_memcpy(&cm->lfc_a, &cm->fc, sizeof(cm->fc));
- else
- vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
- }
+ assert(i < NUM_YV12_BUFFERS);
+ }
+ if (cpi->pass == 1) {
+ Pass1Encode(cpi, size, dest, frame_flags);
+ } else if (cpi->pass == 2) {
+ Pass2Encode(cpi, size, dest, frame_flags);
+ } else
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
- // if its a dropped frame honor the requests on subsequent frames
- if (*size > 0)
- {
- cpi->droppable = !frame_is_reference(cpi);
+ if (cm->refresh_entropy_probs) {
+ if (cm->refresh_alt_ref_frame)
+ vpx_memcpy(&cm->lfc_a, &cm->fc, sizeof(cm->fc));
+ else
+ vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
+ }
- // return to normal state
- cm->refresh_entropy_probs = 1;
- cm->refresh_alt_ref_frame = 0;
- cm->refresh_golden_frame = 0;
- cm->refresh_last_frame = 1;
- cm->frame_type = INTER_FRAME;
+ // If it's a dropped frame, honor the refresh requests on subsequent frames
+ if (*size > 0) {
+ cpi->droppable = !frame_is_reference(cpi);
- }
+ // return to normal state
+ cm->refresh_entropy_probs = 1;
+ cm->refresh_alt_ref_frame = 0;
+ cm->refresh_golden_frame = 0;
+ cm->refresh_last_frame = 1;
+ cm->frame_type = INTER_FRAME;
- vpx_usec_timer_mark(&cmptimer);
- cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+ }
- if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
- {
- generate_psnr_packet(cpi);
- }
+ vpx_usec_timer_mark(&cmptimer);
+ cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+
+ if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
+ generate_psnr_packet(cpi);
+ }
#if CONFIG_INTERNAL_STATS
- if (cpi->pass != 1)
- {
- cpi->bytes += *size;
+ if (cpi->pass != 1) {
+ cpi->bytes += *size;
- if (cm->show_frame)
- {
+ if (cm->show_frame) {
- cpi->count ++;
+ cpi->count++;
- if (cpi->b_calculate_psnr)
- {
- double ye,ue,ve;
- double frame_psnr;
- YV12_BUFFER_CONFIG *orig = cpi->Source;
- YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
- YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
- int y_samples = orig->y_height * orig->y_width ;
- int uv_samples = orig->uv_height * orig->uv_width ;
- int t_samples = y_samples + 2 * uv_samples;
- int64_t sq_error;
-
- ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
-
- ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
-
- ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
-
- sq_error = ye + ue + ve;
-
- frame_psnr = vp8_mse2psnr(t_samples, 255.0, sq_error);
-
- cpi->total_y += vp8_mse2psnr(y_samples, 255.0, ye);
- cpi->total_u += vp8_mse2psnr(uv_samples, 255.0, ue);
- cpi->total_v += vp8_mse2psnr(uv_samples, 255.0, ve);
- cpi->total_sq_error += sq_error;
- cpi->total += frame_psnr;
- {
- double frame_psnr2, frame_ssim2 = 0;
- double weight = 0;
+ if (cpi->b_calculate_psnr) {
+ double ye, ue, ve;
+ double frame_psnr;
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
+ int y_samples = orig->y_height * orig->y_width;
+ int uv_samples = orig->uv_height * orig->uv_width;
+ int t_samples = y_samples + 2 * uv_samples;
+ int64_t sq_error;
+
+ ye = calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
+ IF_RTCD(&cpi->rtcd.variance));
- vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
- vp8_clear_system_state();
+ ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
+ IF_RTCD(&cpi->rtcd.variance));
- ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
+ ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
+ IF_RTCD(&cpi->rtcd.variance));
- ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ sq_error = ye + ue + ve;
- ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ frame_psnr = vp8_mse2psnr(t_samples, 255.0, sq_error);
- sq_error = ye + ue + ve;
+ cpi->total_y += vp8_mse2psnr(y_samples, 255.0, ye);
+ cpi->total_u += vp8_mse2psnr(uv_samples, 255.0, ue);
+ cpi->total_v += vp8_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error += sq_error;
+ cpi->total += frame_psnr;
+ {
+ double frame_psnr2, frame_ssim2 = 0;
+ double weight = 0;
- frame_psnr2 = vp8_mse2psnr(t_samples, 255.0, sq_error);
+ vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
+ vp8_clear_system_state();
- cpi->totalp_y += vp8_mse2psnr(y_samples, 255.0, ye);
- cpi->totalp_u += vp8_mse2psnr(uv_samples, 255.0, ue);
- cpi->totalp_v += vp8_mse2psnr(uv_samples, 255.0, ve);
- cpi->total_sq_error2 += sq_error;
- cpi->totalp += frame_psnr2;
+ ye = calc_plane_error(orig->y_buffer, orig->y_stride,
+ pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
+ IF_RTCD(&cpi->rtcd.variance));
- frame_ssim2 = vp8_calc_ssim(cpi->Source,
- &cm->post_proc_buffer, 1, &weight,
- IF_RTCD(&cpi->rtcd.variance));
+ ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
+ pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
+ IF_RTCD(&cpi->rtcd.variance));
- cpi->summed_quality += frame_ssim2 * weight;
- cpi->summed_weights += weight;
+ ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
+ pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
+ IF_RTCD(&cpi->rtcd.variance));
+
+ sq_error = ye + ue + ve;
+
+ frame_psnr2 = vp8_mse2psnr(t_samples, 255.0, sq_error);
+
+ cpi->totalp_y += vp8_mse2psnr(y_samples, 255.0, ye);
+ cpi->totalp_u += vp8_mse2psnr(uv_samples, 255.0, ue);
+ cpi->totalp_v += vp8_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error2 += sq_error;
+ cpi->totalp += frame_psnr2;
+
+ frame_ssim2 = vp8_calc_ssim(cpi->Source,
+ &cm->post_proc_buffer, 1, &weight,
+ IF_RTCD(&cpi->rtcd.variance));
+
+ cpi->summed_quality += frame_ssim2 * weight;
+ cpi->summed_weights += weight;
#if 0
- {
- FILE *f = fopen("q_used.stt", "a");
- fprintf(f, "%5d : Y%f7.3:U%f7.3:V%f7.3:F%f7.3:S%7.3f\n",
- cpi->common.current_video_frame,y2, u2, v2,
- frame_psnr2, frame_ssim2);
- fclose(f);
- }
+ {
+ FILE *f = fopen("q_used.stt", "a");
+ fprintf(f, "%5d : Y%f7.3:U%f7.3:V%f7.3:F%f7.3:S%7.3f\n",
+ cpi->common.current_video_frame, y2, u2, v2,
+ frame_psnr2, frame_ssim2);
+ fclose(f);
+ }
#endif
- }
- }
+ }
+ }
- if (cpi->b_calculate_ssimg)
- {
- double y, u, v, frame_all;
- frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
- &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
- cpi->total_ssimg_y += y;
- cpi->total_ssimg_u += u;
- cpi->total_ssimg_v += v;
- cpi->total_ssimg_all += frame_all;
- }
+ if (cpi->b_calculate_ssimg) {
+ double y, u, v, frame_all;
+ frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
+ &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
+ cpi->total_ssimg_y += y;
+ cpi->total_ssimg_u += u;
+ cpi->total_ssimg_v += v;
+ cpi->total_ssimg_all += frame_all;
+ }
- }
}
+ }
#endif
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_pop_neon(store_reg);
- }
+ {
+ vp8_pop_neon(store_reg);
+ }
#endif
- return 0;
+ return 0;
}
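
The internal-stats block above converts summed squared error into PSNR via vp8_mse2psnr, i.e. the standard 10*log10(peak^2 * samples / sse) over all three planes, with a cap to keep the zero-error case finite. A hedged re-derivation (the 100 dB cap here is our own choice, not necessarily the library's):

    #include <math.h>
    #include <stdio.h>

    /* PSNR in dB for `samples` pixels of peak value `peak` and summed
     * squared error `sse`; capped when sse approaches 0 (cap is our choice). */
    static double mse2psnr(double samples, double peak, double sse) {
      double psnr = 100.0;
      if (sse > 0.0)
        psnr = 10.0 * log10(samples * peak * peak / sse);
      return psnr > 100.0 ? 100.0 : psnr;
    }

    int main(void) {   /* link with -lm */
      /* Invented example: 640x480 luma plus two 320x240 chroma planes. */
      double y = 640.0 * 480.0, uv = 320.0 * 240.0, t = y + 2.0 * uv;
      double sq_error = 1.5e6;   /* invented total SSE across the planes */
      printf("frame PSNR = %.2f dB\n", mse2psnr(t, 255.0, sq_error));
      return 0;
    }
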
-int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags)
-{
- VP8_COMP *cpi = (VP8_COMP *) comp;
+int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags) {
+ VP8_COMP *cpi = (VP8_COMP *) comp;
- if (cpi->common.refresh_alt_ref_frame)
- return -1;
- else
- {
- int ret;
+ if (cpi->common.refresh_alt_ref_frame)
+ return -1;
+ else {
+ int ret;
#if CONFIG_POSTPROC
- ret = vp8_post_proc_frame(&cpi->common, dest, flags);
+ ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
- if (cpi->common.frame_to_show)
- {
- *dest = *cpi->common.frame_to_show;
- dest->y_width = cpi->common.Width;
- dest->y_height = cpi->common.Height;
- dest->uv_height = cpi->common.Height / 2;
- ret = 0;
- }
- else
- {
- ret = -1;
- }
-
-#endif //!CONFIG_POSTPROC
- vp8_clear_system_state();
- return ret;
- }
-}
-
-int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
-{
- VP8_COMP *cpi = (VP8_COMP *) comp;
- signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int i;
-
- if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
- return -1;
-
- if (!map)
- {
- vp8_disable_segmentation((VP8_PTR)cpi);
- return 0;
+ if (cpi->common.frame_to_show) {
+ *dest = *cpi->common.frame_to_show;
+ dest->y_width = cpi->common.Width;
+ dest->y_height = cpi->common.Height;
+ dest->uv_height = cpi->common.Height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
}
- // Set the segmentation Map
- vp8_set_segmentation_map((VP8_PTR)cpi, map);
-
- // Activate segmentation.
- vp8_enable_segmentation((VP8_PTR)cpi);
+#endif // !CONFIG_POSTPROC
+ vp8_clear_system_state();
+ return ret;
+ }
+}
- // Set up the quant segment data
- feature_data[SEG_LVL_ALT_Q][0] = delta_q[0];
- feature_data[SEG_LVL_ALT_Q][1] = delta_q[1];
- feature_data[SEG_LVL_ALT_Q][2] = delta_q[2];
- feature_data[SEG_LVL_ALT_Q][3] = delta_q[3];
+int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]) {
+ VP8_COMP *cpi = (VP8_COMP *) comp;
+ signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int i;
- // Set up the loop segment data s
- feature_data[SEG_LVL_ALT_LF][0] = delta_lf[0];
- feature_data[SEG_LVL_ALT_LF][1] = delta_lf[1];
- feature_data[SEG_LVL_ALT_LF][2] = delta_lf[2];
- feature_data[SEG_LVL_ALT_LF][3] = delta_lf[3];
+ if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
+ return -1;
- cpi->segment_encode_breakout[0] = threshold[0];
- cpi->segment_encode_breakout[1] = threshold[1];
- cpi->segment_encode_breakout[2] = threshold[2];
- cpi->segment_encode_breakout[3] = threshold[3];
+ if (!map) {
+ vp8_disable_segmentation((VP8_PTR)cpi);
+ return 0;
+ }
+
+ // Set the segmentation map
+ vp8_set_segmentation_map((VP8_PTR)cpi, map);
+
+ // Activate segmentation.
+ vp8_enable_segmentation((VP8_PTR)cpi);
+
+ // Set up the quant segment data
+ feature_data[SEG_LVL_ALT_Q][0] = delta_q[0];
+ feature_data[SEG_LVL_ALT_Q][1] = delta_q[1];
+ feature_data[SEG_LVL_ALT_Q][2] = delta_q[2];
+ feature_data[SEG_LVL_ALT_Q][3] = delta_q[3];
+
+ // Set up the loop filter segment data
+ feature_data[SEG_LVL_ALT_LF][0] = delta_lf[0];
+ feature_data[SEG_LVL_ALT_LF][1] = delta_lf[1];
+ feature_data[SEG_LVL_ALT_LF][2] = delta_lf[2];
+ feature_data[SEG_LVL_ALT_LF][3] = delta_lf[3];
+
+ cpi->segment_encode_breakout[0] = threshold[0];
+ cpi->segment_encode_breakout[1] = threshold[1];
+ cpi->segment_encode_breakout[2] = threshold[2];
+ cpi->segment_encode_breakout[3] = threshold[3];
+
+ // Enable the loop and quant changes in the feature mask
+ for (i = 0; i < 4; i++) {
+ if (delta_q[i])
+ enable_segfeature(xd, i, SEG_LVL_ALT_Q);
+ else
+ disable_segfeature(xd, i, SEG_LVL_ALT_Q);
- // Enable the loop and quant changes in the feature mask
- for ( i = 0; i < 4; i++ )
- {
- if (delta_q[i])
- enable_segfeature(xd, i, SEG_LVL_ALT_Q);
- else
- disable_segfeature(xd, i, SEG_LVL_ALT_Q);
-
- if (delta_lf[i])
- enable_segfeature(xd, i, SEG_LVL_ALT_LF);
- else
- disable_segfeature(xd, i, SEG_LVL_ALT_LF);
- }
+ if (delta_lf[i])
+ enable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ else
+ disable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ }
- // Initialise the feature data structure
- // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
- vp8_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
+ // Initialise the feature data structure
+ // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
+ vp8_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
- return 0;
+ return 0;
}
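
vp8_set_roimap above maps a per-macroblock segment id (0..3) to per-segment quantizer and loop-filter deltas, enabling each feature only for segments whose delta is nonzero and passing the deltas through as SEGMENT_DELTADATA. A stripped-down sketch of that bookkeeping, using hypothetical local types rather than the encoder's segfeature API:

    #include <stdio.h>
    #include <string.h>

    #define MAX_SEGMENTS 4
    enum { FEAT_ALT_Q, FEAT_ALT_LF, FEAT_COUNT };   /* hypothetical feature ids */

    int main(void) {
      signed char feature_data[FEAT_COUNT][MAX_SEGMENTS];
      unsigned char enabled[FEAT_COUNT][MAX_SEGMENTS];
      const int delta_q[MAX_SEGMENTS] = {-10, 0, 5, 0};   /* invented deltas */
      const int delta_lf[MAX_SEGMENTS] = {0, 0, 2, -1};
      int i;

      memset(enabled, 0, sizeof(enabled));
      for (i = 0; i < MAX_SEGMENTS; i++) {
        feature_data[FEAT_ALT_Q][i] = (signed char)delta_q[i];
        feature_data[FEAT_ALT_LF][i] = (signed char)delta_lf[i];
        enabled[FEAT_ALT_Q][i] = delta_q[i] != 0;    /* enable only nonzero */
        enabled[FEAT_ALT_LF][i] = delta_lf[i] != 0;
      }
      for (i = 0; i < MAX_SEGMENTS; i++)
        printf("seg %d: q%+d(%s) lf%+d(%s)\n", i,
               feature_data[FEAT_ALT_Q][i], enabled[FEAT_ALT_Q][i] ? "on" : "off",
               feature_data[FEAT_ALT_LF][i], enabled[FEAT_ALT_LF][i] ? "on" : "off");
      return 0;
    }
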
-int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols)
-{
- VP8_COMP *cpi = (VP8_COMP *) comp;
+int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols) {
+ VP8_COMP *cpi = (VP8_COMP *) comp;
- if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols)
- {
- if (map)
- {
- vpx_memcpy(cpi->active_map, map, rows * cols);
- cpi->active_map_enabled = 1;
- }
- else
- cpi->active_map_enabled = 0;
+ if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
+ if (map) {
+ vpx_memcpy(cpi->active_map, map, rows * cols);
+ cpi->active_map_enabled = 1;
+ } else
+ cpi->active_map_enabled = 0;
- return 0;
- }
- else
- {
- //cpi->active_map_enabled = 0;
- return -1 ;
- }
+ return 0;
+ } else {
+ // cpi->active_map_enabled = 0;
+ return -1;
+ }
}
-int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode)
-{
- VP8_COMP *cpi = (VP8_COMP *) comp;
+int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+ VP8_COMP *cpi = (VP8_COMP *) comp;
- if (horiz_mode <= ONETWO)
- cpi->common.horiz_scale = horiz_mode;
- else
- return -1;
+ if (horiz_mode <= ONETWO)
+ cpi->common.horiz_scale = horiz_mode;
+ else
+ return -1;
- if (vert_mode <= ONETWO)
- cpi->common.vert_scale = vert_mode;
- else
- return -1;
+ if (vert_mode <= ONETWO)
+ cpi->common.vert_scale = vert_mode;
+ else
+ return -1;
- return 0;
+ return 0;
}
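
vp8_set_internal_size accepts any VPX_SCALING value up to ONETWO per axis and rejects anything larger with -1. A one-line sketch, assuming the usual enum meanings (NORMAL = unscaled, ONETWO = one half):

    /* Request half-width, full-height internal coding resolution. */
    if (vp8_set_internal_size((VP8_PTR)cpi, ONETWO, NORMAL) != 0) {
      /* scaling mode out of range */
    }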
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
-{
- int i, j;
- int Total = 0;
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) {
+ int i, j;
+ int Total = 0;
- unsigned char *src = source->y_buffer;
- unsigned char *dst = dest->y_buffer;
- (void)rtcd;
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
+ (void)rtcd;
- // Loop through the Y plane raw and reconstruction data summing (square differences)
- for (i = 0; i < source->y_height; i += 16)
- {
- for (j = 0; j < source->y_width; j += 16)
- {
- unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
- }
-
- src += 16 * source->y_stride;
- dst += 16 * dest->y_stride;
+ // Loop through the Y plane raw and reconstruction data summing (square differences)
+ for (i = 0; i < source->y_height; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
}
- return Total;
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
}
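
vp8_calc_ss_err walks the Y plane in 16x16 tiles and accumulates the return value of mse16x16, i.e. the sum of squared differences per tile. A scalar sketch of what each optimized vtable call computes:

    /* Reference form of one 16x16 SSE term (sketch); VARIANCE_INVOKE(rtcd,
     * mse16x16) dispatches to an optimized implementation of this loop. */
    static unsigned int sse_16x16_ref(const unsigned char *src, int src_stride,
                                      const unsigned char *dst, int dst_stride) {
      unsigned int sse = 0;
      int r, c;
      for (r = 0; r < 16; r++) {
        for (c = 0; c < 16; c++) {
          const int d = src[c] - dst[c];
          sse += d * d;
        }
        src += src_stride;
        dst += dst_stride;
      }
      return sse;
    }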
-int vp8_get_quantizer(VP8_PTR c)
-{
- VP8_COMP *cpi = (VP8_COMP *) c;
- return cpi->common.base_qindex;
+int vp8_get_quantizer(VP8_PTR c) {
+ VP8_COMP *cpi = (VP8_COMP *) c;
+ return cpi->common.base_qindex;
}
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index c838eccad..1fa8b0588 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -31,7 +31,7 @@
#include "vp8/common/findnearmv.h"
#include "lookahead.h"
-//#define SPEEDSTATS 1
+// #define SPEEDSTATS 1
#define MIN_GF_INTERVAL 4
#define DEFAULT_GF_INTERVAL 7
@@ -67,705 +67,688 @@
#define VP8_TEMPORAL_ALT_REF 1
-typedef struct
-{
- MV_CONTEXT mvc[2];
- int mvcosts[2][MVvals+1];
+typedef struct {
+ MV_CONTEXT mvc[2];
+ int mvcosts[2][MVvals + 1];
#if CONFIG_HIGH_PRECISION_MV
- MV_CONTEXT_HP mvc_hp[2];
- int mvcosts_hp[2][MVvals_hp+1];
+ MV_CONTEXT_HP mvc_hp[2];
+ int mvcosts_hp[2][MVvals_hp + 1];
#endif
#ifdef MODE_STATS
- // Stats
- int y_modes[VP8_YMODES];
- int uv_modes[VP8_UV_MODES];
- int i8x8_modes[VP8_I8X8_MODES];
- int b_modes[B_MODE_COUNT];
- int inter_y_modes[MB_MODE_COUNT];
- int inter_uv_modes[VP8_UV_MODES];
- int inter_b_modes[B_MODE_COUNT];
+ // Stats
+ int y_modes[VP8_YMODES];
+ int uv_modes[VP8_UV_MODES];
+ int i8x8_modes[VP8_I8X8_MODES];
+ int b_modes[B_MODE_COUNT];
+ int inter_y_modes[MB_MODE_COUNT];
+ int inter_uv_modes[VP8_UV_MODES];
+ int inter_b_modes[B_MODE_COUNT];
#endif
- vp8_prob segment_pred_probs[PREDICTION_PROBS];
- unsigned char ref_pred_probs_update[PREDICTION_PROBS];
- vp8_prob ref_pred_probs[PREDICTION_PROBS];
- vp8_prob prob_comppred[COMP_PRED_CONTEXTS];
+ vp8_prob segment_pred_probs[PREDICTION_PROBS];
+ unsigned char ref_pred_probs_update[PREDICTION_PROBS];
+ vp8_prob ref_pred_probs[PREDICTION_PROBS];
+ vp8_prob prob_comppred[COMP_PRED_CONTEXTS];
- unsigned char * last_frame_seg_map_copy;
+ unsigned char *last_frame_seg_map_copy;
- // 0 = Intra, Last, GF, ARF
- signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
- // 0 = BPRED, ZERO_MV, MV, SPLIT
- signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+ // 0 = Intra, Last, GF, ARF
+ signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
+ // 0 = BPRED, ZERO_MV, MV, SPLIT
+ signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
- vp8_prob coef_probs[BLOCK_TYPES]
- [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
- vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
- [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+ vp8_prob coef_probs[BLOCK_TYPES]
+ [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+ vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
+ [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
- vp8_prob ymode_prob [VP8_YMODES-1]; /* interframe intra mode probs */
- vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES-1];
- vp8_prob bmode_prob [VP8_BINTRAMODES-1];
- vp8_prob i8x8_mode_prob [VP8_I8X8_MODES-1];
- vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS-1];
- vp8_prob mbsplit_prob [VP8_NUMMBSPLITS-1];
+ vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
+ vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
+ vp8_prob bmode_prob [VP8_BINTRAMODES - 1];
+ vp8_prob i8x8_mode_prob [VP8_I8X8_MODES - 1];
+ vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
+ vp8_prob mbsplit_prob [VP8_NUMMBSPLITS - 1];
- int mv_ref_ct[6][4][2];
- int mode_context[6][4];
- int mv_ref_ct_a[6][4][2];
- int mode_context_a[6][4];
+ int mv_ref_ct[6][4][2];
+ int mode_context[6][4];
+ int mv_ref_ct_a[6][4][2];
+ int mode_context_a[6][4];
} CODING_CONTEXT;
-typedef struct
-{
- double frame;
- double intra_error;
- double coded_error;
- double sr_coded_error;
- double ssim_weighted_pred_err;
- double pcnt_inter;
- double pcnt_motion;
- double pcnt_second_ref;
- double pcnt_neutral;
- double MVr;
- double mvr_abs;
- double MVc;
- double mvc_abs;
- double MVrv;
- double MVcv;
- double mv_in_out_count;
- double new_mv_count;
- double duration;
- double count;
+typedef struct {
+ double frame;
+ double intra_error;
+ double coded_error;
+ double sr_coded_error;
+ double ssim_weighted_pred_err;
+ double pcnt_inter;
+ double pcnt_motion;
+ double pcnt_second_ref;
+ double pcnt_neutral;
+ double MVr;
+ double mvr_abs;
+ double MVc;
+ double mvc_abs;
+ double MVrv;
+ double MVcv;
+ double mv_in_out_count;
+ double new_mv_count;
+ double duration;
+ double count;
}
FIRSTPASS_STATS;
-typedef struct
-{
- int frames_so_far;
- double frame_intra_error;
- double frame_coded_error;
- double frame_pcnt_inter;
- double frame_pcnt_motion;
- double frame_mvr;
- double frame_mvr_abs;
- double frame_mvc;
- double frame_mvc_abs;
+typedef struct {
+ int frames_so_far;
+ double frame_intra_error;
+ double frame_coded_error;
+ double frame_pcnt_inter;
+ double frame_pcnt_motion;
+ double frame_mvr;
+ double frame_mvr_abs;
+ double frame_mvc;
+ double frame_mvc_abs;
} ONEPASS_FRAMESTATS;
-typedef struct
-{
- struct {
- int err;
- union {
- int_mv mv;
- MB_PREDICTION_MODE mode;
- } m;
- } ref[MAX_REF_FRAMES];
+typedef struct {
+ struct {
+ int err;
+ union {
+ int_mv mv;
+ MB_PREDICTION_MODE mode;
+ } m;
+ } ref[MAX_REF_FRAMES];
} MBGRAPH_MB_STATS;
-typedef struct
-{
- MBGRAPH_MB_STATS *mb_stats;
+typedef struct {
+ MBGRAPH_MB_STATS *mb_stats;
} MBGRAPH_FRAME_STATS;
#if CONFIG_PRED_FILTER
-typedef enum
-{
- THR_ZEROMV,
- THR_ZEROMV_FILT,
- THR_DC,
-
- THR_NEARESTMV,
- THR_NEARESTMV_FILT,
- THR_NEARMV,
- THR_NEARMV_FILT,
-
- THR_ZEROG,
- THR_ZEROG_FILT,
- THR_NEARESTG,
- THR_NEARESTG_FILT,
-
- THR_ZEROA,
- THR_ZEROA_FILT,
- THR_NEARESTA,
- THR_NEARESTA_FILT,
-
- THR_NEARG,
- THR_NEARG_FILT,
- THR_NEARA,
- THR_NEARA_FILT,
-
- THR_V_PRED,
- THR_H_PRED,
+typedef enum {
+ THR_ZEROMV,
+ THR_ZEROMV_FILT,
+ THR_DC,
+
+ THR_NEARESTMV,
+ THR_NEARESTMV_FILT,
+ THR_NEARMV,
+ THR_NEARMV_FILT,
+
+ THR_ZEROG,
+ THR_ZEROG_FILT,
+ THR_NEARESTG,
+ THR_NEARESTG_FILT,
+
+ THR_ZEROA,
+ THR_ZEROA_FILT,
+ THR_NEARESTA,
+ THR_NEARESTA_FILT,
+
+ THR_NEARG,
+ THR_NEARG_FILT,
+ THR_NEARA,
+ THR_NEARA_FILT,
+
+ THR_V_PRED,
+ THR_H_PRED,
#if CONFIG_NEWINTRAMODES
- THR_D45_PRED,
- THR_D135_PRED,
- THR_D117_PRED,
- THR_D153_PRED,
- THR_D27_PRED,
- THR_D63_PRED,
+ THR_D45_PRED,
+ THR_D135_PRED,
+ THR_D117_PRED,
+ THR_D153_PRED,
+ THR_D27_PRED,
+ THR_D63_PRED,
#endif
- THR_TM,
+ THR_TM,
- THR_NEWMV,
- THR_NEWMV_FILT,
- THR_NEWG,
- THR_NEWG_FILT,
- THR_NEWA,
- THR_NEWA_FILT,
+ THR_NEWMV,
+ THR_NEWMV_FILT,
+ THR_NEWG,
+ THR_NEWG_FILT,
+ THR_NEWA,
+ THR_NEWA_FILT,
- THR_SPLITMV,
- THR_SPLITG,
- THR_SPLITA,
+ THR_SPLITMV,
+ THR_SPLITG,
+ THR_SPLITA,
- THR_B_PRED,
- THR_I8X8_PRED,
+ THR_B_PRED,
+ THR_I8X8_PRED,
- THR_COMP_ZEROLG,
- THR_COMP_NEARESTLG,
- THR_COMP_NEARLG,
+ THR_COMP_ZEROLG,
+ THR_COMP_NEARESTLG,
+ THR_COMP_NEARLG,
- THR_COMP_ZEROLA,
- THR_COMP_NEARESTLA,
- THR_COMP_NEARLA,
+ THR_COMP_ZEROLA,
+ THR_COMP_NEARESTLA,
+ THR_COMP_NEARLA,
- THR_COMP_ZEROGA,
- THR_COMP_NEARESTGA,
- THR_COMP_NEARGA,
+ THR_COMP_ZEROGA,
+ THR_COMP_NEARESTGA,
+ THR_COMP_NEARGA,
- THR_COMP_NEWLG,
- THR_COMP_NEWLA,
- THR_COMP_NEWGA,
+ THR_COMP_NEWLG,
+ THR_COMP_NEWLA,
+ THR_COMP_NEWGA,
- THR_COMP_SPLITLG,
- THR_COMP_SPLITLA,
- THR_COMP_SPLITGA,
+ THR_COMP_SPLITLG,
+ THR_COMP_SPLITLA,
+ THR_COMP_SPLITGA,
}
THR_MODES;
#else
-typedef enum
-{
- THR_ZEROMV,
- THR_DC,
+typedef enum {
+ THR_ZEROMV,
+ THR_DC,
- THR_NEARESTMV,
- THR_NEARMV,
+ THR_NEARESTMV,
+ THR_NEARMV,
- THR_ZEROG,
- THR_NEARESTG,
+ THR_ZEROG,
+ THR_NEARESTG,
- THR_ZEROA,
- THR_NEARESTA,
+ THR_ZEROA,
+ THR_NEARESTA,
- THR_NEARG,
- THR_NEARA,
+ THR_NEARG,
+ THR_NEARA,
- THR_V_PRED,
- THR_H_PRED,
+ THR_V_PRED,
+ THR_H_PRED,
#if CONFIG_NEWINTRAMODES
- THR_D45_PRED,
- THR_D135_PRED,
- THR_D117_PRED,
- THR_D153_PRED,
- THR_D27_PRED,
- THR_D63_PRED,
+ THR_D45_PRED,
+ THR_D135_PRED,
+ THR_D117_PRED,
+ THR_D153_PRED,
+ THR_D27_PRED,
+ THR_D63_PRED,
#endif
- THR_TM,
+ THR_TM,
- THR_NEWMV,
- THR_NEWG,
- THR_NEWA,
+ THR_NEWMV,
+ THR_NEWG,
+ THR_NEWA,
- THR_SPLITMV,
- THR_SPLITG,
- THR_SPLITA,
+ THR_SPLITMV,
+ THR_SPLITG,
+ THR_SPLITA,
- THR_B_PRED,
- THR_I8X8_PRED,
+ THR_B_PRED,
+ THR_I8X8_PRED,
- THR_COMP_ZEROLG,
- THR_COMP_NEARESTLG,
- THR_COMP_NEARLG,
+ THR_COMP_ZEROLG,
+ THR_COMP_NEARESTLG,
+ THR_COMP_NEARLG,
- THR_COMP_ZEROLA,
- THR_COMP_NEARESTLA,
- THR_COMP_NEARLA,
+ THR_COMP_ZEROLA,
+ THR_COMP_NEARESTLA,
+ THR_COMP_NEARLA,
- THR_COMP_ZEROGA,
- THR_COMP_NEARESTGA,
- THR_COMP_NEARGA,
+ THR_COMP_ZEROGA,
+ THR_COMP_NEARESTGA,
+ THR_COMP_NEARGA,
- THR_COMP_NEWLG,
- THR_COMP_NEWLA,
- THR_COMP_NEWGA,
+ THR_COMP_NEWLG,
+ THR_COMP_NEWLA,
+ THR_COMP_NEWGA,
- THR_COMP_SPLITLG,
- THR_COMP_SPLITLA,
- THR_COMP_SPLITGA
+ THR_COMP_SPLITLG,
+ THR_COMP_SPLITLA,
+ THR_COMP_SPLITGA
}
THR_MODES;
#endif
-typedef enum
-{
- DIAMOND = 0,
- NSTEP = 1,
- HEX = 2
+typedef enum {
+ DIAMOND = 0,
+ NSTEP = 1,
+ HEX = 2
} SEARCH_METHODS;
-typedef struct
-{
- int RD;
- SEARCH_METHODS search_method;
- int improved_dct;
- int auto_filter;
- int recode_loop;
- int iterative_sub_pixel;
- int half_pixel_search;
- int quarter_pixel_search;
- int thresh_mult[MAX_MODES];
- int max_step_search_steps;
- int first_step;
- int optimize_coefficients;
- int no_skip_block4x4_search;
- int improved_mv_pred;
+typedef struct {
+ int RD;
+ SEARCH_METHODS search_method;
+ int improved_dct;
+ int auto_filter;
+ int recode_loop;
+ int iterative_sub_pixel;
+ int half_pixel_search;
+ int quarter_pixel_search;
+ int thresh_mult[MAX_MODES];
+ int max_step_search_steps;
+ int first_step;
+ int optimize_coefficients;
+ int no_skip_block4x4_search;
+ int improved_mv_pred;
#if CONFIG_ENHANCED_INTERP
- int search_best_filter;
+ int search_best_filter;
#endif
} SPEED_FEATURES;
-typedef struct
-{
- MACROBLOCK mb;
- int totalrate;
+typedef struct {
+ MACROBLOCK mb;
+ int totalrate;
} MB_ROW_COMP;
-typedef struct
-{
- TOKENEXTRA *start;
- TOKENEXTRA *stop;
+typedef struct {
+ TOKENEXTRA *start;
+ TOKENEXTRA *stop;
} TOKENLIST;
-typedef struct
-{
- int ithread;
- void *ptr1;
- void *ptr2;
+typedef struct {
+ int ithread;
+ void *ptr1;
+ void *ptr2;
} ENCODETHREAD_DATA;
-typedef struct
-{
- int ithread;
- void *ptr1;
+typedef struct {
+ int ithread;
+ void *ptr1;
} LPFTHREAD_DATA;
-typedef struct VP8_ENCODER_RTCD
-{
- VP8_COMMON_RTCD *common;
- vp8_variance_rtcd_vtable_t variance;
- vp8_fdct_rtcd_vtable_t fdct;
- vp8_encodemb_rtcd_vtable_t encodemb;
- vp8_search_rtcd_vtable_t search;
- vp8_temporal_rtcd_vtable_t temporal;
+typedef struct VP8_ENCODER_RTCD {
+ VP8_COMMON_RTCD *common;
+ vp8_variance_rtcd_vtable_t variance;
+ vp8_fdct_rtcd_vtable_t fdct;
+ vp8_encodemb_rtcd_vtable_t encodemb;
+ vp8_search_rtcd_vtable_t search;
+ vp8_temporal_rtcd_vtable_t temporal;
} VP8_ENCODER_RTCD;
-enum
-{
- BLOCK_16X8,
- BLOCK_8X16,
- BLOCK_8X8,
- BLOCK_4X4,
- BLOCK_16X16,
- BLOCK_MAX_SEGMENTS
+enum {
+ BLOCK_16X8,
+ BLOCK_8X16,
+ BLOCK_8X8,
+ BLOCK_4X4,
+ BLOCK_16X16,
+ BLOCK_MAX_SEGMENTS
};
-typedef struct VP8_COMP
-{
+typedef struct VP8_COMP {
- DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y1quant_shift[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, Y1quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y2quant_shift[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, Y2quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, UVquant_shift[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, UVquant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(64, short, Y1zbin_8x8[QINDEX_RANGE][64]);
- DECLARE_ALIGNED(64, short, Y2zbin_8x8[QINDEX_RANGE][64]);
- DECLARE_ALIGNED(64, short, UVzbin_8x8[QINDEX_RANGE][64]);
- DECLARE_ALIGNED(64, short, zrun_zbin_boost_y1_8x8[QINDEX_RANGE][64]);
- DECLARE_ALIGNED(64, short, zrun_zbin_boost_y2_8x8[QINDEX_RANGE][64]);
- DECLARE_ALIGNED(64, short, zrun_zbin_boost_uv_8x8[QINDEX_RANGE][64]);
-
- MACROBLOCK mb;
- VP8_COMMON common;
- vp8_writer bc, bc2;
- // bool_writer *bc2;
-
- VP8_CONFIG oxcf;
-
- struct lookahead_ctx *lookahead;
- struct lookahead_entry *source;
- struct lookahead_entry *alt_ref_source;
+ DECLARE_ALIGNED(64, short, Y1zbin_8x8[QINDEX_RANGE][64]);
+ DECLARE_ALIGNED(64, short, Y2zbin_8x8[QINDEX_RANGE][64]);
+ DECLARE_ALIGNED(64, short, UVzbin_8x8[QINDEX_RANGE][64]);
+ DECLARE_ALIGNED(64, short, zrun_zbin_boost_y1_8x8[QINDEX_RANGE][64]);
+ DECLARE_ALIGNED(64, short, zrun_zbin_boost_y2_8x8[QINDEX_RANGE][64]);
+ DECLARE_ALIGNED(64, short, zrun_zbin_boost_uv_8x8[QINDEX_RANGE][64]);
+
+ MACROBLOCK mb;
+ VP8_COMMON common;
+ vp8_writer bc, bc2;
+ // bool_writer *bc2;
+
+ VP8_CONFIG oxcf;
+
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *source;
+ struct lookahead_entry *alt_ref_source;
- YV12_BUFFER_CONFIG *Source;
- YV12_BUFFER_CONFIG *un_scaled_source;
- YV12_BUFFER_CONFIG scaled_source;
+ YV12_BUFFER_CONFIG *Source;
+ YV12_BUFFER_CONFIG *un_scaled_source;
+ YV12_BUFFER_CONFIG scaled_source;
- int source_alt_ref_pending; // frame in src_buffers has been identified to be encoded as an alt ref
- int source_alt_ref_active; // an alt ref frame has been encoded and is usable
+ int source_alt_ref_pending; // frame in src_buffers has been identified to be encoded as an alt ref
+ int source_alt_ref_active; // an alt ref frame has been encoded and is usable
- int is_src_frame_alt_ref; // source of frame to encode is an exact copy of an alt ref frame
+ int is_src_frame_alt_ref; // source of frame to encode is an exact copy of an alt ref frame
- int gold_is_last; // golden frame same as last frame ( short circuit gold searches)
- int alt_is_last; // Alt reference frame same as last ( short circuit altref search)
- int gold_is_alt; // don't do both alt and gold search ( just do gold).
+ int gold_is_last; // golden frame same as last frame (short-circuit gold searches)
+ int alt_is_last; // Alt reference frame same as last (short-circuit altref search)
+ int gold_is_alt; // don't do both alt and gold search (just do gold).
- //int refresh_alt_ref_frame;
- YV12_BUFFER_CONFIG last_frame_uf;
+ // int refresh_alt_ref_frame;
+ YV12_BUFFER_CONFIG last_frame_uf;
- TOKENEXTRA *tok;
- unsigned int tok_count;
+ TOKENEXTRA *tok;
+ unsigned int tok_count;
- unsigned int frames_since_key;
- unsigned int key_frame_frequency;
- unsigned int this_key_frame_forced;
- unsigned int next_key_frame_forced;
+ unsigned int frames_since_key;
+ unsigned int key_frame_frequency;
+ unsigned int this_key_frame_forced;
+ unsigned int next_key_frame_forced;
- // Ambient reconstruction err target for force key frames
- int ambient_err;
+ // Ambient reconstruction err target for forced key frames
+ int ambient_err;
- unsigned int mode_check_freq[MAX_MODES];
- unsigned int mode_test_hit_counts[MAX_MODES];
- unsigned int mode_chosen_counts[MAX_MODES];
+ unsigned int mode_check_freq[MAX_MODES];
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ unsigned int mode_chosen_counts[MAX_MODES];
- int rd_thresh_mult[MAX_MODES];
- int rd_baseline_thresh[MAX_MODES];
- int rd_threshes[MAX_MODES];
- int64_t rd_single_diff, rd_comp_diff, rd_hybrid_diff;
- int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
- int comp_pred_count[COMP_PRED_CONTEXTS];
- int single_pred_count[COMP_PRED_CONTEXTS];
+ int rd_thresh_mult[MAX_MODES];
+ int rd_baseline_thresh[MAX_MODES];
+ int rd_threshes[MAX_MODES];
+ int64_t rd_single_diff, rd_comp_diff, rd_hybrid_diff;
+ int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
+ int comp_pred_count[COMP_PRED_CONTEXTS];
+ int single_pred_count[COMP_PRED_CONTEXTS];
- int RDMULT;
- int RDDIV ;
+ int RDMULT;
+ int RDDIV;
- CODING_CONTEXT coding_context;
+ CODING_CONTEXT coding_context;
- // Rate targetting variables
- int64_t prediction_error;
- int64_t last_prediction_error;
- int64_t intra_error;
- int64_t last_intra_error;
+ // Rate targeting variables
+ int64_t prediction_error;
+ int64_t last_prediction_error;
+ int64_t intra_error;
+ int64_t last_intra_error;
- int this_frame_target;
- int projected_frame_size;
- int last_q[2]; // Separate values for Intra/Inter
- int last_boosted_qindex; // Last boosted GF/KF/ARF q
+ int this_frame_target;
+ int projected_frame_size;
+ int last_q[2]; // Separate values for Intra/Inter
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
- double rate_correction_factor;
- double key_frame_rate_correction_factor;
- double gf_rate_correction_factor;
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
- int frames_till_gf_update_due; // Count down till next GF
- int current_gf_interval; // GF interval chosen when we coded the last GF
+ int frames_till_gf_update_due; // Count down till next GF
+ int current_gf_interval; // GF interval chosen when we coded the last GF
- int gf_overspend_bits; // Total bits overspent becasue of GF boost (cumulative)
+ int gf_overspend_bits; // Total bits overspent because of GF boost (cumulative)
- int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
+ int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
- int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
- int kf_bitrate_adjustment; // Current number of bit s to try and recover on each inter frame.
- int max_gf_interval;
- int baseline_gf_interval;
- int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
+ int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
+ int kf_bitrate_adjustment; // Current number of bits to try to recover on each inter frame.
+ int max_gf_interval;
+ int baseline_gf_interval;
+ int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
- int64_t key_frame_count;
- int prior_key_frame_distance[KEY_FRAME_CONTEXT];
- int per_frame_bandwidth; // Current section per frame bandwidth target
- int av_per_frame_bandwidth; // Average frame size target for clip
- int min_frame_bandwidth; // Minimum allocation that should be used for any frame
- int inter_frame_target;
- double output_frame_rate;
- int64_t last_time_stamp_seen;
- int64_t last_end_time_stamp_seen;
- int64_t first_time_stamp_ever;
+ int64_t key_frame_count;
+ int prior_key_frame_distance[KEY_FRAME_CONTEXT];
+ int per_frame_bandwidth; // Current section per frame bandwidth target
+ int av_per_frame_bandwidth; // Average frame size target for clip
+ int min_frame_bandwidth; // Minimum allocation that should be used for any frame
+ int inter_frame_target;
+ double output_frame_rate;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
- int ni_av_qi;
- int ni_tot_qi;
- int ni_frames;
- int avg_frame_qindex;
- double tot_q;
- double avg_q;
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex;
+ double tot_q;
+ double avg_q;
- int zbin_over_quant;
- int zbin_mode_boost;
- int zbin_mode_boost_enabled;
+ int zbin_over_quant;
+ int zbin_mode_boost;
+ int zbin_mode_boost_enabled;
- int64_t total_byte_count;
+ int64_t total_byte_count;
- int buffered_mode;
+ int buffered_mode;
- int buffer_level;
- int bits_off_target;
+ int buffer_level;
+ int bits_off_target;
- int rolling_target_bits;
- int rolling_actual_bits;
+ int rolling_target_bits;
+ int rolling_actual_bits;
- int long_rolling_target_bits;
- int long_rolling_actual_bits;
+ int long_rolling_target_bits;
+ int long_rolling_actual_bits;
- int64_t total_actual_bits;
- int total_target_vs_actual; // debug stats
+ int64_t total_actual_bits;
+ int total_target_vs_actual; // debug stats
- int worst_quality;
- int active_worst_quality;
- int best_quality;
- int active_best_quality;
+ int worst_quality;
+ int active_worst_quality;
+ int best_quality;
+ int active_best_quality;
- int cq_target_quality;
+ int cq_target_quality;
- int ymode_count [VP8_YMODES]; /* intra MB type cts this frame */
- int bmode_count [VP8_BINTRAMODES];
- int i8x8_mode_count [VP8_I8X8_MODES];
- int sub_mv_ref_count [SUBMVREF_COUNT][VP8_SUBMVREFS];
- int mbsplit_count [VP8_NUMMBSPLITS];
- //int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
- int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
-
- unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
+ int ymode_count [VP8_YMODES]; /* intra MB type cts this frame */
+ int bmode_count [VP8_BINTRAMODES];
+ int i8x8_mode_count [VP8_I8X8_MODES];
+ int sub_mv_ref_count [SUBMVREF_COUNT][VP8_SUBMVREFS];
+ int mbsplit_count [VP8_NUMMBSPLITS];
+ // int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
+ int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
+
+ unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
#if CONFIG_HIGH_PRECISION_MV
- unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
+ unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
#endif
- unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
- //DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
- //save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
- vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
- unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
- unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
- vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
- unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+ unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
+ // DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
+ // save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
+ vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+ unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+ unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
+ vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+ unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
- int gfu_boost;
- int last_boost;
- int kf_boost;
- int kf_zeromotion_pct;
+ int gfu_boost;
+ int last_boost;
+ int kf_boost;
+ int kf_zeromotion_pct;
- int target_bandwidth;
- struct vpx_codec_pkt_list *output_pkt_list;
+ int target_bandwidth;
+ struct vpx_codec_pkt_list *output_pkt_list;
#if 0
- // Experimental code for lagged and one pass
- ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
- int one_pass_frame_index;
+ // Experimental code for lagged and one pass
+ ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
+ int one_pass_frame_index;
#endif
- MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
- int mbgraph_n_frames; // number of frames filled in the above
- int static_mb_pct; // % forced skip mbs by segmentation
- int seg0_progress, seg0_idx, seg0_cnt;
- int ref_pred_count[3][2];
-
- int decimation_factor;
- int decimation_count;
-
- // for real time encoding
- int avg_encode_time; //microsecond
- int avg_pick_mode_time; //microsecond
- int Speed;
- unsigned int cpu_freq; //Mhz
- int compressor_speed;
-
- int interquantizer;
- int goldfreq;
- int auto_worst_q;
- int cpu_used;
- int horiz_scale;
- int vert_scale;
- int pass;
+ MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
+ int mbgraph_n_frames; // number of frames filled in the above
+ int static_mb_pct; // % forced skip mbs by segmentation
+ int seg0_progress, seg0_idx, seg0_cnt;
+ int ref_pred_count[3][2];
+
+ int decimation_factor;
+ int decimation_count;
+
+ // for real time encoding
+ int avg_encode_time; // microsecond
+ int avg_pick_mode_time; // microsecond
+ int Speed;
+ unsigned int cpu_freq; // MHz
+ int compressor_speed;
+
+ int interquantizer;
+ int goldfreq;
+ int auto_worst_q;
+ int cpu_used;
+ int horiz_scale;
+ int vert_scale;
+ int pass;
#if CONFIG_NEWENTROPY
- vp8_prob last_skip_false_probs[3][MBSKIP_CONTEXTS];
+ vp8_prob last_skip_false_probs[3][MBSKIP_CONTEXTS];
#else
- vp8_prob prob_skip_false;
- vp8_prob last_skip_false_probs[3];
+ vp8_prob prob_skip_false;
+ vp8_prob last_skip_false_probs[3];
#endif
- int last_skip_probs_q[3];
+ int last_skip_probs_q[3];
- int recent_ref_frame_usage[MAX_REF_FRAMES];
- int count_mb_ref_frame_usage[MAX_REF_FRAMES];
- int ref_frame_flags;
+ int recent_ref_frame_usage[MAX_REF_FRAMES];
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+ int ref_frame_flags;
- unsigned char ref_pred_probs_update[PREDICTION_PROBS];
+ unsigned char ref_pred_probs_update[PREDICTION_PROBS];
- SPEED_FEATURES sf;
- int error_bins[1024];
+ SPEED_FEATURES sf;
+ int error_bins[1024];
- // Data used for real time conferencing mode to help determine if it would be good to update the gf
- int inter_zz_count;
- int gf_bad_count;
- int gf_update_recommended;
+ // Data used for real time conferencing mode to help determine if it would be good to update the gf
+ int inter_zz_count;
+ int gf_bad_count;
+ int gf_update_recommended;
#if CONFIG_NEWENTROPY
- int skip_true_count[3];
- int skip_false_count[3];
+ int skip_true_count[3];
+ int skip_false_count[3];
#else
- int skip_true_count;
- int skip_false_count;
+ int skip_true_count;
+ int skip_false_count;
#endif
- int t4x4_count;
- int t8x8_count;
+ int t4x4_count;
+ int t8x8_count;
- unsigned char *segmentation_map;
+ unsigned char *segmentation_map;
- // segment threashold for encode breakout
- int segment_encode_breakout[MAX_MB_SEGMENTS];
+ // segment threshold for encode breakout
+ int segment_encode_breakout[MAX_MB_SEGMENTS];
- unsigned char *active_map;
- unsigned int active_map_enabled;
+ unsigned char *active_map;
+ unsigned int active_map_enabled;
- TOKENLIST *tplist;
+ TOKENLIST *tplist;
- fractional_mv_step_fp *find_fractional_mv_step;
- vp8_full_search_fn_t full_search_sad;
- vp8_refining_search_fn_t refining_search_sad;
- vp8_diamond_search_fn_t diamond_search_sad;
- vp8_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SEGMENTS];
- unsigned int time_receive_data;
- unsigned int time_compress_data;
- unsigned int time_pick_lpf;
- unsigned int time_encode_mb_row;
+ fractional_mv_step_fp *find_fractional_mv_step;
+ vp8_full_search_fn_t full_search_sad;
+ vp8_refining_search_fn_t refining_search_sad;
+ vp8_diamond_search_fn_t diamond_search_sad;
+ vp8_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SEGMENTS];
+ unsigned int time_receive_data;
+ unsigned int time_compress_data;
+ unsigned int time_pick_lpf;
+ unsigned int time_encode_mb_row;
#if CONFIG_NEWENTROPY
- int base_skip_false_prob[QINDEX_RANGE][3];
+ int base_skip_false_prob[QINDEX_RANGE][3];
#else
- int base_skip_false_prob[QINDEX_RANGE];
+ int base_skip_false_prob[QINDEX_RANGE];
#endif
- struct twopass_rc
- {
- unsigned int section_intra_rating;
- unsigned int next_iiratio;
- unsigned int this_iiratio;
- FIRSTPASS_STATS *total_stats;
- FIRSTPASS_STATS *this_frame_stats;
- FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
- FIRSTPASS_STATS *total_left_stats;
- int first_pass_done;
- int64_t bits_left;
- int64_t clip_bits_total;
- double avg_iiratio;
- double modified_error_total;
- double modified_error_used;
- double modified_error_left;
- double kf_intra_err_min;
- double gf_intra_err_min;
- int frames_to_key;
- int maxq_max_limit;
- int maxq_min_limit;
- int static_scene_max_gf_interval;
- int kf_bits;
- int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
-
- // Projected total bits available for a key frame group of frames
- int64_t kf_group_bits;
-
- // Error score of frames still to be coded in kf group
- int64_t kf_group_error_left;
-
- int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
- int gf_bits; // Bits for the golden frame or ARF - 2 pass only
- int alt_extra_bits;
-
- int sr_update_lag;
- double est_max_qcorrection_factor;
- } twopass;
+ struct twopass_rc {
+ unsigned int section_intra_rating;
+ unsigned int next_iiratio;
+ unsigned int this_iiratio;
+ FIRSTPASS_STATS *total_stats;
+ FIRSTPASS_STATS *this_frame_stats;
+ FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
+ FIRSTPASS_STATS *total_left_stats;
+ int first_pass_done;
+ int64_t bits_left;
+ int64_t clip_bits_total;
+ double avg_iiratio;
+ double modified_error_total;
+ double modified_error_used;
+ double modified_error_left;
+ double kf_intra_err_min;
+ double gf_intra_err_min;
+ int frames_to_key;
+ int maxq_max_limit;
+ int maxq_min_limit;
+ int static_scene_max_gf_interval;
+ int kf_bits;
+ int gf_group_error_left; // Remaining error from uncoded frames in a gf group. Two pass use only
+
+ // Projected total bits available for a key frame group of frames
+ int64_t kf_group_bits;
+
+ // Error score of frames still to be coded in kf group
+ int64_t kf_group_error_left;
+
+ int gf_group_bits; // Projected Bits available for a group of frames including 1 GF or ARF
+ int gf_bits; // Bits for the golden frame or ARF - 2 pass only
+ int alt_extra_bits;
+
+ int sr_update_lag;
+ double est_max_qcorrection_factor;
+ } twopass;
#if CONFIG_RUNTIME_CPU_DETECT
- VP8_ENCODER_RTCD rtcd;
+ VP8_ENCODER_RTCD rtcd;
#endif
#if VP8_TEMPORAL_ALT_REF
- YV12_BUFFER_CONFIG alt_ref_buffer;
- YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
- int fixed_divide[512];
+ YV12_BUFFER_CONFIG alt_ref_buffer;
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
+ int fixed_divide[512];
#endif
#if CONFIG_INTERNAL_STATS
- int count;
- double total_y;
- double total_u;
- double total_v;
- double total ;
- double total_sq_error;
- double totalp_y;
- double totalp_u;
- double totalp_v;
- double totalp;
- double total_sq_error2;
- int bytes;
- double summed_quality;
- double summed_weights;
- unsigned int tot_recode_hits;
-
-
- double total_ssimg_y;
- double total_ssimg_u;
- double total_ssimg_v;
- double total_ssimg_all;
-
- int b_calculate_ssimg;
+ int count;
+ double total_y;
+ double total_u;
+ double total_v;
+ double total;
+ double total_sq_error;
+ double totalp_y;
+ double totalp_u;
+ double totalp_v;
+ double totalp;
+ double total_sq_error2;
+ int bytes;
+ double summed_quality;
+ double summed_weights;
+ unsigned int tot_recode_hits;
+
+
+ double total_ssimg_y;
+ double total_ssimg_u;
+ double total_ssimg_v;
+ double total_ssimg_all;
+
+ int b_calculate_ssimg;
#endif
- int b_calculate_psnr;
+ int b_calculate_psnr;
- // Per MB activity measurement
- unsigned int activity_avg;
- unsigned int * mb_activity_map;
- int * mb_norm_activity_map;
+ // Per MB activity measurement
+ unsigned int activity_avg;
+ unsigned int *mb_activity_map;
+ int *mb_norm_activity_map;
- // Record of which MBs still refer to last golden frame either
- // directly or through 0,0
- unsigned char *gf_active_flags;
- int gf_active_count;
+ // Record of which MBs still refer to last golden frame either
+ // directly or through 0,0
+ unsigned char *gf_active_flags;
+ int gf_active_count;
- int output_partition;
+ int output_partition;
- //Store last frame's MV info for next frame MV prediction
- int_mv *lfmv;
- int *lf_ref_frame_sign_bias;
- int *lf_ref_frame;
+ // Store last frame's MV info for next frame MV prediction
+ int_mv *lfmv;
+ int *lf_ref_frame_sign_bias;
+ int *lf_ref_frame;
- /* force next frame to intra when kf_auto says so */
- int force_next_frame_intra;
+ /* force next frame to intra when kf_auto says so */
+ int force_next_frame_intra;
- int droppable;
+ int droppable;
- // Global store for SB left contexts, one for each MB row in the SB
- ENTROPY_CONTEXT_PLANES left_context[2];
+ // Global store for SB left contexts, one for each MB row in the SB
+ ENTROPY_CONTEXT_PLANES left_context[2];
- // TODO Do we still need this??
- int update_context;
+ // TODO: Do we still need this?
+ int update_context;
- int dummy_packing; /* flag to indicate if packing is dummy */
+ int dummy_packing; /* flag to indicate if packing is dummy */
#if CONFIG_PRED_FILTER
- int pred_filter_on_count;
- int pred_filter_off_count;
+ int pred_filter_on_count;
+ int pred_filter_off_count;
#endif
} VP8_COMP;
@@ -786,18 +769,18 @@ void vp8_set_speed_features(VP8_COMP *cpi);
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval,expr) do {\
- lval = (expr); \
- if(!lval) \
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
- "Failed to allocate "#lval" at %s:%d", \
- __FILE__,__LINE__);\
- } while(0)
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval" at %s:%d", \
+ __FILE__,__LINE__);\
+ } while(0)
#else
#define CHECK_MEM_ERROR(lval,expr) do {\
- lval = (expr); \
- if(!lval) \
- vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
- "Failed to allocate "#lval);\
- } while(0)
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval);\
+ } while(0)
#endif
#endif
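
A hedged usage sketch for CHECK_MEM_ERROR: it expects a VP8_COMP *cpi in scope for the error context, and on failure vpx_internal_error() unwinds through cpi->common.error instead of returning. The allocation shown is illustrative; vpx_calloc is the zeroing allocator wrapper from vpx_mem:

    /* Sketch: allocate a one-byte-per-MB buffer through the macro. */
    CHECK_MEM_ERROR(cpi->active_map,
                    vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));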
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 171ec3a4d..d17dd9219 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -39,635 +39,597 @@ extern void
extern void vp8_loop_filter_frame_segment
(
- VP8_COMMON *cm,
- MACROBLOCKD *xd,
- int default_filt_lvl,
- int segment
+ VP8_COMMON *cm,
+ MACROBLOCKD *xd,
+ int default_filt_lvl,
+ int segment
);
void
-vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction)
-{
- unsigned char *src_y, *dst_y;
- int yheight;
- int ystride;
- int border;
- int yoffset;
- int linestocopy;
+vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction) {
+ unsigned char *src_y, *dst_y;
+ int yheight;
+ int ystride;
+ int border;
+ int yoffset;
+ int linestocopy;
- border = src_ybc->border;
- yheight = src_ybc->y_height;
- ystride = src_ybc->y_stride;
+ border = src_ybc->border;
+ yheight = src_ybc->y_height;
+ ystride = src_ybc->y_stride;
- linestocopy = (yheight >> (Fraction + 4));
+ linestocopy = (yheight >> (Fraction + 4));
- if (linestocopy < 1)
- linestocopy = 1;
+ if (linestocopy < 1)
+ linestocopy = 1;
- linestocopy <<= 4;
+ linestocopy <<= 4;
- yoffset = ystride * ((yheight >> 5) * 16 - 8);
- src_y = src_ybc->y_buffer + yoffset;
- dst_y = dst_ybc->y_buffer + yoffset;
+ yoffset = ystride * ((yheight >> 5) * 16 - 8);
+ src_y = src_ybc->y_buffer + yoffset;
+ dst_y = dst_ybc->y_buffer + yoffset;
- vpx_memcpy(dst_y, src_y, ystride *(linestocopy + 16));
+ vpx_memcpy(dst_y, src_y, ystride * (linestocopy + 16));
}
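
The Fraction arithmetic above is easiest to see with numbers. Assuming yheight = 720 and Fraction = 3, the function copies an 80-line band (plus the 16-line apron in the memcpy) starting 344 lines into the plane:

    /* Worked example with assumed inputs; results in the comments. */
    int yheight = 720, Fraction = 3;
    int linestocopy = yheight >> (Fraction + 4);  /* 720 >> 7 = 5        */
    if (linestocopy < 1) linestocopy = 1;
    linestocopy <<= 4;                            /* 5 << 4 = 80 lines   */
    int yoffset_rows = (yheight >> 5) * 16 - 8;   /* 22 * 16 - 8 = 344   */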
-static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd)
-{
- int i, j;
- int Total = 0;
- int srcoffset, dstoffset;
- unsigned char *src = source->y_buffer;
- unsigned char *dst = dest->y_buffer;
+static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd) {
+ int i, j;
+ int Total = 0;
+ int srcoffset, dstoffset;
+ unsigned char *src = source->y_buffer;
+ unsigned char *dst = dest->y_buffer;
- int linestocopy = (source->y_height >> (Fraction + 4));
- (void)rtcd;
+ int linestocopy = (source->y_height >> (Fraction + 4));
+ (void)rtcd;
- if (linestocopy < 1)
- linestocopy = 1;
+ if (linestocopy < 1)
+ linestocopy = 1;
- linestocopy <<= 4;
+ linestocopy <<= 4;
- srcoffset = source->y_stride * (dest->y_height >> 5) * 16;
- dstoffset = dest->y_stride * (dest->y_height >> 5) * 16;
+ srcoffset = source->y_stride * (dest->y_height >> 5) * 16;
+ dstoffset = dest->y_stride * (dest->y_height >> 5) * 16;
- src += srcoffset;
- dst += dstoffset;
+ src += srcoffset;
+ dst += dstoffset;
- // Loop through the Y plane raw and reconstruction data summing (square differences)
- for (i = 0; i < linestocopy; i += 16)
- {
- for (j = 0; j < source->y_width; j += 16)
- {
- unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
- }
-
- src += 16 * source->y_stride;
- dst += 16 * dest->y_stride;
+ // Loop through the Y plane raw and reconstruction data summing (square differences)
+ for (i = 0; i < linestocopy; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
}
- return Total;
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
}
// Enforce a minimum filter level based upon baseline Q
-static int get_min_filter_level(VP8_COMP *cpi, int base_qindex)
-{
- int min_filter_level;
- /*int q = (int) vp8_convert_qindex_to_q(base_qindex);
-
- if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
- min_filter_level = 0;
- else
- {
- if (q <= 10)
- min_filter_level = 0;
- else if (q <= 64)
- min_filter_level = 1;
- else
- min_filter_level = (q >> 6);
- }
- */
- min_filter_level = 0;
-
- return min_filter_level;
+static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) {
+ int min_filter_level;
+ /*int q = (int) vp8_convert_qindex_to_q(base_qindex);
+
+ if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
+ min_filter_level = 0;
+ else
+ {
+ if (q <= 10)
+ min_filter_level = 0;
+ else if (q <= 64)
+ min_filter_level = 1;
+ else
+ min_filter_level = (q >> 6);
+ }
+ */
+ min_filter_level = 0;
+
+ return min_filter_level;
}
// Enforce a maximum filter level based upon baseline Q
-static int get_max_filter_level(VP8_COMP *cpi, int base_qindex)
-{
- // PGW August 2006: Highest filter values almost always a bad idea
+static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) {
+ // PGW August 2006: Highest filter values almost always a bad idea
- // jbb chg: 20100118 - not so any more with this overquant stuff allow high values
- // with lots of intra coming in.
- int max_filter_level = MAX_LOOP_FILTER ;//* 3 / 4;
- (void)base_qindex;
+ // jbb chg: 20100118 - not so any more with this overquant stuff allow high values
+ // with lots of intra coming in.
+ int max_filter_level = MAX_LOOP_FILTER; // * 3 / 4;
+ (void)base_qindex;
- if (cpi->twopass.section_intra_rating > 8)
- max_filter_level = MAX_LOOP_FILTER * 3 / 4;
+ if (cpi->twopass.section_intra_rating > 8)
+ max_filter_level = MAX_LOOP_FILTER * 3 / 4;
- return max_filter_level;
+ return max_filter_level;
}
-void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
-
- int best_err = 0;
- int filt_err = 0;
- int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
- int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
- int filt_val;
- int best_filt_val = cm->filter_level;
-
- // Make a copy of the unfiltered / processed recon buffer
- vp8_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);
-
- if (cm->frame_type == KEY_FRAME)
- cm->sharpness_level = 0;
- else
- cm->sharpness_level = cpi->oxcf.Sharpness;
-
- if (cm->sharpness_level != cm->last_sharpness_level)
- {
- vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
- cm->last_sharpness_level = cm->sharpness_level;
- }
+void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- // Start the search at the previous frame filter level unless it is now out of range.
- if (cm->filter_level < min_filter_level)
- cm->filter_level = min_filter_level;
- else if (cm->filter_level > max_filter_level)
- cm->filter_level = max_filter_level;
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+ int filt_val;
+ int best_filt_val = cm->filter_level;
- filt_val = cm->filter_level;
- best_filt_val = filt_val;
+ // Make a copy of the unfiltered / processed recon buffer
+ vp8_yv12_copy_partial_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf, 3);
- // Get the err using the previous frame's filter value.
- vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+ if (cm->frame_type == KEY_FRAME)
+ cm->sharpness_level = 0;
+ else
+ cm->sharpness_level = cpi->oxcf.Sharpness;
- best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ if (cm->sharpness_level != cm->last_sharpness_level) {
+ vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+ }
- // Re-instate the unfiltered frame
- vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
+ // Start the search at the previous frame filter level unless it is now out of range.
+ if (cm->filter_level < min_filter_level)
+ cm->filter_level = min_filter_level;
+ else if (cm->filter_level > max_filter_level)
+ cm->filter_level = max_filter_level;
- filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
+ filt_val = cm->filter_level;
+ best_filt_val = filt_val;
- // Search lower filter levels
- while (filt_val >= min_filter_level)
- {
- // Apply the loop filter
- vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+ // Get the err using the previous frame's filter value.
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- // Get the err for filtered frame
- filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
- // Re-instate the unfiltered frame
- vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
+ filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
- // Update the best case record or exit loop.
- if (filt_err < best_err)
- {
- best_err = filt_err;
- best_filt_val = filt_val;
- }
- else
- break;
-
- // Adjust filter level
- filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
- }
+ // Search lower filter levels
+ while (filt_val >= min_filter_level) {
+ // Apply the loop filter
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+
+ // Get the err for filtered frame
+ filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
- // Search up (note that we have already done filt_val = cm->filter_level)
- filt_val = cm->filter_level + (1 + ((filt_val > 10) ? 1 : 0));
- if (best_filt_val == cm->filter_level)
- {
- // Resist raising filter level for very small gains
- best_err -= (best_err >> 10);
+ // Update the best case record or exit loop.
+ if (filt_err < best_err) {
+ best_err = filt_err;
+ best_filt_val = filt_val;
+ } else
+ break;
+
+ // Adjust filter level
+ filt_val -= (1 + ((filt_val > 10) ? 1 : 0));
+ }
- while (filt_val < max_filter_level)
- {
- // Apply the loop filter
- vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
+ // Search up (note that we have already done filt_val = cm->filter_level)
+ filt_val = cm->filter_level + (1 + ((filt_val > 10) ? 1 : 0));
- // Get the err for filtered frame
- filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ if (best_filt_val == cm->filter_level) {
+ // Resist raising filter level for very small gains
+ best_err -= (best_err >> 10);
- // Re-instate the unfiltered frame
- vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
+ while (filt_val < max_filter_level) {
+ // Apply the loop filter
+ vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- // Update the best case record or exit loop.
- if (filt_err < best_err)
- {
- // Do not raise filter level if improvement is < 1 part in 4096
- best_err = filt_err - (filt_err >> 10);
+ // Get the err for filtered frame
+ filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
- best_filt_val = filt_val;
- }
- else
- break;
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
- // Adjust filter level
- filt_val += (1 + ((filt_val > 10) ? 1 : 0));
- }
+ // Update the best case record or exit loop.
+ if (filt_err < best_err) {
+ // Do not raise filter level if improvement is < 1 part in 1024 (filt_err >> 10)
+ best_err = filt_err - (filt_err >> 10);
+
+ best_filt_val = filt_val;
+ } else
+ break;
+
+ // Adjust filter level
+ filt_val += (1 + ((filt_val > 10) ? 1 : 0));
}
+ }
- cm->filter_level = best_filt_val;
+ cm->filter_level = best_filt_val;
- if (cm->filter_level < min_filter_level)
- cm->filter_level = min_filter_level;
+ if (cm->filter_level < min_filter_level)
+ cm->filter_level = min_filter_level;
- if (cm->filter_level > max_filter_level)
- cm->filter_level = max_filter_level;
+ if (cm->filter_level > max_filter_level)
+ cm->filter_level = max_filter_level;
}
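
Two properties of the fast search above, restated for reference: the step size is 1 for filter levels 0..10 and 2 above that, and the best_err >> 10 haircuts bias the upward search against gains smaller than roughly one part in 1024. A compact sketch of the step rule:

    /* Step size used by the fast search above. */
    static int lf_step(int filt_val) {
      return 1 + (filt_val > 10);  /* 1 for levels 0..10, 2 above */
    }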
// Stub function for now Alt LF not used
-void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val)
-{
+void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) {
}
#if CONFIG_FEATUREUPDATES
-void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segment)
-{
- VP8_COMMON *cm = &cpi->common;
+void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segment) {
+ VP8_COMMON *cm = &cpi->common;
- int best_err = 0;
- int filt_err = 0;
- int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
- int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
- int filter_step;
- int filt_high = 0;
- int filt_mid = cm->filter_level; // Start search at previous frame filter level
- int filt_low = 0;
- int filt_best;
- int filt_direction = 0;
+ int filter_step;
+ int filt_high = 0;
+ int filt_mid = cm->filter_level; // Start search at previous frame filter level
+ int filt_low = 0;
+ int filt_best;
+ int filt_direction = 0;
- int Bias = 0; // Bias against raising loop filter and in favour of lowering it
+ int Bias = 0; // Bias against raising loop filter and in favour of lowering it
- // Make a copy of the unfiltered / processed recon buffer
+ // Make a copy of the unfiltered / processed recon buffer
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(cm->frame_to_show, &cpi->last_frame_uf);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(cm->frame_to_show, &cpi->last_frame_uf);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf);
- }
+ {
+ vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf);
+ }
#endif
- if (cm->frame_type == KEY_FRAME)
- cm->sharpness_level = 0;
- else
- cm->sharpness_level = cpi->oxcf.Sharpness;
+ if (cm->frame_type == KEY_FRAME)
+ cm->sharpness_level = 0;
+ else
+ cm->sharpness_level = cpi->oxcf.Sharpness;
- // Start the search at the previous frame filter level unless it is now out of range.
- filt_mid = cm->filter_level;
+ // Start the search at the previous frame filter level unless it is now out of range.
+ filt_mid = cm->filter_level;
- if (filt_mid < min_filter_level)
- filt_mid = min_filter_level;
- else if (filt_mid > max_filter_level)
- filt_mid = max_filter_level;
+ if (filt_mid < min_filter_level)
+ filt_mid = min_filter_level;
+ else if (filt_mid > max_filter_level)
+ filt_mid = max_filter_level;
- // Define the initial step size
- filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
+ // Define the initial step size
+ filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
- // Get baseline error score
- vp8cx_set_alt_lf_level(cpi, filt_mid);
- vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_mid,segment);
+ // Get baseline error score
+ vp8cx_set_alt_lf_level(cpi, filt_mid);
+ vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_mid, segment);
- best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- filt_best = filt_mid;
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_best = filt_mid;
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#endif
- while (filter_step > 0)
- {
- Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
+ while (filter_step > 0) {
+ Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; // PGW change 12/12/06 for small images
- // jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
- if (cpi->twopass.section_intra_rating < 20)
- Bias = Bias * cpi->twopass.section_intra_rating / 20;
+ // jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
+ if (cpi->twopass.section_intra_rating < 20)
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
- // yx, bias less for large block size
- if(cpi->common.txfm_mode == ALLOW_8X8)
- Bias >>= 1;
+ // yx, bias less for large block size
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ Bias >>= 1;
- filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
- filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
+ filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
+ filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
- if ((filt_direction <= 0) && (filt_low != filt_mid))
- {
- // Get Low filter error score
- vp8cx_set_alt_lf_level(cpi, filt_low);
- vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_low, segment);
+ if ((filt_direction <= 0) && (filt_low != filt_mid)) {
+ // Get Low filter error score
+ vp8cx_set_alt_lf_level(cpi, filt_low);
+ vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_low, segment);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#endif
- // If value is close to the best so far then bias towards a lower loop filter value.
- if ((filt_err - Bias) < best_err)
- {
- // Was it actually better than the previous best?
- if (filt_err < best_err)
- best_err = filt_err;
+ // If value is close to the best so far then bias towards a lower loop filter value.
+ if ((filt_err - Bias) < best_err) {
+ // Was it actually better than the previous best?
+ if (filt_err < best_err)
+ best_err = filt_err;
- filt_best = filt_low;
- }
- }
+ filt_best = filt_low;
+ }
+ }
- // Now look at filt_high
- if ((filt_direction >= 0) && (filt_high != filt_mid))
- {
- vp8cx_set_alt_lf_level(cpi, filt_high);
- vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_high, segment);
+ // Now look at filt_high
+ if ((filt_direction >= 0) && (filt_high != filt_mid)) {
+ vp8cx_set_alt_lf_level(cpi, filt_high);
+ vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_high, segment);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
-#endif
-
- // Was it better than the previous best?
- if (filt_err < (best_err - Bias))
- {
- best_err = filt_err;
- filt_best = filt_high;
- }
- }
-
- // Half the step distance if the best filter value was the same as last time
- if (filt_best == filt_mid)
- {
- filter_step = filter_step / 2;
- filt_direction = 0;
- }
- else
- {
- filt_direction = (filt_best < filt_mid) ? -1 : 1;
- filt_mid = filt_best;
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
+#endif
+
+ // Was it better than the previous best?
+ if (filt_err < (best_err - Bias)) {
+ best_err = filt_err;
+ filt_best = filt_high;
+ }
}
- cm->filter_level = filt_best;
+ // Half the step distance if the best filter value was the same as last time
+ if (filt_best == filt_mid) {
+ filter_step = filter_step / 2;
+ filt_direction = 0;
+ } else {
+ filt_direction = (filt_best < filt_mid) ? -1 : 1;
+ filt_mid = filt_best;
+ }
+ }
+
+ cm->filter_level = filt_best;
}
-void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
-{
- VP8_COMMON *oci = &cpi->common;
- MODE_INFO *mi = oci->mi;
- int filt_lev[2];
- int i, j;
- MACROBLOCKD * const xd = &cpi->mb.e_mbd;
- int max_seg;
- int mb_index = 0;
-
- // pick the loop filter for each segment after segment 0
- for (i = 1; i < MAX_MB_SEGMENTS; i++)
- {
- // if the segment loop filter is active
- if (segfeature_active(xd, i, SEG_LVL_ALT_LF))
- {
- set_segdata(xd, i, SEG_LVL_ALT_LF, 0);
- vp8cx_pick_filter_level_sg(sd, cpi, i);
- filt_lev[i] = oci->filter_level;
- }
+void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *oci = &cpi->common;
+ MODE_INFO *mi = oci->mi;
+ int filt_lev[2];
+ int i, j;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int max_seg;
+ int mb_index = 0;
+
+ // pick the loop filter for each segment after segment 0
+ for (i = 1; i < MAX_MB_SEGMENTS; i++) {
+ // if the segment loop filter is active
+ if (segfeature_active(xd, i, SEG_LVL_ALT_LF)) {
+ set_segdata(xd, i, SEG_LVL_ALT_LF, 0);
+ vp8cx_pick_filter_level_sg(sd, cpi, i);
+ filt_lev[i] = oci->filter_level;
+ }
+ }
+
+  // do segment 0 (this pass also picks the filter value for all
+  // the features that are not enabled)
+
+  // TODO: Fix the code if segment 0 is the one with seg_lvl_alt_lf on;
+  // right now it assumes segment 0 gets the base loop filter and the
+  // rest are deltas off of segment 0.
+ set_segdata(xd, 0, SEG_LVL_ALT_LF, 0);
+ vp8cx_pick_filter_level_sg(sd, cpi, 0);
+ filt_lev[0] = oci->filter_level;
+
+ // convert the best filter level for the mbs of the segment to
+ // a delta from 0
+ for (i = 1; i < MAX_MB_SEGMENTS; i++)
+ if (segfeature_active(xd, i, SEG_LVL_ALT_LF)) {
+ set_segdata(xd, i, SEG_LVL_ALT_LF, filt_lev[i] - filt_lev[0]);
+      // accumulate the update flag ('!=' here was a no-op comparison)
+      xd->update_mb_segmentation_data |=
+        segfeature_changed(xd, i, SEG_LVL_ALT_LF);
}
-
- // do the 0 segment ( this filter also picks the filter value for all
- // the not enabled features )
-
- // TODO : Fix the code if segment 0 is the one with seg_lvl_alt_lf on
- // right now assumes segment 0 gets base loop filter and the rest are
- // deltas off of segment 0.
- set_segdata(xd, 0, SEG_LVL_ALT_LF, 0);
- vp8cx_pick_filter_level_sg(sd, cpi, 0);
- filt_lev[0] = oci->filter_level;
-
- // convert the best filter level for the mbs of the segment to
- // a delta from 0
- for (i = 1; i < MAX_MB_SEGMENTS; i++)
- if (segfeature_active(xd, i, SEG_LVL_ALT_LF))
- {
- set_segdata(xd, i, SEG_LVL_ALT_LF, filt_lev[i] - filt_lev[0]);
- xd->update_mb_segmentation_data !=
- segfeature_changed( xd,i,SEG_LVL_ALT_LF);
- }
}
#else
-void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
+void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- int best_err = 0;
- int filt_err = 0;
- int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
- int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
- int filter_step;
- int filt_high = 0;
- int filt_mid = cm->filter_level; // Start search at previous frame filter level
- int filt_low = 0;
- int filt_best;
- int filt_direction = 0;
+ int filter_step;
+ int filt_high = 0;
+ int filt_mid = cm->filter_level; // Start search at previous frame filter level
+ int filt_low = 0;
+ int filt_best;
+ int filt_direction = 0;
- int Bias = 0; // Bias against raising loop filter and in favour of lowering it
+ int Bias = 0; // Bias against raising loop filter and in favour of lowering it
- // Make a copy of the unfiltered / processed recon buffer
+ // Make a copy of the unfiltered / processed recon buffer
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(cm->frame_to_show, &cpi->last_frame_uf);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(cm->frame_to_show, &cpi->last_frame_uf);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf);
- }
+ {
+ vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cpi->last_frame_uf);
+ }
#endif
- if (cm->frame_type == KEY_FRAME)
- cm->sharpness_level = 0;
- else
- cm->sharpness_level = cpi->oxcf.Sharpness;
+ if (cm->frame_type == KEY_FRAME)
+ cm->sharpness_level = 0;
+ else
+ cm->sharpness_level = cpi->oxcf.Sharpness;
- // Start the search at the previous frame filter level unless it is now out of range.
- filt_mid = cm->filter_level;
+ // Start the search at the previous frame filter level unless it is now out of range.
+ filt_mid = cm->filter_level;
- if (filt_mid < min_filter_level)
- filt_mid = min_filter_level;
- else if (filt_mid > max_filter_level)
- filt_mid = max_filter_level;
+ if (filt_mid < min_filter_level)
+ filt_mid = min_filter_level;
+ else if (filt_mid > max_filter_level)
+ filt_mid = max_filter_level;
- // Define the initial step size
- filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
+ // Define the initial step size
+ filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
- // Get baseline error score
- vp8cx_set_alt_lf_level(cpi, filt_mid);
- vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
+ // Get baseline error score
+ vp8cx_set_alt_lf_level(cpi, filt_mid);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
- best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- filt_best = filt_mid;
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_best = filt_mid;
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#endif
- while (filter_step > 0)
- {
- Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; //PGW change 12/12/06 for small images
+ while (filter_step > 0) {
+ Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; // PGW change 12/12/06 for small images
- // jbb chg: 20100118 - in sections with lots of new material coming in don't bias as much to a low filter value
- if (cpi->twopass.section_intra_rating < 20)
- Bias = Bias * cpi->twopass.section_intra_rating / 20;
+    // jbb chg: 20100118 - in sections with lots of new material coming in, don't bias as much toward a low filter value
+ if (cpi->twopass.section_intra_rating < 20)
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
- // yx, bias less for large block size
- if(cpi->common.txfm_mode == ALLOW_8X8)
- Bias >>= 1;
+ // yx, bias less for large block size
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ Bias >>= 1;
- filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
- filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
+ filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
+ filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
- if ((filt_direction <= 0) && (filt_low != filt_mid))
- {
- // Get Low filter error score
- vp8cx_set_alt_lf_level(cpi, filt_low);
- vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
+ if ((filt_direction <= 0) && (filt_low != filt_mid)) {
+ // Get Low filter error score
+ vp8cx_set_alt_lf_level(cpi, filt_low);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#endif
- // If value is close to the best so far then bias towards a lower loop filter value.
- if ((filt_err - Bias) < best_err)
- {
- // Was it actually better than the previous best?
- if (filt_err < best_err)
- best_err = filt_err;
+      // If the value is close to the best so far, bias towards a lower loop filter value.
+ if ((filt_err - Bias) < best_err) {
+ // Was it actually better than the previous best?
+ if (filt_err < best_err)
+ best_err = filt_err;
- filt_best = filt_low;
- }
- }
+ filt_best = filt_low;
+ }
+ }
- // Now look at filt_high
- if ((filt_direction >= 0) && (filt_high != filt_mid))
- {
- vp8cx_set_alt_lf_level(cpi, filt_high);
- vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
+ // Now look at filt_high
+ if ((filt_direction >= 0) && (filt_high != filt_mid)) {
+ vp8cx_set_alt_lf_level(cpi, filt_high);
+ vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
- // Re-instate the unfiltered frame
+ // Re-instate the unfiltered frame
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
- }
+ {
+ vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(&cpi->last_frame_uf, cm->frame_to_show);
+ }
#if CONFIG_RUNTIME_CPU_DETECT
- else
+ else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
- {
- vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
- }
-#endif
-
- // Was it better than the previous best?
- if (filt_err < (best_err - Bias))
- {
- best_err = filt_err;
- filt_best = filt_high;
- }
- }
-
- // Half the step distance if the best filter value was the same as last time
- if (filt_best == filt_mid)
- {
- filter_step = filter_step / 2;
- filt_direction = 0;
- }
- else
- {
- filt_direction = (filt_best < filt_mid) ? -1 : 1;
- filt_mid = filt_best;
- }
+ {
+ vp8_yv12_copy_frame_yonly_ptr(&cpi->last_frame_uf, cm->frame_to_show);
+ }
+#endif
+
+ // Was it better than the previous best?
+ if (filt_err < (best_err - Bias)) {
+ best_err = filt_err;
+ filt_best = filt_high;
+ }
+ }
+
+    // Halve the step distance if the best filter value was the same as last time
+ if (filt_best == filt_mid) {
+ filter_step = filter_step / 2;
+ filt_direction = 0;
+ } else {
+ filt_direction = (filt_best < filt_mid) ? -1 : 1;
+ filt_mid = filt_best;
}
+ }
- cm->filter_level = filt_best;
+ cm->filter_level = filt_best;
}
#endif
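
Both vp8cx_pick_filter_level variants above share one search shape: start at the previous frame's filter level, probe one step below and one step above against the sum-of-squares error of the filtered reconstruction, prefer the lower level whenever it lands within Bias of the best, and halve the step once the midpoint holds. A minimal stand-alone sketch of that pattern, in which eval() is a hypothetical callback standing in for the set-level / filter / vp8_calc_ss_err / restore-frame round trip and the bias scaling is simplified:

    /* Sketch only: eval(level) = "filter at this level, measure SSE,
     * re-instate the unfiltered frame". */
    static int pick_level(int lo, int hi, int start,
                          long long (*eval)(int level)) {
      int mid = (start < lo) ? lo : ((start > hi) ? hi : start);
      int step = (mid < 16) ? 4 : mid / 4;
      int dir = 0, best_lvl = mid;
      long long best = eval(mid);

      while (step > 0) {
        int down = (mid - step < lo) ? lo : mid - step;
        int up   = (mid + step > hi) ? hi : mid + step;
        long long bias = best >> 10;            /* favour lower levels */

        if (dir <= 0 && down != mid) {
          long long err = eval(down);
          if (err - bias < best) {              /* close enough: go lower */
            if (err < best) best = err;
            best_lvl = down;
          }
        }
        if (dir >= 0 && up != mid) {
          long long err = eval(up);
          if (err < best - bias) {              /* must clearly win to go up */
            best = err;
            best_lvl = up;
          }
        }
        if (best_lvl == mid) {
          step /= 2;                            /* midpoint held: refine */
          dir = 0;
        } else {
          dir = (best_lvl < mid) ? -1 : 1;      /* keep moving the same way */
          mid = best_lvl;
        }
      }
      return best_lvl;
    }
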
diff --git a/vp8/encoder/ppc/csystemdependent.c b/vp8/encoder/ppc/csystemdependent.c
index 49b1dd6a9..96c9a0384 100644
--- a/vp8/encoder/ppc/csystemdependent.c
+++ b/vp8/encoder/ppc/csystemdependent.c
@@ -115,42 +115,41 @@ extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_ppc;
extern unsigned int vp8_get8x8var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_ppc(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum);
-void vp8_cmachine_specific_config(void)
-{
- // Pure C:
- vp8_mbuverror = vp8_mbuverror_c;
- vp8_fast_quantize_b = vp8_fast_quantize_b_c;
- vp8_short_fdct4x4 = vp8_short_fdct4x4_ppc;
- vp8_short_fdct8x4 = vp8_short_fdct8x4_ppc;
- vp8_fast_fdct4x4 = vp8_short_fdct4x4_ppc;
- vp8_fast_fdct8x4 = vp8_short_fdct8x4_ppc;
- short_walsh4x4 = vp8_short_walsh4x4_c;
-
- vp8_variance4x4 = vp8_variance4x4_ppc;
- vp8_variance8x8 = vp8_variance8x8_ppc;
- vp8_variance8x16 = vp8_variance8x16_ppc;
- vp8_variance16x8 = vp8_variance16x8_ppc;
- vp8_variance16x16 = vp8_variance16x16_ppc;
- vp8_mse16x16 = vp8_mse16x16_ppc;
-
- vp8_sub_pixel_variance4x4 = vp8_sub_pixel_variance4x4_ppc;
- vp8_sub_pixel_variance8x8 = vp8_sub_pixel_variance8x8_ppc;
- vp8_sub_pixel_variance8x16 = vp8_sub_pixel_variance8x16_ppc;
- vp8_sub_pixel_variance16x8 = vp8_sub_pixel_variance16x8_ppc;
- vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;
-
- vp8_get_mb_ss = vp8_get_mb_ss_c;
-
- vp8_sad16x16 = vp8_sad16x16_ppc;
- vp8_sad16x8 = vp8_sad16x8_ppc;
- vp8_sad8x16 = vp8_sad8x16_ppc;
- vp8_sad8x8 = vp8_sad8x8_ppc;
- vp8_sad4x4 = vp8_sad4x4_ppc;
-
- vp8_block_error = vp8_block_error_ppc;
- vp8_mbblock_error = vp8_mbblock_error_c;
-
- vp8_subtract_b = vp8_subtract_b_c;
- vp8_subtract_mby = vp8_subtract_mby_ppc;
- vp8_subtract_mbuv = vp8_subtract_mbuv_ppc;
+void vp8_cmachine_specific_config(void) {
+ // Pure C:
+ vp8_mbuverror = vp8_mbuverror_c;
+ vp8_fast_quantize_b = vp8_fast_quantize_b_c;
+ vp8_short_fdct4x4 = vp8_short_fdct4x4_ppc;
+ vp8_short_fdct8x4 = vp8_short_fdct8x4_ppc;
+ vp8_fast_fdct4x4 = vp8_short_fdct4x4_ppc;
+ vp8_fast_fdct8x4 = vp8_short_fdct8x4_ppc;
+ short_walsh4x4 = vp8_short_walsh4x4_c;
+
+ vp8_variance4x4 = vp8_variance4x4_ppc;
+ vp8_variance8x8 = vp8_variance8x8_ppc;
+ vp8_variance8x16 = vp8_variance8x16_ppc;
+ vp8_variance16x8 = vp8_variance16x8_ppc;
+ vp8_variance16x16 = vp8_variance16x16_ppc;
+ vp8_mse16x16 = vp8_mse16x16_ppc;
+
+ vp8_sub_pixel_variance4x4 = vp8_sub_pixel_variance4x4_ppc;
+ vp8_sub_pixel_variance8x8 = vp8_sub_pixel_variance8x8_ppc;
+ vp8_sub_pixel_variance8x16 = vp8_sub_pixel_variance8x16_ppc;
+ vp8_sub_pixel_variance16x8 = vp8_sub_pixel_variance16x8_ppc;
+ vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;
+
+ vp8_get_mb_ss = vp8_get_mb_ss_c;
+
+ vp8_sad16x16 = vp8_sad16x16_ppc;
+ vp8_sad16x8 = vp8_sad16x8_ppc;
+ vp8_sad8x16 = vp8_sad8x16_ppc;
+ vp8_sad8x8 = vp8_sad8x8_ppc;
+ vp8_sad4x4 = vp8_sad4x4_ppc;
+
+ vp8_block_error = vp8_block_error_ppc;
+ vp8_mbblock_error = vp8_mbblock_error_c;
+
+ vp8_subtract_b = vp8_subtract_b_c;
+ vp8_subtract_mby = vp8_subtract_mby_ppc;
+ vp8_subtract_mbuv = vp8_subtract_mbuv_ppc;
}
diff --git a/vp8/encoder/psnr.c b/vp8/encoder/psnr.c
index 7ecf6d7cd..5aa5587ee 100644
--- a/vp8/encoder/psnr.c
+++ b/vp8/encoder/psnr.c
@@ -15,17 +15,16 @@
#define MAX_PSNR 100
-double vp8_mse2psnr(double Samples, double Peak, double Mse)
-{
- double psnr;
+double vp8_mse2psnr(double Samples, double Peak, double Mse) {
+ double psnr;
- if ((double)Mse > 0.0)
- psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
- else
- psnr = MAX_PSNR; // Limit to prevent / 0
+ if ((double)Mse > 0.0)
+ psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
+ else
+ psnr = MAX_PSNR; // Limit to prevent / 0
- if (psnr > MAX_PSNR)
- psnr = MAX_PSNR;
+ if (psnr > MAX_PSNR)
+ psnr = MAX_PSNR;
- return psnr;
+ return psnr;
}
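
In closed form the function computes PSNR = min(100, 10 * log10(Peak^2 * Samples / Mse)), where Mse is the total squared error over all Samples. A worked call with illustrative numbers:

    /* 16x16 8-bit block, total squared error 2560 (10 per sample):
     * 10 * log10(255 * 255 * 256 / 2560) ~= 38.13 dB.
     * Mse == 0 returns the MAX_PSNR cap (100) instead of dividing by 0. */
    double psnr = vp8_mse2psnr(256.0, 255.0, 2560.0);
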
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index dace31cba..821507c62 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -22,97 +22,89 @@
extern int enc_debug;
#endif
-void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- unsigned char *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
-
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
-
- for (i = 0; i < b->eob_max_offset; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
-
- zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
- zbin_boost_ptr ++;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
- }
- }
+void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
+ int i, rc, eob;
+ int zbin;
+ int x, y, z, sz;
+ short *zbin_boost_ptr = b->zrun_zbin_boost;
+ short *coeff_ptr = b->coeff;
+ short *zbin_ptr = b->zbin;
+ short *round_ptr = b->round;
+ short *quant_ptr = b->quant;
+ unsigned char *quant_shift_ptr = b->quant_shift;
+ short *qcoeff_ptr = d->qcoeff;
+ short *dqcoeff_ptr = d->dqcoeff;
+ short *dequant_ptr = d->dequant;
+ short zbin_oq_value = b->zbin_extra;
+
+ vpx_memset(qcoeff_ptr, 0, 32);
+ vpx_memset(dqcoeff_ptr, 0, 32);
+
+ eob = -1;
+
+ for (i = 0; i < b->eob_max_offset; i++) {
+ rc = vp8_default_zig_zag1d[i];
+ z = coeff_ptr[rc];
+
+ zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
+ zbin_boost_ptr++;
+
+ sz = (z >> 31); // sign of z
+ x = (z ^ sz) - sz; // x = abs(z)
+
+ if (x >= zbin) {
+ x += round_ptr[rc];
+ y = (((x * quant_ptr[rc]) >> 16) + x)
+ >> quant_shift_ptr[rc]; // quantize (x)
+ x = (y ^ sz) - sz; // get the sign back
+ qcoeff_ptr[rc] = x; // write to destination
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+
+ if (y) {
+ eob = i; // last nonzero coeffs
+ zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
+ }
}
+ }
- d->eob = eob + 1;
+ d->eob = eob + 1;
}
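
The branchless sign handling in that loop is worth unpacking once with concrete numbers; the multiply-shift quantize in the middle is replaced here by a plain divide stand-in:

    int z  = -37;
    int sz = z >> 31;        /* arithmetic shift: -1 if z < 0, else 0 */
    int x  = (z ^ sz) - sz;  /* two's-complement abs: x == 37 */
    int y  = x / 8;          /* stand-in for the multiply-shift quantize */
    int q  = (y ^ sz) - sz;  /* sign restored: q == -4 */
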
-void vp8_quantize_mby_c(MACROBLOCK *x)
-{
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+void vp8_quantize_mby_c(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for (i = 0; i < 16; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ for (i = 0; i < 16; i++)
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- if(has_2nd_order)
- x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
+ if (has_2nd_order)
+ x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}
-void vp8_quantize_mb_c(MACROBLOCK *x)
-{
- int i;
- int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+void vp8_quantize_mb_c(MACROBLOCK *x) {
+ int i;
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for (i = 0; i < 24+has_2nd_order; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ for (i = 0; i < 24 + has_2nd_order; i++)
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
-void vp8_quantize_mbuv_c(MACROBLOCK *x)
-{
- int i;
+void vp8_quantize_mbuv_c(MACROBLOCK *x) {
+ int i;
- for (i = 16; i < 24; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ for (i = 16; i < 24; i++)
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
-void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
-{
+void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -127,14 +119,13 @@ void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
- //double q2nd = 4;
+ // double q2nd = 4;
vpx_memset(qcoeff_ptr, 0, 32);
vpx_memset(dqcoeff_ptr, 0, 32);
eob = -1;
- for (i = 0; i < b->eob_max_offset_8x8; i++)
- {
+ for (i = 0; i < b->eob_max_offset_8x8; i++) {
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
@@ -145,8 +136,7 @@ void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
- if (x >= zbin)
- {
+ if (x >= zbin) {
x += (round_ptr[rc]);
y = ((int)((int)(x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
@@ -154,8 +144,7 @@ void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
- if (y)
- {
+ if (y) {
eob = i; // last nonzero coeffs
zbin_zrun_index = 0;
}
@@ -165,8 +154,7 @@ void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
-{
+void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -181,33 +169,30 @@ void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
- vpx_memset(qcoeff_ptr, 0, 64*sizeof(short));
- vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short));
+ vpx_memset(qcoeff_ptr, 0, 64 * sizeof(short));
+ vpx_memset(dqcoeff_ptr, 0, 64 * sizeof(short));
eob = -1;
- for (i = 0; i < b->eob_max_offset_8x8; i++)
- {
+ for (i = 0; i < b->eob_max_offset_8x8; i++) {
rc = vp8_default_zig_zag1d_8x8[i];
z = coeff_ptr[rc];
- zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value);
- zbin_boost_ptr ++;
+ zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value);
+ zbin_boost_ptr++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
- if (x >= zbin)
- {
- x += (round_ptr[rc!=0]);
- y = ((int)(((int)(x * quant_ptr[rc!=0]) >> 16) + x))
- >> quant_shift_ptr[rc!=0]; // quantize (x)
+ if (x >= zbin) {
+ x += (round_ptr[rc != 0]);
+ y = ((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x))
+ >> quant_shift_ptr[rc != 0]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0]; // dequantized value
- if (y)
- {
+ if (y) {
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost_8x8;
}
@@ -217,17 +202,15 @@ void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-void vp8_quantize_mby_8x8(MACROBLOCK *x)
-{
+void vp8_quantize_mby_8x8(MACROBLOCK *x) {
int i;
- int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for(i = 0; i < 16; i ++)
- {
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+  for (i = 0; i < 16; i++) {
x->e_mbd.block[i].eob = 0;
}
x->e_mbd.block[24].eob = 0;
- for (i = 0; i < 16; i+=4)
+ for (i = 0; i < 16; i += 4)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
@@ -235,31 +218,27 @@ void vp8_quantize_mby_8x8(MACROBLOCK *x)
}
-void vp8_quantize_mb_8x8(MACROBLOCK *x)
-{
+void vp8_quantize_mb_8x8(MACROBLOCK *x) {
int i;
- int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for(i = 0; i < 25; i ++)
- {
+ int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+  for (i = 0; i < 25; i++) {
x->e_mbd.block[i].eob = 0;
}
- for (i = 0; i < 24; i+=4)
+ for (i = 0; i < 24; i += 4)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
}
-void vp8_quantize_mbuv_8x8(MACROBLOCK *x)
-{
+void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
int i;
- for(i = 16; i < 24; i ++)
- {
+  for (i = 16; i < 24; i++) {
x->e_mbd.block[i].eob = 0;
}
- for (i = 16; i < 24; i+=4)
+ for (i = 16; i < 24; i += 4)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
@@ -269,347 +248,321 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x)
* these two C functions if corresponding optimized routine is not available.
* NEON optimized version implements currently the fast quantization for pair
* of blocks. */
-void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
-{
- vp8_regular_quantize_b(b1, d1);
- vp8_regular_quantize_b(b2, d2);
+void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
+ vp8_regular_quantize_b(b1, d1);
+ vp8_regular_quantize_b(b2, d2);
}
static void invert_quant(short *quant,
- unsigned char *shift, short d)
-{
- unsigned t;
- int l;
- t = d;
- for(l = 0; t > 1; l++)
- t>>=1;
- t = 1 + (1<<(16+l))/d;
- *quant = (short)(t - (1<<16));
- *shift = l;
+ unsigned char *shift, short d) {
+ unsigned t;
+ int l;
+ t = d;
+ for (l = 0; t > 1; l++)
+ t >>= 1;
+ t = 1 + (1 << (16 + l)) / d;
+ *quant = (short)(t - (1 << 16));
+ *shift = l;
}
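
invert_quant precomputes a fixed-point reciprocal so the quantize loops can replace division by the step size d with a multiply and shift: l = floor(log2(d)), t is roughly 2^(16+l)/d rounded up, and storing t - 2^16 works because the quantizer adds x back in, supplying the implicit high bit. The per-coefficient expression (((x * quant) >> 16) + x) >> shift then approximates x / d. A self-checking sketch of that identity, assuming coefficient magnitudes below 2^15 and a plausible step-size range:

    #include <assert.h>

    static void invert(short *quant, unsigned char *shift, short d) {
      unsigned t = d;
      int l;
      for (l = 0; t > 1; l++)
        t >>= 1;                        /* l = floor(log2(d)) */
      t = 1 + (1u << (16 + l)) / d;     /* rounded-up reciprocal */
      *quant = (short)(t - (1u << 16)); /* drop the implicit high bit */
      *shift = (unsigned char)l;
    }

    int main(void) {
      short d, q;
      unsigned char s;
      int x;
      for (d = 4; d < 160; d++) {       /* plausible dequant step sizes */
        invert(&q, &s, d);
        for (x = 0; x < (1 << 15); x += 7) {
          int y = (((x * q) >> 16) + x) >> s;
          assert(y == x / d || y == x / d + 1);  /* off by at most the rounding */
        }
      }
      return 0;
    }
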
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
- int zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20,
- 24, 28, 32, 36, 40, 44, 44, 44};
+void vp8cx_init_quantizer(VP8_COMP *cpi) {
+ int i;
+ int quant_val;
+ int Q;
+ int zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20,
+ 24, 28, 32, 36, 40, 44, 44, 44
+ };
- int zbin_boost_8x8[64] = { 0, 0, 0, 8, 8, 8, 10, 12,
- 14, 16, 18, 20, 22, 24, 26, 28,
- 30, 32, 34, 36, 38, 40, 42, 44,
- 46, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48 };
+ int zbin_boost_8x8[64] = { 0, 0, 0, 8, 8, 8, 10, 12,
+ 14, 16, 18, 20, 22, 24, 26, 28,
+ 30, 32, 34, 36, 38, 40, 42, 44,
+ 46, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48
+ };
- int qrounding_factor = 48;
+ int qrounding_factor = 48;
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- int qzbin_factor = (vp8_dc_quant(Q,0) < 148) ? 84 : 80;
+ for (Q = 0; Q < QINDEX_RANGE; Q++) {
+ int qzbin_factor = (vp8_dc_quant(Q, 0) < 148) ? 84 : 80;
#if CONFIG_LOSSLESS
- if(cpi->oxcf.lossless)
- {
- if (Q==0)
- {
- qzbin_factor = 64;
- qrounding_factor = 64;
- }
- }
+ if (cpi->oxcf.lossless) {
+ if (Q == 0) {
+ qzbin_factor = 64;
+ qrounding_factor = 64;
+ }
+ }
#endif
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- invert_quant(cpi->Y1quant[Q] + 0,
- cpi->Y1quant_shift[Q] + 0, quant_val);
- cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y1zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
- cpi->zrun_zbin_boost_y1_8x8[Q][0] =
- ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- invert_quant(cpi->Y2quant[Q] + 0,
- cpi->Y2quant_shift[Q] + 0, quant_val);
- cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y2zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
- cpi->zrun_zbin_boost_y2_8x8[Q][0] =
- ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- invert_quant(cpi->UVquant[Q] + 0,
- cpi->UVquant_shift[Q] + 0, quant_val);
- cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;;
- cpi->UVzbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
- cpi->zrun_zbin_boost_uv_8x8[Q][0] =
- ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
-
- // all the 4x4 ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- invert_quant(cpi->Y1quant[Q] + rc,
- cpi->Y1quant_shift[Q] + rc, quant_val);
- cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] =
- ((quant_val * zbin_boost[i]) + 64) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- invert_quant(cpi->Y2quant[Q] + rc,
- cpi->Y2quant_shift[Q] + rc, quant_val);
- cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] =
- ((quant_val * zbin_boost[i]) + 64) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- invert_quant(cpi->UVquant[Q] + rc,
- cpi->UVquant_shift[Q] + rc, quant_val);
- cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] =
- ((quant_val * zbin_boost[i]) + 64) >> 7;
- }
-
- // 8x8 structures... only zbin seperated out for now
- // This needs cleaning up for 8x8 especially if we are to add
- // support for non flat Q matices
- for (i = 1; i < 64; i++)
- {
- int rc = vp8_default_zig_zag1d_8x8[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->zrun_zbin_boost_y1_8x8[Q][i] =
- ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->zrun_zbin_boost_y2_8x8[Q][i] =
- ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVzbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->zrun_zbin_boost_uv_8x8[Q][i] =
- ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
- }
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ invert_quant(cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y1zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+ cpi->zrun_zbin_boost_y1_8x8[Q][0] =
+ ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ invert_quant(cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y2zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+ cpi->zrun_zbin_boost_y2_8x8[Q][0] =
+ ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ invert_quant(cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+    cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+    cpi->UVzbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+ cpi->zrun_zbin_boost_uv_8x8[Q][0] =
+ ((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
+
+    // all the 4x4 ac values
+ for (i = 1; i < 16; i++) {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ invert_quant(cpi->Y1quant[Q] + rc,
+ cpi->Y1quant_shift[Q] + rc, quant_val);
+ cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] =
+ ((quant_val * zbin_boost[i]) + 64) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ invert_quant(cpi->Y2quant[Q] + rc,
+ cpi->Y2quant_shift[Q] + rc, quant_val);
+ cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] =
+ ((quant_val * zbin_boost[i]) + 64) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ invert_quant(cpi->UVquant[Q] + rc,
+ cpi->UVquant_shift[Q] + rc, quant_val);
+ cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] =
+ ((quant_val * zbin_boost[i]) + 64) >> 7;
}
-}
-void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex;
- MACROBLOCKD *xd = &x->e_mbd;
- int zbin_extra;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- // Select the baseline MB Q index allowing for any segment level change.
- if ( segfeature_active( xd, segment_id, SEG_LVL_ALT_Q ) )
- {
- // Abs Value
- if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA)
- QIndex = get_segdata( xd, segment_id, SEG_LVL_ALT_Q );
-
- // Delta Value
- else
- {
- QIndex = cpi->common.base_qindex +
- get_segdata( xd, segment_id, SEG_LVL_ALT_Q );
-
- // Clamp to valid range
- QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
- }
- }
- else
- QIndex = cpi->common.base_qindex;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 0; i < 16; i++)
- {
- x->block[i].quant = cpi->Y1quant[QIndex];
- x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
- x->block[i].zbin = cpi->Y1zbin[QIndex];
- x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
- x->block[i].round = cpi->Y1round[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
- x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
- x->block[i].zbin_extra = (short)zbin_extra;
-
- // Segment max eob offset feature.
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- x->block[i].eob_max_offset =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
- x->block[i].eob_max_offset_8x8 =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
- else
- {
- x->block[i].eob_max_offset = 16;
- x->block[i].eob_max_offset_8x8 = 64;
- }
+    // 8x8 structures... only zbin separated out for now
+    // This needs cleaning up for 8x8, especially if we are to add
+    // support for non-flat Q matrices
+ for (i = 1; i < 64; i++) {
+ int rc = vp8_default_zig_zag1d_8x8[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->zrun_zbin_boost_y1_8x8[Q][i] =
+ ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->zrun_zbin_boost_y2_8x8[Q][i] =
+ ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVzbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
+ cpi->zrun_zbin_boost_uv_8x8[Q][i] =
+ ((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
}
+ }
+}
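
All of these table entries are built in Q7 fixed point: the zero bin is qzbin_factor/128 of the quantizer step (84/128 for fine steps, dropping to 80/128 once the DC step reaches 148) and the rounding offset is qrounding_factor/128 = 48/128 of it. With a hypothetical step size of 60:

    int quant_val = 60;                        /* illustrative step size */
    int zbin  = ((84 * quant_val) + 64) >> 7;  /* 39: bin is ~0.66 of a step */
    int round = (48 * quant_val) >> 7;         /* 22: ~0.375 of a step */
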
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- for (i = 16; i < 24; i++)
- {
- x->block[i].quant = cpi->UVquant[QIndex];
- x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
- x->block[i].zbin = cpi->UVzbin[QIndex];
- x->block[i].zbin_8x8 = cpi->UVzbin_8x8[QIndex];
- x->block[i].round = cpi->UVround[QIndex];
- x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
- x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
- x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_uv_8x8[QIndex];
-
- x->block[i].zbin_extra = (short)zbin_extra;
-
- // Segment max eob offset feature.
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- x->block[i].eob_max_offset =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
- x->block[i].eob_max_offset_8x8 =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
- else
- {
- x->block[i].eob_max_offset = 16;
- x->block[i].eob_max_offset_8x8 = 64;
- }
+void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
+ int i;
+ int QIndex;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ // Select the baseline MB Q index allowing for any segment level change.
+ if (segfeature_active(xd, segment_id, SEG_LVL_ALT_Q)) {
+ // Abs Value
+ if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA)
+ QIndex = get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
+
+ // Delta Value
+ else {
+ QIndex = cpi->common.base_qindex +
+ get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
+
+ // Clamp to valid range
+ QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
}
+ } else
+ QIndex = cpi->common.base_qindex;
+
+ // Y
+ zbin_extra = (cpi->common.Y1dequant[QIndex][1] *
+ (cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
+
+ for (i = 0; i < 16; i++) {
+ x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
+ x->block[i].zbin = cpi->Y1zbin[QIndex];
+ x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
+ x->block[i].round = cpi->Y1round[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
+ x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
+ x->block[i].zbin_extra = (short)zbin_extra;
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
-
- x->block[24].quant = cpi->Y2quant[QIndex];
- x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
- x->block[24].zbin = cpi->Y2zbin[QIndex];
- x->block[24].zbin_8x8 = cpi->Y2zbin_8x8[QIndex];
- x->block[24].round = cpi->Y2round[QIndex];
- x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
- x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
- x->block[24].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y2_8x8[QIndex];
- x->block[24].zbin_extra = (short)zbin_extra;
-
- // TBD perhaps not use for Y2
// Segment max eob offset feature.
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- x->block[24].eob_max_offset =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
- x->block[24].eob_max_offset_8x8 =
- get_segdata( xd, segment_id, SEG_LVL_EOB );
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ x->block[i].eob_max_offset =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ x->block[i].eob_max_offset_8x8 =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ } else {
+ x->block[i].eob_max_offset = 16;
+ x->block[i].eob_max_offset_8x8 = 64;
}
- else
- {
- x->block[24].eob_max_offset = 16;
- x->block[24].eob_max_offset_8x8 = 4;
+ }
+
+ // UV
+ zbin_extra = (cpi->common.UVdequant[QIndex][1] *
+ (cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
+
+ for (i = 16; i < 24; i++) {
+ x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
+ x->block[i].zbin = cpi->UVzbin[QIndex];
+ x->block[i].zbin_8x8 = cpi->UVzbin_8x8[QIndex];
+ x->block[i].round = cpi->UVround[QIndex];
+ x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
+ x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
+ x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_uv_8x8[QIndex];
+
+ x->block[i].zbin_extra = (short)zbin_extra;
+
+ // Segment max eob offset feature.
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ x->block[i].eob_max_offset =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ x->block[i].eob_max_offset_8x8 =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ } else {
+ x->block[i].eob_max_offset = 16;
+ x->block[i].eob_max_offset_8x8 = 64;
}
+ }
+
+ // Y2
+ zbin_extra = (cpi->common.Y2dequant[QIndex][1] *
+ ((cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
+
+ x->block[24].quant = cpi->Y2quant[QIndex];
+ x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
+ x->block[24].zbin = cpi->Y2zbin[QIndex];
+ x->block[24].zbin_8x8 = cpi->Y2zbin_8x8[QIndex];
+ x->block[24].round = cpi->Y2round[QIndex];
+ x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
+ x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
+ x->block[24].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y2_8x8[QIndex];
+ x->block[24].zbin_extra = (short)zbin_extra;
+
+ // TBD perhaps not use for Y2
+ // Segment max eob offset feature.
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ x->block[24].eob_max_offset =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ x->block[24].eob_max_offset_8x8 =
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+ } else {
+ x->block[24].eob_max_offset = 16;
+ x->block[24].eob_max_offset_8x8 = 4;
+ }
- /* save this macroblock QIndex for vp8_update_zbin_extra() */
- x->q_index = QIndex;
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
}
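
One concrete data point for the zbin_extra term used throughout this function: it widens each block's zero bin by a Q7-scaled fraction of the first AC dequant step. The inputs below are chosen purely for illustration:

    int dequant_ac = 60, zbin_over_quant = 12;
    int zbin_mode_boost = 10, act_zbin_adj = 0;
    int zbin_extra = (dequant_ac *
                      (zbin_over_quant + zbin_mode_boost +
                       act_zbin_adj)) >> 7;    /* (60 * 22) >> 7 == 10 */
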
-void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
-{
- int i;
- int QIndex = x->q_index;
- int zbin_extra;
-
- // Y
- zbin_extra = ( cpi->common.Y1dequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
- for (i = 0; i < 16; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = (cpi->common.Y1dequant[QIndex][1] *
+ (cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
+ for (i = 0; i < 16; i++) {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
- // UV
- zbin_extra = ( cpi->common.UVdequant[QIndex][1] *
- ( cpi->zbin_over_quant +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
+ // UV
+ zbin_extra = (cpi->common.UVdequant[QIndex][1] *
+ (cpi->zbin_over_quant +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
- for (i = 16; i < 24; i++)
- {
- x->block[i].zbin_extra = (short)zbin_extra;
- }
+ for (i = 16; i < 24; i++) {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
- // Y2
- zbin_extra = ( cpi->common.Y2dequant[QIndex][1] *
- ( (cpi->zbin_over_quant / 2) +
- cpi->zbin_mode_boost +
- x->act_zbin_adj ) ) >> 7;
+ // Y2
+ zbin_extra = (cpi->common.Y2dequant[QIndex][1] *
+ ((cpi->zbin_over_quant / 2) +
+ cpi->zbin_mode_boost +
+ x->act_zbin_adj)) >> 7;
- x->block[24].zbin_extra = (short)zbin_extra;
+ x->block[24].zbin_extra = (short)zbin_extra;
}
-void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
-{
- // Clear Zbin mode boost for default case
- cpi->zbin_mode_boost = 0;
+void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
- // MB level quantizer setup
- vp8cx_mb_init_quantizer(cpi, &cpi->mb);
+ // MB level quantizer setup
+ vp8cx_mb_init_quantizer(cpi, &cpi->mb);
}
-void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
-{
- VP8_COMMON *cm = &cpi->common;
+void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
+ VP8_COMMON *cm = &cpi->common;
- cm->base_qindex = Q;
+ cm->base_qindex = Q;
- // if any of the delta_q values are changing update flag will
- // have to be set.
- cm->y1dc_delta_q = 0;
- cm->y2ac_delta_q = 0;
- cm->uvdc_delta_q = 0;
- cm->uvac_delta_q = 0;
- cm->y2dc_delta_q = 0;
+ // if any of the delta_q values are changing update flag will
+ // have to be set.
+ cm->y1dc_delta_q = 0;
+ cm->y2ac_delta_q = 0;
+ cm->uvdc_delta_q = 0;
+ cm->uvac_delta_q = 0;
+ cm->y2dc_delta_q = 0;
- // quantizer has to be reinitialized if any delta_q changes.
- // As there are not any here for now this is inactive code.
- //if(update)
- // vp8cx_init_quantizer(cpi);
+ // quantizer has to be reinitialized if any delta_q changes.
+ // As there are not any here for now this is inactive code.
+ // if(update)
+ // vp8cx_init_quantizer(cpi);
}
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index 5cde006bc..4106064f5 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -15,13 +15,13 @@
#include "block.h"
#define prototype_quantize_block(sym) \
- void (sym)(BLOCK *b,BLOCKD *d)
+ void (sym)(BLOCK *b,BLOCKD *d)
#define prototype_quantize_block_pair(sym) \
- void (sym)(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
+ void (sym)(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
#define prototype_quantize_mb(sym) \
- void (sym)(MACROBLOCK *x)
+ void (sym)(MACROBLOCK *x)
#if ARCH_X86 || ARCH_X86_64
#include "x86/quantize_x86.h"
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 52424aa7a..9885f8742 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -44,46 +44,42 @@ extern unsigned int inter_b_modes[B_MODE_COUNT];
#define BPER_MB_NORMBITS 9
// % adjustment to target kf size based on separation from previous frame
-static const int kf_boost_seperation_adjustment[16] =
-{
- 30, 40, 50, 55, 60, 65, 70, 75,
- 80, 85, 90, 95, 100, 100, 100, 100,
+static const int kf_boost_seperation_adjustment[16] = {
+ 30, 40, 50, 55, 60, 65, 70, 75,
+ 80, 85, 90, 95, 100, 100, 100, 100,
};
-static const int gf_adjust_table[101] =
-{
- 100,
- 115, 130, 145, 160, 175, 190, 200, 210, 220, 230,
- 240, 260, 270, 280, 290, 300, 310, 320, 330, 340,
- 350, 360, 370, 380, 390, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
- 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+static const int gf_adjust_table[101] = {
+ 100,
+ 115, 130, 145, 160, 175, 190, 200, 210, 220, 230,
+ 240, 260, 270, 280, 290, 300, 310, 320, 330, 340,
+ 350, 360, 370, 380, 390, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
};
-static const int gf_intra_usage_adjustment[20] =
-{
- 125, 120, 115, 110, 105, 100, 95, 85, 80, 75,
- 70, 65, 60, 55, 50, 50, 50, 50, 50, 50,
+static const int gf_intra_usage_adjustment[20] = {
+ 125, 120, 115, 110, 105, 100, 95, 85, 80, 75,
+ 70, 65, 60, 55, 50, 50, 50, 50, 50, 50,
};
-static const int gf_interval_table[101] =
-{
- 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+static const int gf_interval_table[101] = {
+ 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
};
static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3, 4, 5 };
@@ -91,274 +87,260 @@ static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3,
// These functions use formulaic calculations to make playing with the
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream
-double vp8_convert_qindex_to_q( int qindex )
-{
- // Convert the index to a real Q value (scaled down to match old Q values)
- return (double)vp8_ac_yquant( qindex ) / 4.0;
+double vp8_convert_qindex_to_q(int qindex) {
+ // Convert the index to a real Q value (scaled down to match old Q values)
+ return (double)vp8_ac_yquant(qindex) / 4.0;
}
-int vp8_gfboost_qadjust( int qindex )
-{
- int retval;
- double q;
+int vp8_gfboost_qadjust(int qindex) {
+ int retval;
+ double q;
- q = vp8_convert_qindex_to_q(qindex);
- retval = (int)( ( 0.00000828 * q * q * q ) +
- ( -0.0055 * q * q ) +
- ( 1.32 * q ) + 79.3 );
- return retval;
+ q = vp8_convert_qindex_to_q(qindex);
+ retval = (int)((0.00000828 * q * q * q) +
+ (-0.0055 * q * q) +
+ (1.32 * q) + 79.3);
+ return retval;
}
-int kfboost_qadjust( int qindex )
-{
- int retval;
- double q;
+int kfboost_qadjust(int qindex) {
+ int retval;
+ double q;
- q = vp8_convert_qindex_to_q(qindex);
- retval = (int)( ( 0.00000973 * q * q * q ) +
- ( -0.00613 * q * q ) +
- ( 1.316 * q ) + 121.2 );
- return retval;
+ q = vp8_convert_qindex_to_q(qindex);
+ retval = (int)((0.00000973 * q * q * q) +
+ (-0.00613 * q * q) +
+ (1.316 * q) + 121.2);
+ return retval;
}
-int vp8_bits_per_mb( FRAME_TYPE frame_type, int qindex )
-{
- if ( frame_type == KEY_FRAME )
- return (int)(4500000 / vp8_convert_qindex_to_q(qindex));
- else
- return (int)(2850000 / vp8_convert_qindex_to_q(qindex));
+int vp8_bits_per_mb(FRAME_TYPE frame_type, int qindex) {
+ if (frame_type == KEY_FRAME)
+ return (int)(4500000 / vp8_convert_qindex_to_q(qindex));
+ else
+ return (int)(2850000 / vp8_convert_qindex_to_q(qindex));
}
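
Because vp8_convert_qindex_to_q returns the AC step divided by 4, this model is a constant over the real Q; the results appear to be in the scaled units that BPER_MB_NORMBITS (9) later shifts away. At an illustrative real Q of 20:

    int kf_bits    = (int)(4500000 / 20.0);  /* 225000 */
    int inter_bits = (int)(2850000 / 20.0);  /* 142500 */
    /* ~439 vs ~278 bits per MB once scaled down by 2^9 */
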
-void vp8_save_coding_context(VP8_COMP *cpi)
-{
- CODING_CONTEXT *const cc = & cpi->coding_context;
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+void vp8_save_coding_context(VP8_COMP *cpi) {
+  CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
- // Stores a snapshot of key state variables which can subsequently be
- // restored with a call to vp8_restore_coding_context. These functions are
- // intended for use in a re-code loop in vp8_compress_frame where the
- // quantizer value is adjusted between loop iterations.
+ // Stores a snapshot of key state variables which can subsequently be
+ // restored with a call to vp8_restore_coding_context. These functions are
+ // intended for use in a re-code loop in vp8_compress_frame where the
+ // quantizer value is adjusted between loop iterations.
- vp8_copy(cc->mvc, cm->fc.mvc);
- vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
+ vp8_copy(cc->mvc, cm->fc.mvc);
+ vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
#if CONFIG_HIGH_PRECISION_MV
- vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
- vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
+ vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
+ vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
#endif
- vp8_copy( cc->mv_ref_ct, cm->fc.mv_ref_ct );
- vp8_copy( cc->mode_context, cm->fc.mode_context );
- vp8_copy( cc->mv_ref_ct_a, cm->fc.mv_ref_ct_a );
- vp8_copy( cc->mode_context_a, cm->fc.mode_context_a );
+ vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
+ vp8_copy(cc->mode_context, cm->fc.mode_context);
+ vp8_copy(cc->mv_ref_ct_a, cm->fc.mv_ref_ct_a);
+ vp8_copy(cc->mode_context_a, cm->fc.mode_context_a);
- vp8_copy( cc->ymode_prob, cm->fc.ymode_prob );
- vp8_copy( cc->bmode_prob, cm->fc.bmode_prob );
- vp8_copy( cc->uv_mode_prob, cm->fc.uv_mode_prob );
- vp8_copy( cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob );
- vp8_copy( cc->sub_mv_ref_prob, cm->fc.sub_mv_ref_prob );
- vp8_copy( cc->mbsplit_prob, cm->fc.mbsplit_prob );
+ vp8_copy(cc->ymode_prob, cm->fc.ymode_prob);
+ vp8_copy(cc->bmode_prob, cm->fc.bmode_prob);
+ vp8_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
+ vp8_copy(cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob);
+ vp8_copy(cc->sub_mv_ref_prob, cm->fc.sub_mv_ref_prob);
+ vp8_copy(cc->mbsplit_prob, cm->fc.mbsplit_prob);
- // Stats
+ // Stats
#ifdef MODE_STATS
- vp8_copy(cc->y_modes, y_modes);
- vp8_copy(cc->uv_modes, uv_modes);
- vp8_copy(cc->b_modes, b_modes);
- vp8_copy(cc->inter_y_modes, inter_y_modes);
- vp8_copy(cc->inter_uv_modes, inter_uv_modes);
- vp8_copy(cc->inter_b_modes, inter_b_modes);
+ vp8_copy(cc->y_modes, y_modes);
+ vp8_copy(cc->uv_modes, uv_modes);
+ vp8_copy(cc->b_modes, b_modes);
+ vp8_copy(cc->inter_y_modes, inter_y_modes);
+ vp8_copy(cc->inter_uv_modes, inter_uv_modes);
+ vp8_copy(cc->inter_b_modes, inter_b_modes);
#endif
- vp8_copy( cc->segment_pred_probs, cm->segment_pred_probs );
- vp8_copy( cc->ref_pred_probs_update, cpi->ref_pred_probs_update );
- vp8_copy( cc->ref_pred_probs, cm->ref_pred_probs );
- vp8_copy( cc->prob_comppred, cm->prob_comppred );
+ vp8_copy(cc->segment_pred_probs, cm->segment_pred_probs);
+ vp8_copy(cc->ref_pred_probs_update, cpi->ref_pred_probs_update);
+ vp8_copy(cc->ref_pred_probs, cm->ref_pred_probs);
+ vp8_copy(cc->prob_comppred, cm->prob_comppred);
- vpx_memcpy( cpi->coding_context.last_frame_seg_map_copy,
- cm->last_frame_seg_map, (cm->mb_rows * cm->mb_cols) );
+ vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
+ cm->last_frame_seg_map, (cm->mb_rows * cm->mb_cols));
- vp8_copy( cc->last_ref_lf_deltas, xd->last_ref_lf_deltas );
- vp8_copy( cc->last_mode_lf_deltas, xd->last_mode_lf_deltas );
+ vp8_copy(cc->last_ref_lf_deltas, xd->last_ref_lf_deltas);
+ vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
- vp8_copy( cc->coef_probs, cm->fc.coef_probs );
- vp8_copy( cc->coef_probs_8x8, cm->fc.coef_probs_8x8 );
+ vp8_copy(cc->coef_probs, cm->fc.coef_probs);
+ vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
}
-void vp8_restore_coding_context(VP8_COMP *cpi)
-{
- CODING_CONTEXT *const cc = & cpi->coding_context;
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+void vp8_restore_coding_context(VP8_COMP *cpi) {
+  CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
- // Restore key state variables to the snapshot state stored in the
- // previous call to vp8_save_coding_context.
+ // Restore key state variables to the snapshot state stored in the
+ // previous call to vp8_save_coding_context.
- vp8_copy(cm->fc.mvc, cc->mvc);
- vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
+ vp8_copy(cm->fc.mvc, cc->mvc);
+ vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
#if CONFIG_HIGH_PRECISION_MV
- vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
- vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
+ vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
+ vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
#endif
- vp8_copy( cm->fc.mv_ref_ct, cc->mv_ref_ct );
- vp8_copy( cm->fc.mode_context, cc->mode_context );
- vp8_copy( cm->fc.mv_ref_ct_a, cc->mv_ref_ct_a );
- vp8_copy( cm->fc.mode_context_a, cc->mode_context_a );
+ vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
+ vp8_copy(cm->fc.mode_context, cc->mode_context);
+ vp8_copy(cm->fc.mv_ref_ct_a, cc->mv_ref_ct_a);
+ vp8_copy(cm->fc.mode_context_a, cc->mode_context_a);
- vp8_copy( cm->fc.ymode_prob, cc->ymode_prob);
- vp8_copy( cm->fc.bmode_prob, cc->bmode_prob);
- vp8_copy( cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
- vp8_copy( cm->fc.uv_mode_prob, cc->uv_mode_prob);
- vp8_copy( cm->fc.sub_mv_ref_prob, cc->sub_mv_ref_prob);
- vp8_copy( cm->fc.mbsplit_prob, cc->mbsplit_prob );
+ vp8_copy(cm->fc.ymode_prob, cc->ymode_prob);
+ vp8_copy(cm->fc.bmode_prob, cc->bmode_prob);
+ vp8_copy(cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
+ vp8_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
+ vp8_copy(cm->fc.sub_mv_ref_prob, cc->sub_mv_ref_prob);
+ vp8_copy(cm->fc.mbsplit_prob, cc->mbsplit_prob);
- // Stats
+ // Stats
#ifdef MODE_STATS
- vp8_copy(y_modes, cc->y_modes);
- vp8_copy(uv_modes, cc->uv_modes);
- vp8_copy(b_modes, cc->b_modes);
- vp8_copy(inter_y_modes, cc->inter_y_modes);
- vp8_copy(inter_uv_modes, cc->inter_uv_modes);
- vp8_copy(inter_b_modes, cc->inter_b_modes);
+ vp8_copy(y_modes, cc->y_modes);
+ vp8_copy(uv_modes, cc->uv_modes);
+ vp8_copy(b_modes, cc->b_modes);
+ vp8_copy(inter_y_modes, cc->inter_y_modes);
+ vp8_copy(inter_uv_modes, cc->inter_uv_modes);
+ vp8_copy(inter_b_modes, cc->inter_b_modes);
#endif
- vp8_copy( cm->segment_pred_probs, cc->segment_pred_probs );
- vp8_copy( cpi->ref_pred_probs_update, cc->ref_pred_probs_update );
- vp8_copy( cm->ref_pred_probs, cc->ref_pred_probs );
- vp8_copy( cm->prob_comppred, cc->prob_comppred );
+ vp8_copy(cm->segment_pred_probs, cc->segment_pred_probs);
+ vp8_copy(cpi->ref_pred_probs_update, cc->ref_pred_probs_update);
+ vp8_copy(cm->ref_pred_probs, cc->ref_pred_probs);
+ vp8_copy(cm->prob_comppred, cc->prob_comppred);
- vpx_memcpy( cm->last_frame_seg_map,
- cpi->coding_context.last_frame_seg_map_copy,
- (cm->mb_rows * cm->mb_cols) );
+ vpx_memcpy(cm->last_frame_seg_map,
+ cpi->coding_context.last_frame_seg_map_copy,
+ (cm->mb_rows * cm->mb_cols));
- vp8_copy( xd->last_ref_lf_deltas, cc->last_ref_lf_deltas );
- vp8_copy( xd->last_mode_lf_deltas, cc->last_mode_lf_deltas );
+ vp8_copy(xd->last_ref_lf_deltas, cc->last_ref_lf_deltas);
+ vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
- vp8_copy( cm->fc.coef_probs, cc->coef_probs );
- vp8_copy( cm->fc.coef_probs_8x8, cc->coef_probs_8x8 );
+ vp8_copy(cm->fc.coef_probs, cc->coef_probs);
+ vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
}
-void vp8_setup_key_frame(VP8_COMP *cpi)
-{
- // Setup for Key frame:
- vp8_default_coef_probs(& cpi->common);
- vp8_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
- vp8_init_mbmode_probs(& cpi->common);
+void vp8_setup_key_frame(VP8_COMP *cpi) {
+ // Setup for Key frame:
+ vp8_default_coef_probs(&cpi->common);
+ vp8_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
+ vp8_init_mbmode_probs(&cpi->common);
- vpx_memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
- {
- int flag[2] = {1, 1};
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
- }
+ vpx_memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+ {
+ int flag[2] = {1, 1};
+ vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
+ }
#if CONFIG_HIGH_PRECISION_MV
- vpx_memcpy(cpi->common.fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
- {
- int flag[2] = {1, 1};
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
- }
+ vpx_memcpy(cpi->common.fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
+ {
+ int flag[2] = {1, 1};
+ vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
+ }
#endif
- cpi->common.txfm_mode = ALLOW_8X8;
+ cpi->common.txfm_mode = ALLOW_8X8;
#if CONFIG_LOSSLESS
- if(cpi->oxcf.lossless)
- cpi->common.txfm_mode = ONLY_4X4;
+ if (cpi->oxcf.lossless)
+ cpi->common.txfm_mode = ONLY_4X4;
#endif
- //cpi->common.filter_level = 0; // Reset every key frame.
- cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
+ // cpi->common.filter_level = 0; // Reset every key frame.
+ cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
- // interval before next GF
- cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ // interval before next GF
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
- cpi->common.refresh_golden_frame = TRUE;
- cpi->common.refresh_alt_ref_frame = TRUE;
+ cpi->common.refresh_golden_frame = TRUE;
+ cpi->common.refresh_alt_ref_frame = TRUE;
- vp8_init_mode_contexts(&cpi->common);
- vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
- vpx_memcpy(&cpi->common.lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
+ vp8_init_mode_contexts(&cpi->common);
+ vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
+ vpx_memcpy(&cpi->common.lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));
- /*
- vpx_memcpy( cpi->common.fc.vp8_mode_contexts,
- cpi->common.fc.mode_context,
- sizeof(cpi->common.fc.mode_context));
- */
- vpx_memcpy( cpi->common.fc.vp8_mode_contexts,
- default_vp8_mode_contexts,
- sizeof(default_vp8_mode_contexts));
+ /*
+ vpx_memcpy( cpi->common.fc.vp8_mode_contexts,
+ cpi->common.fc.mode_context,
+ sizeof(cpi->common.fc.mode_context));
+ */
+ vpx_memcpy(cpi->common.fc.vp8_mode_contexts,
+ default_vp8_mode_contexts,
+ sizeof(default_vp8_mode_contexts));
}
-void vp8_setup_inter_frame(VP8_COMP *cpi)
-{
+void vp8_setup_inter_frame(VP8_COMP *cpi) {
- cpi->common.txfm_mode = ALLOW_8X8;
+ cpi->common.txfm_mode = ALLOW_8X8;
#if CONFIG_LOSSLESS
- if(cpi->oxcf.lossless)
- cpi->common.txfm_mode = ONLY_4X4;
+ if (cpi->oxcf.lossless)
+ cpi->common.txfm_mode = ONLY_4X4;
#endif
- if(cpi->common.refresh_alt_ref_frame)
- {
- vpx_memcpy( &cpi->common.fc,
- &cpi->common.lfc_a,
- sizeof(cpi->common.fc));
- vpx_memcpy( cpi->common.fc.vp8_mode_contexts,
- cpi->common.fc.mode_context_a,
- sizeof(cpi->common.fc.vp8_mode_contexts));
- }
- else
- {
- vpx_memcpy( &cpi->common.fc,
- &cpi->common.lfc,
- sizeof(cpi->common.fc));
- vpx_memcpy( cpi->common.fc.vp8_mode_contexts,
- cpi->common.fc.mode_context,
- sizeof(cpi->common.fc.vp8_mode_contexts));
- }
+ if (cpi->common.refresh_alt_ref_frame) {
+ vpx_memcpy(&cpi->common.fc,
+ &cpi->common.lfc_a,
+ sizeof(cpi->common.fc));
+ vpx_memcpy(cpi->common.fc.vp8_mode_contexts,
+ cpi->common.fc.mode_context_a,
+ sizeof(cpi->common.fc.vp8_mode_contexts));
+ } else {
+ vpx_memcpy(&cpi->common.fc,
+ &cpi->common.lfc,
+ sizeof(cpi->common.fc));
+ vpx_memcpy(cpi->common.fc.vp8_mode_contexts,
+ cpi->common.fc.mode_context,
+ sizeof(cpi->common.fc.vp8_mode_contexts));
+ }
}
static int estimate_bits_at_q(int frame_kind, int Q, int MBs,
- double correction_factor)
-{
- int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb(frame_kind, Q));
-
- /* Attempt to retain reasonable accuracy without overflow. The cutoff is
- * chosen such that the maximum product of Bpm and MBs fits 31 bits. The
- * largest Bpm takes 20 bits.
- */
- if (MBs > (1 << 11))
- return (Bpm >> BPER_MB_NORMBITS) * MBs;
- else
- return (Bpm * MBs) >> BPER_MB_NORMBITS;
+ double correction_factor) {
+ int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb(frame_kind, Q));
+
+ /* Attempt to retain reasonable accuracy without overflow. The cutoff is
+ * chosen such that the maximum product of Bpm and MBs fits 31 bits. The
+ * largest Bpm takes 20 bits.
+ */
+ if (MBs > (1 << 11))
+ return (Bpm >> BPER_MB_NORMBITS) * MBs;
+ else
+ return (Bpm * MBs) >> BPER_MB_NORMBITS;
}
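The shift-before-multiply guard in estimate_bits_at_q() is worth seeing in isolation. A minimal standalone sketch, assuming BPER_MB_NORMBITS is 9 (its value in onyx_int.h) and using hypothetical bits-per-MB and macroblock-count figures:

#include <stdio.h>

#define BPER_MB_NORMBITS 9  /* assumed; matches onyx_int.h */

/* Mirror of the cutoff above: for frames with more than 2^11
 * macroblocks, shift first so Bpm * MBs stays within 31 bits. */
static int scale_bits(int bpm_fixed, int mbs) {
  if (mbs > (1 << 11))
    return (bpm_fixed >> BPER_MB_NORMBITS) * mbs;
  else
    return (bpm_fixed * mbs) >> BPER_MB_NORMBITS;
}

int main(void) {
  /* Hypothetical 27 bits/MB, expressed in 1/512-bit fixed point. */
  printf("SD (1350 MBs): %d bits\n", scale_bits(27 << BPER_MB_NORMBITS, 1350));
  printf("HD (8160 MBs): %d bits\n", scale_bits(27 << BPER_MB_NORMBITS, 8160));
  return 0;
}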
-static void calc_iframe_target_size(VP8_COMP *cpi)
-{
- // boost defaults to half second
- int target;
+static void calc_iframe_target_size(VP8_COMP *cpi) {
+ // boost defaults to half second
+ int target;
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state(); //__asm emms;
+ // Clear down mmx registers to allow floating point in what follows
+ vp8_clear_system_state(); // __asm emms;
- // New Two pass RC
- target = cpi->per_frame_bandwidth;
+ // New Two pass RC
+ target = cpi->per_frame_bandwidth;
- if (cpi->oxcf.rc_max_intra_bitrate_pct)
- {
- unsigned int max_rate = cpi->per_frame_bandwidth
- * cpi->oxcf.rc_max_intra_bitrate_pct / 100;
+ if (cpi->oxcf.rc_max_intra_bitrate_pct) {
+ unsigned int max_rate = cpi->per_frame_bandwidth
+ * cpi->oxcf.rc_max_intra_bitrate_pct / 100;
- if (target > max_rate)
- target = max_rate;
- }
+ if (target > max_rate)
+ target = max_rate;
+ }
- cpi->this_frame_target = target;
+ cpi->this_frame_target = target;
}
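calc_iframe_target_size() reduces to a clamp; a hedged sketch with hypothetical bandwidth and percentage values makes the cap explicit:

/* Sketch of the intra target cap above; inputs are hypothetical. */
static int iframe_target(int per_frame_bandwidth,
                         unsigned int rc_max_intra_bitrate_pct) {
  int target = per_frame_bandwidth;

  if (rc_max_intra_bitrate_pct) {
    unsigned int max_rate =
        per_frame_bandwidth * rc_max_intra_bitrate_pct / 100;

    if ((unsigned int)target > max_rate)
      target = (int)max_rate;
  }
  return target;
}

For example, iframe_target(40000, 50) == 20000, while a percentage of 150 leaves the 40000-bit target untouched.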
@@ -368,417 +350,370 @@ static void calc_iframe_target_size(VP8_COMP *cpi)
//
// In this experimental code only two pass is supported
// so we just use the interval determined in the two pass code.
-static void calc_gf_params(VP8_COMP *cpi)
-{
- // Set the gf interval
- cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+static void calc_gf_params(VP8_COMP *cpi) {
+ // Set the gf interval
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
}
-static void calc_pframe_target_size(VP8_COMP *cpi)
-{
- int min_frame_target;
+static void calc_pframe_target_size(VP8_COMP *cpi) {
+ int min_frame_target;
- min_frame_target = 0;
+ min_frame_target = 0;
- min_frame_target = cpi->min_frame_bandwidth;
+ min_frame_target = cpi->min_frame_bandwidth;
- if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5))
- min_frame_target = cpi->av_per_frame_bandwidth >> 5;
+ if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5))
+ min_frame_target = cpi->av_per_frame_bandwidth >> 5;
- // Special alt reference frame case
- if (cpi->common.refresh_alt_ref_frame)
- {
- // Per frame bit target for the alt ref frame
- cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
- cpi->this_frame_target = cpi->per_frame_bandwidth;
- }
+ // Special alt reference frame case
+ if (cpi->common.refresh_alt_ref_frame) {
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
- // Normal frames (gf,and inter)
- else
- {
- cpi->this_frame_target = cpi->per_frame_bandwidth;
- }
+ // Normal frames (gf and inter)
+ else {
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
- // Sanity check that the total sum of adjustments is not above the maximum allowed
- // That is that having allowed for KF and GF penalties we have not pushed the
- // current interframe target to low. If the adjustment we apply here is not capable of recovering
- // all the extra bits we have spent in the KF or GF then the remainder will have to be recovered over
- // a longer time span via other buffer / rate control mechanisms.
- if (cpi->this_frame_target < min_frame_target)
- cpi->this_frame_target = min_frame_target;
-
- if (!cpi->common.refresh_alt_ref_frame)
- // Note the baseline target data rate for this inter frame.
- cpi->inter_frame_target = cpi->this_frame_target;
-
- // Adjust target frame size for Golden Frames:
- if ( cpi->frames_till_gf_update_due == 0 )
- {
- //int Boost = 0;
- int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
-
- cpi->common.refresh_golden_frame = TRUE;
-
- calc_gf_params(cpi);
-
- // If we are using alternate ref instead of gf then do not apply the boost
- // It will instead be applied to the altref update
- // Jims modified boost
- if (!cpi->source_alt_ref_active)
- {
- if (cpi->oxcf.fixed_q < 0)
- {
- // The spend on the GF is defined in the two pass code
- // for two pass encodes
- cpi->this_frame_target = cpi->per_frame_bandwidth;
- }
- else
- cpi->this_frame_target =
- (estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0)
- * cpi->last_boost) / 100;
+ // Sanity check that the total sum of adjustments is not above the maximum allowed.
+ // That is, having allowed for KF and GF penalties, we have not pushed the
+ // current interframe target too low. If the adjustment applied here cannot
+ // recover all the extra bits spent on the KF or GF, the remainder will have
+ // to be recovered over a longer time span via other buffer / rate control
+ // mechanisms.
+ if (cpi->this_frame_target < min_frame_target)
+ cpi->this_frame_target = min_frame_target;
- }
- // If there is an active ARF at this location use the minimum
- // bits on this frame even if it is a contructed arf.
- // The active maximum quantizer insures that an appropriate
- // number of bits will be spent if needed for contstructed ARFs.
- else
- {
- cpi->this_frame_target = 0;
- }
+ if (!cpi->common.refresh_alt_ref_frame)
+ // Note the baseline target data rate for this inter frame.
+ cpi->inter_frame_target = cpi->this_frame_target;
+
+ // Adjust target frame size for Golden Frames:
+ if (cpi->frames_till_gf_update_due == 0) {
+ // int Boost = 0;
+ int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q;
+
+ cpi->common.refresh_golden_frame = TRUE;
- cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+ calc_gf_params(cpi);
+
+ // If we are using alternate ref instead of gf then do not apply the boost
+ // It will instead be applied to the altref update
+ // Jim's modified boost
+ if (!cpi->source_alt_ref_active) {
+ if (cpi->oxcf.fixed_q < 0) {
+ // The spend on the GF is defined in the two pass code
+ // for two pass encodes
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else
+ cpi->this_frame_target =
+ (estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0)
+ * cpi->last_boost) / 100;
+
+ }
+ // If there is an active ARF at this location use the minimum
+ // bits on this frame even if it is a constructed ARF.
+ // The active maximum quantizer ensures that an appropriate
+ // number of bits will be spent if needed for constructed ARFs.
+ else {
+ cpi->this_frame_target = 0;
}
+
+ cpi->current_gf_interval = cpi->frames_till_gf_update_due;
+ }
}
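The floor applied at the top of calc_pframe_target_size() can be read as a pure function of three bandwidth figures (all hypothetical in this sketch):

/* Never let an inter frame target drop below the configured minimum
 * or 1/32 of the average per-frame bandwidth, whichever is larger. */
static int floored_target(int target, int min_frame_bandwidth,
                          int av_per_frame_bandwidth) {
  int min_frame_target = min_frame_bandwidth;

  if (min_frame_target < (av_per_frame_bandwidth >> 5))
    min_frame_target = av_per_frame_bandwidth >> 5;

  return (target < min_frame_target) ? min_frame_target : target;
}
/* e.g. floored_target(500, 0, 32000) == 1000, since 32000 >> 5 == 1000. */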
-void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
-{
- int Q = cpi->common.base_qindex;
- int correction_factor = 100;
- double rate_correction_factor;
- double adjustment_limit;
+void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
+ int Q = cpi->common.base_qindex;
+ int correction_factor = 100;
+ double rate_correction_factor;
+ double adjustment_limit;
- int projected_size_based_on_q = 0;
+ int projected_size_based_on_q = 0;
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state(); //__asm emms;
+ // Clear down mmx registers to allow floating point in what follows
+ vp8_clear_system_state(); // __asm emms;
- if (cpi->common.frame_type == KEY_FRAME)
- {
- rate_correction_factor = cpi->key_frame_rate_correction_factor;
- }
+ if (cpi->common.frame_type == KEY_FRAME) {
+ rate_correction_factor = cpi->key_frame_rate_correction_factor;
+ } else {
+ if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ rate_correction_factor = cpi->gf_rate_correction_factor;
else
- {
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
- rate_correction_factor = cpi->gf_rate_correction_factor;
- else
- rate_correction_factor = cpi->rate_correction_factor;
- }
-
- // Work out how big we would have expected the frame to be at this Q given the current correction factor.
- // Stay in double to avoid int overflow when values are large
- projected_size_based_on_q =
- (int)(((.5 + rate_correction_factor *
- vp8_bits_per_mb(cpi->common.frame_type, Q)) *
- cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
-
- // Make some allowance for cpi->zbin_over_quant
- if (cpi->zbin_over_quant > 0)
- {
- int Z = cpi->zbin_over_quant;
- double Factor = 0.99;
- double factor_adjustment = 0.01 / 256.0; //(double)ZBIN_OQ_MAX;
-
- while (Z > 0)
- {
- Z --;
- projected_size_based_on_q =
- (int)(Factor * projected_size_based_on_q);
- Factor += factor_adjustment;
-
- if (Factor >= 0.999)
- Factor = 0.999;
- }
+ rate_correction_factor = cpi->rate_correction_factor;
+ }
+
+ // Work out how big we would have expected the frame to be at this Q given the current correction factor.
+ // Stay in double to avoid int overflow when values are large
+ projected_size_based_on_q =
+ (int)(((.5 + rate_correction_factor *
+ vp8_bits_per_mb(cpi->common.frame_type, Q)) *
+ cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
+
+ // Make some allowance for cpi->zbin_over_quant
+ if (cpi->zbin_over_quant > 0) {
+ int Z = cpi->zbin_over_quant;
+ double Factor = 0.99;
+ double factor_adjustment = 0.01 / 256.0; // (double)ZBIN_OQ_MAX;
+
+ while (Z > 0) {
+ Z--;
+ projected_size_based_on_q =
+ (int)(Factor * projected_size_based_on_q);
+ Factor += factor_adjustment;
+
+ if (Factor >= 0.999)
+ Factor = 0.999;
}
+ }
- // Work out a size correction factor.
- //if ( cpi->this_frame_target > 0 )
- // correction_factor = (100 * cpi->projected_frame_size) / cpi->this_frame_target;
- if (projected_size_based_on_q > 0)
- correction_factor = (100 * cpi->projected_frame_size) / projected_size_based_on_q;
+ // Work out a size correction factor.
+ // if ( cpi->this_frame_target > 0 )
+ // correction_factor = (100 * cpi->projected_frame_size) / cpi->this_frame_target;
+ if (projected_size_based_on_q > 0)
+ correction_factor = (100 * cpi->projected_frame_size) / projected_size_based_on_q;
- // More heavily damped adjustment used if we have been oscillating either side of target
- switch (damp_var)
- {
+ // More heavily damped adjustment used if we have been oscillating either side of target
+ switch (damp_var) {
case 0:
- adjustment_limit = 0.75;
- break;
+ adjustment_limit = 0.75;
+ break;
case 1:
- adjustment_limit = 0.375;
- break;
+ adjustment_limit = 0.375;
+ break;
case 2:
default:
- adjustment_limit = 0.25;
- break;
- }
-
- //if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) )
- if (correction_factor > 102)
- {
- // We are not already at the worst allowable quality
- correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
- rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
-
- // Keep rate_correction_factor within limits
- if (rate_correction_factor > MAX_BPB_FACTOR)
- rate_correction_factor = MAX_BPB_FACTOR;
- }
- //else if ( (correction_factor < 99) && (Q > cpi->active_best_quality) )
- else if (correction_factor < 99)
- {
- // We are not already at the best allowable quality
- correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
- rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
-
- // Keep rate_correction_factor within limits
- if (rate_correction_factor < MIN_BPB_FACTOR)
- rate_correction_factor = MIN_BPB_FACTOR;
- }
-
- if (cpi->common.frame_type == KEY_FRAME)
- cpi->key_frame_rate_correction_factor = rate_correction_factor;
+ adjustment_limit = 0.25;
+ break;
+ }
+
+ // if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) )
+ if (correction_factor > 102) {
+ // We are not already at the worst allowable quality
+ correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor > MAX_BPB_FACTOR)
+ rate_correction_factor = MAX_BPB_FACTOR;
+ }
+ // else if ( (correction_factor < 99) && (Q > cpi->active_best_quality) )
+ else if (correction_factor < 99) {
+ // We are not already at the best allowable quality
+ correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor < MIN_BPB_FACTOR)
+ rate_correction_factor = MIN_BPB_FACTOR;
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME)
+ cpi->key_frame_rate_correction_factor = rate_correction_factor;
+ else {
+ if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ cpi->gf_rate_correction_factor = rate_correction_factor;
else
- {
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
- cpi->gf_rate_correction_factor = rate_correction_factor;
- else
- cpi->rate_correction_factor = rate_correction_factor;
- }
+ cpi->rate_correction_factor = rate_correction_factor;
+ }
}
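The damped update above is easier to verify as a pure function. A sketch, omitting the MIN/MAX_BPB_FACTOR clamps, with hypothetical projected and actual frame sizes:

static double damped_correction(double rate_correction_factor,
                                int projected_size, int actual_size,
                                int damp_var) {
  /* Same adjustment limits as the switch statement above. */
  static const double limits[3] = { 0.75, 0.375, 0.25 };
  double adjustment_limit =
      limits[(damp_var < 0 || damp_var > 2) ? 2 : damp_var];
  int correction_factor = 100;

  if (projected_size > 0)
    correction_factor = (100 * actual_size) / projected_size;

  if (correction_factor > 102)       /* coded larger than estimated */
    correction_factor =
        (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
  else if (correction_factor < 99)   /* coded smaller than estimated */
    correction_factor =
        (int)(100.5 - ((100 - correction_factor) * adjustment_limit));

  return rate_correction_factor * correction_factor / 100;
}

A frame 20% over its estimate with damp_var == 0 scales the factor by 1.15 (0.75 of the error) rather than the full 1.20.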
-int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
-{
- int Q = cpi->active_worst_quality;
+int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
+ int Q = cpi->active_worst_quality;
- int i;
- int last_error = INT_MAX;
- int target_bits_per_mb;
- int bits_per_mb_at_this_q;
- double correction_factor;
+ int i;
+ int last_error = INT_MAX;
+ int target_bits_per_mb;
+ int bits_per_mb_at_this_q;
+ double correction_factor;
- // Reset Zbin OQ value
- cpi->zbin_over_quant = 0;
+ // Reset Zbin OQ value
+ cpi->zbin_over_quant = 0;
- // Select the appropriate correction factor based upon type of frame.
- if (cpi->common.frame_type == KEY_FRAME)
- correction_factor = cpi->key_frame_rate_correction_factor;
+ // Select the appropriate correction factor based upon type of frame.
+ if (cpi->common.frame_type == KEY_FRAME)
+ correction_factor = cpi->key_frame_rate_correction_factor;
+ else {
+ if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ correction_factor = cpi->gf_rate_correction_factor;
else
- {
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
- correction_factor = cpi->gf_rate_correction_factor;
- else
- correction_factor = cpi->rate_correction_factor;
- }
+ correction_factor = cpi->rate_correction_factor;
+ }
- // Calculate required scaling factor based on target frame size and size of frame produced using previous Q
- if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
- target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS; // Case where we would overflow int
- else
- target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
+ // Calculate required scaling factor based on target frame size and size of frame produced using previous Q
+ if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
+ target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS; // Case where we would overflow int
+ else
+ target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
- i = cpi->active_best_quality;
+ i = cpi->active_best_quality;
- do
- {
- bits_per_mb_at_this_q =
- (int)(.5 + correction_factor *
- vp8_bits_per_mb(cpi->common.frame_type, i ));
+ do {
+ bits_per_mb_at_this_q =
+ (int)(.5 + correction_factor *
+ vp8_bits_per_mb(cpi->common.frame_type, i));
- if (bits_per_mb_at_this_q <= target_bits_per_mb)
- {
- if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
- Q = i;
- else
- Q = i - 1;
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) {
+ if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
+ Q = i;
+ else
+ Q = i - 1;
- break;
- }
- else
- last_error = bits_per_mb_at_this_q - target_bits_per_mb;
- }
- while (++i <= cpi->active_worst_quality);
+ break;
+ } else
+ last_error = bits_per_mb_at_this_q - target_bits_per_mb;
+ } while (++i <= cpi->active_worst_quality);
- // If we are at MAXQ then enable Q over-run which seeks to claw back additional bits through things like
- // the RD multiplier and zero bin size.
- if (Q >= MAXQ)
- {
- int zbin_oqmax;
+ // If we are at MAXQ then enable Q over-run which seeks to claw back additional bits through things like
+ // the RD multiplier and zero bin size.
+ if (Q >= MAXQ) {
+ int zbin_oqmax;
- double Factor = 0.99;
- double factor_adjustment = 0.01 / 256.0; //(double)ZBIN_OQ_MAX;
+ double Factor = 0.99;
+ double factor_adjustment = 0.01 / 256.0; // (double)ZBIN_OQ_MAX;
- if (cpi->common.frame_type == KEY_FRAME)
- zbin_oqmax = 0; //ZBIN_OQ_MAX/16
- else if (cpi->common.refresh_alt_ref_frame || (cpi->common.refresh_golden_frame && !cpi->source_alt_ref_active))
- zbin_oqmax = 16;
- else
- zbin_oqmax = ZBIN_OQ_MAX;
+ if (cpi->common.frame_type == KEY_FRAME)
+ zbin_oqmax = 0; // ZBIN_OQ_MAX/16
+ else if (cpi->common.refresh_alt_ref_frame || (cpi->common.refresh_golden_frame && !cpi->source_alt_ref_active))
+ zbin_oqmax = 16;
+ else
+ zbin_oqmax = ZBIN_OQ_MAX;
- // Each incrment in the zbin is assumed to have a fixed effect on bitrate. This is not of course true.
- // The effect will be highly clip dependent and may well have sudden steps.
- // The idea here is to acheive higher effective quantizers than the normal maximum by expanding the zero
- // bin and hence decreasing the number of low magnitude non zero coefficients.
- while (cpi->zbin_over_quant < zbin_oqmax)
- {
- cpi->zbin_over_quant ++;
+ // Each increment in the zbin is assumed to have a fixed effect on bitrate.
+ // This is, of course, not strictly true: the effect will be highly clip
+ // dependent and may well have sudden steps.
+ // The idea here is to achieve higher effective quantizers than the normal
+ // maximum by expanding the zero bin and hence decreasing the number of
+ // low-magnitude non-zero coefficients.
+ while (cpi->zbin_over_quant < zbin_oqmax) {
+ cpi->zbin_over_quant++;
- if (cpi->zbin_over_quant > zbin_oqmax)
- cpi->zbin_over_quant = zbin_oqmax;
+ if (cpi->zbin_over_quant > zbin_oqmax)
+ cpi->zbin_over_quant = zbin_oqmax;
- // Adjust bits_per_mb_at_this_q estimate
- bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
- Factor += factor_adjustment;
+ // Adjust bits_per_mb_at_this_q estimate
+ bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
+ Factor += factor_adjustment;
- if (Factor >= 0.999)
- Factor = 0.999;
-
- if (bits_per_mb_at_this_q <= target_bits_per_mb) // Break out if we get down to the target rate
- break;
- }
+ if (Factor >= 0.999)
+ Factor = 0.999;
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) // Break out if we get down to the target rate
+ break;
}
- return Q;
+ }
+
+ return Q;
}
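Stripped of the zbin over-run handling, the search in vp8_regulate_q() is a linear scan for the first Q whose corrected bits-per-MB estimate reaches the target. A self-contained sketch, with bits_per_mb() standing in for vp8_bits_per_mb():

#include <limits.h>

static int regulate_q(int best_q, int worst_q, int target_bits_per_mb,
                      double correction_factor, int (*bits_per_mb)(int)) {
  int q = worst_q;
  int last_error = INT_MAX;
  int i;

  for (i = best_q; i <= worst_q; i++) {
    int b = (int)(.5 + correction_factor * bits_per_mb(i));

    if (b <= target_bits_per_mb) {
      /* Pick whichever of the two neighbouring Qs is closer to target. */
      q = ((target_bits_per_mb - b) <= last_error) ? i : i - 1;
      break;
    }
    last_error = b - target_bits_per_mb;
  }
  return q;
}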
-static int estimate_keyframe_frequency(VP8_COMP *cpi)
-{
- int i;
+static int estimate_keyframe_frequency(VP8_COMP *cpi) {
+ int i;
- // Average key frame frequency
- int av_key_frame_frequency = 0;
+ // Average key frame frequency
+ int av_key_frame_frequency = 0;
- /* First key frame at start of sequence is a special case. We have no
- * frequency data.
+ /* First key frame at start of sequence is a special case. We have no
+ * frequency data.
+ */
+ if (cpi->key_frame_count == 1) {
+ /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
+ * whichever is smaller.
*/
- if (cpi->key_frame_count == 1)
- {
- /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
- * whichever is smaller.
- */
- int key_freq = cpi->oxcf.key_freq>0 ? cpi->oxcf.key_freq : 1;
- av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
-
- if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
- av_key_frame_frequency = cpi->oxcf.key_freq;
-
- cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
- = av_key_frame_frequency;
- }
- else
- {
- unsigned int total_weight = 0;
- int last_kf_interval =
- (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
-
- /* reset keyframe context and calculate weighted average of last
- * KEY_FRAME_CONTEXT keyframes
- */
- for (i = 0; i < KEY_FRAME_CONTEXT; i++)
- {
- if (i < KEY_FRAME_CONTEXT - 1)
- cpi->prior_key_frame_distance[i]
- = cpi->prior_key_frame_distance[i+1];
- else
- cpi->prior_key_frame_distance[i] = last_kf_interval;
-
- av_key_frame_frequency += prior_key_frame_weight[i]
- * cpi->prior_key_frame_distance[i];
- total_weight += prior_key_frame_weight[i];
- }
+ int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
+ av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
- av_key_frame_frequency /= total_weight;
+ if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
+ av_key_frame_frequency = cpi->oxcf.key_freq;
+ cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
+ = av_key_frame_frequency;
+ } else {
+ unsigned int total_weight = 0;
+ int last_kf_interval =
+ (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
+
+ /* reset keyframe context and calculate weighted average of last
+ * KEY_FRAME_CONTEXT keyframes
+ */
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
+ if (i < KEY_FRAME_CONTEXT - 1)
+ cpi->prior_key_frame_distance[i]
+ = cpi->prior_key_frame_distance[i + 1];
+ else
+ cpi->prior_key_frame_distance[i] = last_kf_interval;
+
+ av_key_frame_frequency += prior_key_frame_weight[i]
+ * cpi->prior_key_frame_distance[i];
+ total_weight += prior_key_frame_weight[i];
}
- return av_key_frame_frequency;
+
+ av_key_frame_frequency /= total_weight;
+
+ }
+ return av_key_frame_frequency;
}
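The weighted average in estimate_keyframe_frequency() is a recency-weighted mean of the last KEY_FRAME_CONTEXT intervals. A sketch assuming KEY_FRAME_CONTEXT == 5 and a weight table rising toward the most recent interval (the real prior_key_frame_weight table lives in ratectrl.c):

#define KF_CONTEXT 5

static int weighted_kf_frequency(const int distance[KF_CONTEXT]) {
  static const int weight[KF_CONTEXT] = { 1, 2, 3, 4, 5 }; /* assumed */
  unsigned int total_weight = 0;
  int av = 0, i;

  for (i = 0; i < KF_CONTEXT; i++) {
    av += weight[i] * distance[i];
    total_weight += weight[i];
  }
  return av / (int)total_weight;
}

With distances {60, 60, 60, 60, 30}, the result is (60 + 120 + 180 + 240 + 150) / 15 == 50: one short recent interval pulls the estimate down more than an old one would.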
-void vp8_adjust_key_frame_context(VP8_COMP *cpi)
-{
- // Clear down mmx registers to allow floating point in what follows
- vp8_clear_system_state();
+void vp8_adjust_key_frame_context(VP8_COMP *cpi) {
+ // Clear down mmx registers to allow floating point in what follows
+ vp8_clear_system_state();
- cpi->frames_since_key = 0;
- cpi->key_frame_count++;
+ cpi->frames_since_key = 0;
+ cpi->key_frame_count++;
}
-void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit)
-{
- // Set-up bounds on acceptable frame size:
- if (cpi->oxcf.fixed_q >= 0)
- {
- // Fixed Q scenario: frame size never outranges target (there is no target!)
- *frame_under_shoot_limit = 0;
- *frame_over_shoot_limit = INT_MAX;
- }
- else
- {
- if (cpi->common.frame_type == KEY_FRAME)
- {
- *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit) {
+ // Set-up bounds on acceptable frame size:
+ if (cpi->oxcf.fixed_q >= 0) {
+ // Fixed Q scenario: the frame size is never out of range (there is no target!)
+ *frame_under_shoot_limit = 0;
+ *frame_over_shoot_limit = INT_MAX;
+ } else {
+ if (cpi->common.frame_type == KEY_FRAME) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+ if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+ // Strong overshoot limit for constrained quality
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8;
+ } else {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
}
- else
- {
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
- {
- *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
- }
- else
- {
- // Stron overshoot limit for constrained quality
- if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
- {
- *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8;
- }
- else
- {
- *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
- *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
- }
- }
- }
-
- // For very small rate targets where the fractional adjustment
- // (eg * 7/8) may be tiny make sure there is at least a minimum
- // range.
- *frame_over_shoot_limit += 200;
- *frame_under_shoot_limit -= 200;
- if ( *frame_under_shoot_limit < 0 )
- *frame_under_shoot_limit = 0;
+ }
}
+
+ // For very small rate targets, where the fractional adjustment
+ // (e.g. * 7/8) may be tiny, make sure there is at least a minimum
+ // range.
+ *frame_over_shoot_limit += 200;
+ *frame_under_shoot_limit -= 200;
+ if (*frame_under_shoot_limit < 0)
+ *frame_under_shoot_limit = 0;
+ }
}
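Collecting the branches of vp8_compute_frame_size_bounds(), the bounds before the +/-200-bit widening, as fractions of this_frame_target:

    frame type                            overshoot   undershoot
    key frame                                9/8          7/8
    golden / alt-ref refresh                 9/8          7/8
    other inter, constrained quality        11/8          2/8
    other inter, all other end usages       11/8          5/8

With a fixed Q there are no bounds at all (0 and INT_MAX).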
// return of 0 means drop frame
-int vp8_pick_frame_size(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
+int vp8_pick_frame_size(VP8_COMP *cpi) {
+ VP8_COMMON *cm = &cpi->common;
- if (cm->frame_type == KEY_FRAME)
- calc_iframe_target_size(cpi);
- else
- calc_pframe_target_size(cpi);
+ if (cm->frame_type == KEY_FRAME)
+ calc_iframe_target_size(cpi);
+ else
+ calc_pframe_target_size(cpi);
- return 1;
+ return 1;
}
diff --git a/vp8/encoder/ratectrl.h b/vp8/encoder/ratectrl.h
index 76eff47a0..ac1a76f4c 100644
--- a/vp8/encoder/ratectrl.h
+++ b/vp8/encoder/ratectrl.h
@@ -27,9 +27,9 @@ extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_
// return of 0 means drop frame
extern int vp8_pick_frame_size(VP8_COMP *cpi);
-extern double vp8_convert_qindex_to_q( int qindex );
-extern int vp8_gfboost_qadjust( int qindex );
-extern int vp8_bits_per_mb( FRAME_TYPE frame_type, int qindex );
+extern double vp8_convert_qindex_to_q(int qindex);
+extern int vp8_gfboost_qadjust(int qindex);
+extern int vp8_bits_per_mb(FRAME_TYPE frame_type, int qindex);
void vp8_setup_inter_frame(VP8_COMP *cpi);
#endif
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 47f13cee7..00732fdc7 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -60,187 +60,182 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
#define INVALID_MV 0x80008000
-static const int auto_speed_thresh[17] =
-{
- 1000,
- 200,
- 150,
- 130,
- 150,
- 125,
- 120,
- 115,
- 115,
- 115,
- 115,
- 115,
- 115,
- 115,
- 115,
- 115,
- 105
+static const int auto_speed_thresh[17] = {
+ 1000,
+ 200,
+ 150,
+ 130,
+ 150,
+ 125,
+ 120,
+ 115,
+ 115,
+ 115,
+ 115,
+ 115,
+ 115,
+ 115,
+ 115,
+ 115,
+ 105
};
#if CONFIG_PRED_FILTER
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] =
-{
- {ZEROMV, LAST_FRAME, 0, 0},
- {ZEROMV, LAST_FRAME, 0, 1},
- {DC_PRED, INTRA_FRAME, 0, 0},
-
- {NEARESTMV, LAST_FRAME, 0, 0},
- {NEARESTMV, LAST_FRAME, 0, 1},
- {NEARMV, LAST_FRAME, 0, 0},
- {NEARMV, LAST_FRAME, 0, 1},
-
- {ZEROMV, GOLDEN_FRAME, 0, 0},
- {ZEROMV, GOLDEN_FRAME, 0, 1},
- {NEARESTMV, GOLDEN_FRAME, 0, 0},
- {NEARESTMV, GOLDEN_FRAME, 0, 1},
-
- {ZEROMV, ALTREF_FRAME, 0, 0},
- {ZEROMV, ALTREF_FRAME, 0, 1},
- {NEARESTMV, ALTREF_FRAME, 0, 0},
- {NEARESTMV, ALTREF_FRAME, 0, 1},
-
- {NEARMV, GOLDEN_FRAME, 0, 0},
- {NEARMV, GOLDEN_FRAME, 0, 1},
- {NEARMV, ALTREF_FRAME, 0, 0},
- {NEARMV, ALTREF_FRAME, 0, 1},
-
- {V_PRED, INTRA_FRAME, 0, 0},
- {H_PRED, INTRA_FRAME, 0, 0},
+const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+ {ZEROMV, LAST_FRAME, 0, 0},
+ {ZEROMV, LAST_FRAME, 0, 1},
+ {DC_PRED, INTRA_FRAME, 0, 0},
+
+ {NEARESTMV, LAST_FRAME, 0, 0},
+ {NEARESTMV, LAST_FRAME, 0, 1},
+ {NEARMV, LAST_FRAME, 0, 0},
+ {NEARMV, LAST_FRAME, 0, 1},
+
+ {ZEROMV, GOLDEN_FRAME, 0, 0},
+ {ZEROMV, GOLDEN_FRAME, 0, 1},
+ {NEARESTMV, GOLDEN_FRAME, 0, 0},
+ {NEARESTMV, GOLDEN_FRAME, 0, 1},
+
+ {ZEROMV, ALTREF_FRAME, 0, 0},
+ {ZEROMV, ALTREF_FRAME, 0, 1},
+ {NEARESTMV, ALTREF_FRAME, 0, 0},
+ {NEARESTMV, ALTREF_FRAME, 0, 1},
+
+ {NEARMV, GOLDEN_FRAME, 0, 0},
+ {NEARMV, GOLDEN_FRAME, 0, 1},
+ {NEARMV, ALTREF_FRAME, 0, 0},
+ {NEARMV, ALTREF_FRAME, 0, 1},
+
+ {V_PRED, INTRA_FRAME, 0, 0},
+ {H_PRED, INTRA_FRAME, 0, 0},
#if CONFIG_NEWINTRAMODES
- {D45_PRED, INTRA_FRAME, 0, 0},
- {D135_PRED, INTRA_FRAME, 0, 0},
- {D117_PRED, INTRA_FRAME, 0, 0},
- {D153_PRED, INTRA_FRAME, 0, 0},
- {D27_PRED, INTRA_FRAME, 0, 0},
- {D63_PRED, INTRA_FRAME, 0, 0},
-#endif
-
- {TM_PRED, INTRA_FRAME, 0, 0},
-
- {NEWMV, LAST_FRAME, 0, 0},
- {NEWMV, LAST_FRAME, 0, 1},
- {NEWMV, GOLDEN_FRAME, 0, 0},
- {NEWMV, GOLDEN_FRAME, 0, 1},
- {NEWMV, ALTREF_FRAME, 0, 0},
- {NEWMV, ALTREF_FRAME, 0, 1},
-
- {SPLITMV, LAST_FRAME, 0, 0},
- {SPLITMV, GOLDEN_FRAME, 0, 0},
- {SPLITMV, ALTREF_FRAME, 0, 0},
-
- {B_PRED, INTRA_FRAME, 0, 0},
- {I8X8_PRED, INTRA_FRAME, 0, 0},
-
- /* compound prediction modes */
- {ZEROMV, LAST_FRAME, GOLDEN_FRAME, 0},
- {NEARESTMV, LAST_FRAME, GOLDEN_FRAME, 0},
- {NEARMV, LAST_FRAME, GOLDEN_FRAME, 0},
-
- {ZEROMV, ALTREF_FRAME, LAST_FRAME, 0},
- {NEARESTMV, ALTREF_FRAME, LAST_FRAME, 0},
- {NEARMV, ALTREF_FRAME, LAST_FRAME, 0},
-
- {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
- {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
- {NEARMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
-
- {NEWMV, LAST_FRAME, GOLDEN_FRAME, 0},
- {NEWMV, ALTREF_FRAME, LAST_FRAME, 0},
- {NEWMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
-
- {SPLITMV, LAST_FRAME, GOLDEN_FRAME, 0},
- {SPLITMV, ALTREF_FRAME, LAST_FRAME, 0},
- {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME, 0}
+ {D45_PRED, INTRA_FRAME, 0, 0},
+ {D135_PRED, INTRA_FRAME, 0, 0},
+ {D117_PRED, INTRA_FRAME, 0, 0},
+ {D153_PRED, INTRA_FRAME, 0, 0},
+ {D27_PRED, INTRA_FRAME, 0, 0},
+ {D63_PRED, INTRA_FRAME, 0, 0},
+#endif
+
+ {TM_PRED, INTRA_FRAME, 0, 0},
+
+ {NEWMV, LAST_FRAME, 0, 0},
+ {NEWMV, LAST_FRAME, 0, 1},
+ {NEWMV, GOLDEN_FRAME, 0, 0},
+ {NEWMV, GOLDEN_FRAME, 0, 1},
+ {NEWMV, ALTREF_FRAME, 0, 0},
+ {NEWMV, ALTREF_FRAME, 0, 1},
+
+ {SPLITMV, LAST_FRAME, 0, 0},
+ {SPLITMV, GOLDEN_FRAME, 0, 0},
+ {SPLITMV, ALTREF_FRAME, 0, 0},
+
+ {B_PRED, INTRA_FRAME, 0, 0},
+ {I8X8_PRED, INTRA_FRAME, 0, 0},
+
+ /* compound prediction modes */
+ {ZEROMV, LAST_FRAME, GOLDEN_FRAME, 0},
+ {NEARESTMV, LAST_FRAME, GOLDEN_FRAME, 0},
+ {NEARMV, LAST_FRAME, GOLDEN_FRAME, 0},
+
+ {ZEROMV, ALTREF_FRAME, LAST_FRAME, 0},
+ {NEARESTMV, ALTREF_FRAME, LAST_FRAME, 0},
+ {NEARMV, ALTREF_FRAME, LAST_FRAME, 0},
+
+ {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
+ {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
+ {NEARMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
+
+ {NEWMV, LAST_FRAME, GOLDEN_FRAME, 0},
+ {NEWMV, ALTREF_FRAME, LAST_FRAME, 0},
+ {NEWMV, GOLDEN_FRAME, ALTREF_FRAME, 0},
+
+ {SPLITMV, LAST_FRAME, GOLDEN_FRAME, 0},
+ {SPLITMV, ALTREF_FRAME, LAST_FRAME, 0},
+ {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME, 0}
};
#else
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] =
-{
- {ZEROMV, LAST_FRAME, 0},
- {DC_PRED, INTRA_FRAME, 0},
+const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+ {ZEROMV, LAST_FRAME, 0},
+ {DC_PRED, INTRA_FRAME, 0},
- {NEARESTMV, LAST_FRAME, 0},
- {NEARMV, LAST_FRAME, 0},
+ {NEARESTMV, LAST_FRAME, 0},
+ {NEARMV, LAST_FRAME, 0},
- {ZEROMV, GOLDEN_FRAME, 0},
- {NEARESTMV, GOLDEN_FRAME, 0},
+ {ZEROMV, GOLDEN_FRAME, 0},
+ {NEARESTMV, GOLDEN_FRAME, 0},
- {ZEROMV, ALTREF_FRAME, 0},
- {NEARESTMV, ALTREF_FRAME, 0},
+ {ZEROMV, ALTREF_FRAME, 0},
+ {NEARESTMV, ALTREF_FRAME, 0},
- {NEARMV, GOLDEN_FRAME, 0},
- {NEARMV, ALTREF_FRAME, 0},
+ {NEARMV, GOLDEN_FRAME, 0},
+ {NEARMV, ALTREF_FRAME, 0},
- {V_PRED, INTRA_FRAME, 0},
- {H_PRED, INTRA_FRAME, 0},
+ {V_PRED, INTRA_FRAME, 0},
+ {H_PRED, INTRA_FRAME, 0},
#if CONFIG_NEWINTRAMODES
- {D45_PRED, INTRA_FRAME, 0},
- {D135_PRED, INTRA_FRAME, 0},
- {D117_PRED, INTRA_FRAME, 0},
- {D153_PRED, INTRA_FRAME, 0},
- {D27_PRED, INTRA_FRAME, 0},
- {D63_PRED, INTRA_FRAME, 0},
+ {D45_PRED, INTRA_FRAME, 0},
+ {D135_PRED, INTRA_FRAME, 0},
+ {D117_PRED, INTRA_FRAME, 0},
+ {D153_PRED, INTRA_FRAME, 0},
+ {D27_PRED, INTRA_FRAME, 0},
+ {D63_PRED, INTRA_FRAME, 0},
#endif
- {TM_PRED, INTRA_FRAME, 0},
+ {TM_PRED, INTRA_FRAME, 0},
- {NEWMV, LAST_FRAME, 0},
- {NEWMV, GOLDEN_FRAME, 0},
- {NEWMV, ALTREF_FRAME, 0},
+ {NEWMV, LAST_FRAME, 0},
+ {NEWMV, GOLDEN_FRAME, 0},
+ {NEWMV, ALTREF_FRAME, 0},
- {SPLITMV, LAST_FRAME, 0},
- {SPLITMV, GOLDEN_FRAME, 0},
- {SPLITMV, ALTREF_FRAME, 0},
+ {SPLITMV, LAST_FRAME, 0},
+ {SPLITMV, GOLDEN_FRAME, 0},
+ {SPLITMV, ALTREF_FRAME, 0},
- {B_PRED, INTRA_FRAME, 0},
- {I8X8_PRED, INTRA_FRAME, 0},
+ {B_PRED, INTRA_FRAME, 0},
+ {I8X8_PRED, INTRA_FRAME, 0},
- /* compound prediction modes */
- {ZEROMV, LAST_FRAME, GOLDEN_FRAME},
- {NEARESTMV, LAST_FRAME, GOLDEN_FRAME},
- {NEARMV, LAST_FRAME, GOLDEN_FRAME},
+ /* compound prediction modes */
+ {ZEROMV, LAST_FRAME, GOLDEN_FRAME},
+ {NEARESTMV, LAST_FRAME, GOLDEN_FRAME},
+ {NEARMV, LAST_FRAME, GOLDEN_FRAME},
- {ZEROMV, ALTREF_FRAME, LAST_FRAME},
- {NEARESTMV, ALTREF_FRAME, LAST_FRAME},
- {NEARMV, ALTREF_FRAME, LAST_FRAME},
+ {ZEROMV, ALTREF_FRAME, LAST_FRAME},
+ {NEARESTMV, ALTREF_FRAME, LAST_FRAME},
+ {NEARMV, ALTREF_FRAME, LAST_FRAME},
- {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME},
- {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
- {NEARMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {NEARMV, GOLDEN_FRAME, ALTREF_FRAME},
- {NEWMV, LAST_FRAME, GOLDEN_FRAME},
- {NEWMV, ALTREF_FRAME, LAST_FRAME },
- {NEWMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {NEWMV, LAST_FRAME, GOLDEN_FRAME},
+ {NEWMV, ALTREF_FRAME, LAST_FRAME },
+ {NEWMV, GOLDEN_FRAME, ALTREF_FRAME},
- {SPLITMV, LAST_FRAME, GOLDEN_FRAME},
- {SPLITMV, ALTREF_FRAME, LAST_FRAME },
- {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME}
+ {SPLITMV, LAST_FRAME, GOLDEN_FRAME},
+ {SPLITMV, ALTREF_FRAME, LAST_FRAME },
+ {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME}
};
#endif
static void fill_token_costs(
- unsigned int (*c)[COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS],
- const vp8_prob (*p)[COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES],
- int block_type_counts)
-{
- int i, j, k;
-
- for (i = 0; i < block_type_counts; i++)
- for (j = 0; j < COEF_BANDS; j++)
- for (k = 0; k < PREV_COEF_CONTEXTS; k++)
- {
- if(k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0)))
- vp8_cost_tokens_skip((int *)( c [i][j][k]),
- p [i][j][k],
- vp8_coef_tree);
- else
- vp8_cost_tokens((int *)(c [i][j][k]),
- p [i][j][k],
- vp8_coef_tree);
- }
+ unsigned int (*c)[COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS],
+ const vp8_prob(*p)[COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES],
+ int block_type_counts) {
+ int i, j, k;
+
+ for (i = 0; i < block_type_counts; i++)
+ for (j = 0; j < COEF_BANDS; j++)
+ for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
+ if (k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0)))
+ vp8_cost_tokens_skip((int *)(c [i][j][k]),
+ p [i][j][k],
+ vp8_coef_tree);
+ else
+ vp8_cost_tokens((int *)(c [i][j][k]),
+ p [i][j][k],
+ vp8_coef_tree);
+ }
}
@@ -256,710 +251,648 @@ static int rd_iifactor [ 32 ] = { 4, 4, 3, 2, 1, 0, 0, 0,
static int sad_per_bit16lut[QINDEX_RANGE];
static int sad_per_bit4lut[QINDEX_RANGE];
-void vp8_init_me_luts()
-{
- int i;
-
- // Initialize the sad lut tables using a formulaic calculation for now
- // This is to make it easier to resolve the impact of experimental changes
- // to the quantizer tables.
- for ( i = 0; i < QINDEX_RANGE; i++ )
- {
- sad_per_bit16lut[i] =
- (int)((0.0418*vp8_convert_qindex_to_q(i)) + 2.4107);
- sad_per_bit4lut[i] = (int)((0.063*vp8_convert_qindex_to_q(i)) + 2.742);
- }
+void vp8_init_me_luts() {
+ int i;
+
+ // Initialize the sad lut tables using a formulaic calculation for now
+ // This is to make it easier to resolve the impact of experimental changes
+ // to the quantizer tables.
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ sad_per_bit16lut[i] =
+ (int)((0.0418 * vp8_convert_qindex_to_q(i)) + 2.4107);
+ sad_per_bit4lut[i] = (int)((0.063 * vp8_convert_qindex_to_q(i)) + 2.742);
+ }
}
-int compute_rd_mult( int qindex )
-{
- int q;
+int compute_rd_mult(int qindex) {
+ int q;
- q = vp8_dc_quant(qindex,0);
- return (11 * q * q) >> 6;
+ q = vp8_dc_quant(qindex, 0);
+ return (11 * q * q) >> 6;
}
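Since compute_rd_mult() grows with the square of the DC quantizer step, a quick worked example (the step sizes are hypothetical) calibrates the scale:

static int rd_mult(int q) { return (11 * q * q) >> 6; }
/* rd_mult(40) == (11 * 1600) >> 6 == 275; rd_mult(80) == 1100.
 * Doubling the DC step quadruples the rate-distortion multiplier. */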
-void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex)
-{
- cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
- cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
+void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) {
+ cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
+ cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
}
-void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex)
-{
- int q;
- int i;
+void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
+ int q;
+ int i;
- vp8_clear_system_state(); //__asm emms;
+ vp8_clear_system_state(); // __asm emms;
- // Further tests required to see if optimum is different
- // for key frames, golden frames and arf frames.
- // if (cpi->common.refresh_golden_frame ||
- // cpi->common.refresh_alt_ref_frame)
- QIndex=(QIndex<0)? 0 : ((QIndex>MAXQ)?MAXQ : QIndex);
+ // Further tests required to see if optimum is different
+ // for key frames, golden frames and arf frames.
+ // if (cpi->common.refresh_golden_frame ||
+ // cpi->common.refresh_alt_ref_frame)
+ QIndex = (QIndex < 0) ? 0 : ((QIndex > MAXQ) ? MAXQ : QIndex);
- cpi->RDMULT = compute_rd_mult(QIndex);
+ cpi->RDMULT = compute_rd_mult(QIndex);
- // Extend rate multiplier along side quantizer zbin increases
- if (cpi->zbin_over_quant > 0)
- {
- double oq_factor;
+ // Extend rate multiplier alongside quantizer zbin increases
+ if (cpi->zbin_over_quant > 0) {
+ double oq_factor;
- // Experimental code using the same basic equation as used for Q above
- // The units of cpi->zbin_over_quant are 1/128 of Q bin size
- oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
- cpi->RDMULT = (int)((double)cpi->RDMULT * oq_factor * oq_factor);
- }
+ // Experimental code using the same basic equation as used for Q above
+ // The units of cpi->zbin_over_quant are 1/128 of Q bin size
+ oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
+ cpi->RDMULT = (int)((double)cpi->RDMULT * oq_factor * oq_factor);
+ }
- if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
- {
- if (cpi->twopass.next_iiratio > 31)
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
- else
- cpi->RDMULT +=
- (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
- }
+ if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+ if (cpi->twopass.next_iiratio > 31)
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
+ else
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
+ }
- if (cpi->RDMULT < 7)
- cpi->RDMULT = 7;
+ if (cpi->RDMULT < 7)
+ cpi->RDMULT = 7;
- cpi->mb.errorperbit = (cpi->RDMULT / 110);
- cpi->mb.errorperbit += (cpi->mb.errorperbit==0);
+ cpi->mb.errorperbit = (cpi->RDMULT / 110);
+ cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);
- vp8_set_speed_features(cpi);
+ vp8_set_speed_features(cpi);
- q = (int)pow(vp8_dc_quant(QIndex,0)>>2, 1.25);
- q = q << 2;
- cpi->RDMULT = cpi->RDMULT << 4;
+ q = (int)pow(vp8_dc_quant(QIndex, 0) >> 2, 1.25);
+ q = q << 2;
+ cpi->RDMULT = cpi->RDMULT << 4;
- if (q < 8)
- q = 8;
+ if (q < 8)
+ q = 8;
- if (cpi->RDMULT > 1000)
- {
- cpi->RDDIV = 1;
- cpi->RDMULT /= 100;
+ if (cpi->RDMULT > 1000) {
+ cpi->RDDIV = 1;
+ cpi->RDMULT /= 100;
- for (i = 0; i < MAX_MODES; i++)
- {
- if (cpi->sf.thresh_mult[i] < INT_MAX)
- {
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
- }
- else
- {
- cpi->rd_threshes[i] = INT_MAX;
- }
+ for (i = 0; i < MAX_MODES; i++) {
+ if (cpi->sf.thresh_mult[i] < INT_MAX) {
+ cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
+ } else {
+ cpi->rd_threshes[i] = INT_MAX;
+ }
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
- }
+ cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
}
- else
- {
- cpi->RDDIV = 100;
+ } else {
+ cpi->RDDIV = 100;
- for (i = 0; i < MAX_MODES; i++)
- {
- if (cpi->sf.thresh_mult[i] < (INT_MAX / q))
- {
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
- }
- else
- {
- cpi->rd_threshes[i] = INT_MAX;
- }
+ for (i = 0; i < MAX_MODES; i++) {
+ if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) {
+ cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
+ } else {
+ cpi->rd_threshes[i] = INT_MAX;
+ }
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
- }
+ cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
}
+ }
- fill_token_costs(
- cpi->mb.token_costs,
- (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
- BLOCK_TYPES);
+ fill_token_costs(
+ cpi->mb.token_costs,
+ (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
+ BLOCK_TYPES);
- fill_token_costs(
- cpi->mb.token_costs_8x8,
- (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
- BLOCK_TYPES_8X8);
+ fill_token_costs(
+ cpi->mb.token_costs_8x8,
+ (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
+ BLOCK_TYPES_8X8);
- /*rough estimate for costing*/
- cpi->common.kf_ymode_probs_index = cpi->common.base_qindex>>4;
- vp8_init_mode_costs(cpi);
+ /* rough estimate for costing */
+ cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
+ vp8_init_mode_costs(cpi);
}
-void vp8_auto_select_speed(VP8_COMP *cpi)
-{
- int milliseconds_for_compress = (int)(1000000 / cpi->oxcf.frame_rate);
+void vp8_auto_select_speed(VP8_COMP *cpi) {
+ int milliseconds_for_compress = (int)(1000000 / cpi->oxcf.frame_rate);
- milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
+ milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
#if 0
- if (0)
- {
- FILE *f;
+ if (0) {
+ FILE *f;
- f = fopen("speed.stt", "a");
- fprintf(f, " %8ld %10ld %10ld %10ld\n",
- cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
- fclose(f);
- }
+ f = fopen("speed.stt", "a");
+ fprintf(f, " %8ld %10ld %10ld %10ld\n",
+ cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
+ fclose(f);
+ }
#endif
- /*
- // this is done during parameter valid check
- if( cpi->oxcf.cpu_used > 16)
- cpi->oxcf.cpu_used = 16;
- if( cpi->oxcf.cpu_used < -16)
- cpi->oxcf.cpu_used = -16;
- */
+ /*
+ // this is done during parameter valid check
+ if( cpi->oxcf.cpu_used > 16)
+ cpi->oxcf.cpu_used = 16;
+ if( cpi->oxcf.cpu_used < -16)
+ cpi->oxcf.cpu_used = -16;
+ */
- if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress)
- {
- if (cpi->avg_pick_mode_time == 0)
- {
- cpi->Speed = 4;
+ if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress) {
+ if (cpi->avg_pick_mode_time == 0) {
+ cpi->Speed = 4;
+ } else {
+ if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95) {
+ cpi->Speed += 2;
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
+
+ if (cpi->Speed > 16) {
+ cpi->Speed = 16;
}
- else
- {
- if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95)
- {
- cpi->Speed += 2;
- cpi->avg_pick_mode_time = 0;
- cpi->avg_encode_time = 0;
-
- if (cpi->Speed > 16)
- {
- cpi->Speed = 16;
- }
- }
+ }
- if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed])
- {
- cpi->Speed -= 1;
- cpi->avg_pick_mode_time = 0;
- cpi->avg_encode_time = 0;
+ if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) {
+ cpi->Speed -= 1;
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
- // In real-time mode, cpi->speed is in [4, 16].
- if (cpi->Speed < 4) //if ( cpi->Speed < 0 )
- {
- cpi->Speed = 4; //cpi->Speed = 0;
- }
- }
+ // In real-time mode, cpi->speed is in [4, 16].
+ if (cpi->Speed < 4) { // if ( cpi->Speed < 0 )
+ cpi->Speed = 4; // cpi->Speed = 0;
}
+ }
}
- else
- {
- cpi->Speed += 4;
+ } else {
+ cpi->Speed += 4;
- if (cpi->Speed > 16)
- cpi->Speed = 16;
+ if (cpi->Speed > 16)
+ cpi->Speed = 16;
- cpi->avg_pick_mode_time = 0;
- cpi->avg_encode_time = 0;
- }
+ cpi->avg_pick_mode_time = 0;
+ cpi->avg_encode_time = 0;
+ }
}
-int vp8_block_error_c(short *coeff, short *dqcoeff)
-{
- int i;
- int error = 0;
+int vp8_block_error_c(short *coeff, short *dqcoeff) {
+ int i;
+ int error = 0;
- for (i = 0; i < 16; i++)
- {
- int this_diff = coeff[i] - dqcoeff[i];
- error += this_diff * this_diff;
- }
+ for (i = 0; i < 16; i++) {
+ int this_diff = coeff[i] - dqcoeff[i];
+ error += this_diff * this_diff;
+ }
- return error;
+ return error;
}
-int vp8_mbblock_error_c(MACROBLOCK *mb, int dc)
-{
- BLOCK *be;
- BLOCKD *bd;
- int i, j;
- int berror, error = 0;
+int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
+ BLOCK *be;
+ BLOCKD *bd;
+ int i, j;
+ int berror, error = 0;
- for (i = 0; i < 16; i++)
- {
- be = &mb->block[i];
- bd = &mb->e_mbd.block[i];
-
- berror = 0;
+ for (i = 0; i < 16; i++) {
+ be = &mb->block[i];
+ bd = &mb->e_mbd.block[i];
- for (j = dc; j < 16; j++)
- {
- int this_diff = be->coeff[j] - bd->dqcoeff[j];
- berror += this_diff * this_diff;
- }
+ berror = 0;
- error += berror;
+ for (j = dc; j < 16; j++) {
+ int this_diff = be->coeff[j] - bd->dqcoeff[j];
+ berror += this_diff * this_diff;
}
- return error;
+ error += berror;
+ }
+
+ return error;
}
-int vp8_mbuverror_c(MACROBLOCK *mb)
-{
+int vp8_mbuverror_c(MACROBLOCK *mb) {
- BLOCK *be;
- BLOCKD *bd;
+ BLOCK *be;
+ BLOCKD *bd;
- int i;
- int error = 0;
+ int i;
+ int error = 0;
- for (i = 16; i < 24; i++)
- {
- be = &mb->block[i];
- bd = &mb->e_mbd.block[i];
+ for (i = 16; i < 24; i++) {
+ be = &mb->block[i];
+ bd = &mb->e_mbd.block[i];
- error += vp8_block_error_c(be->coeff, bd->dqcoeff);
- }
+ error += vp8_block_error_c(be->coeff, bd->dqcoeff);
+ }
- return error;
+ return error;
}
-int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
-{
- unsigned char *uptr, *vptr;
- unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
- unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
- int uv_stride = x->block[16].src_stride;
-
- unsigned int sse1 = 0;
- unsigned int sse2 = 0;
- int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
- int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
- int offset;
- int pre_stride = x->e_mbd.block[16].pre_stride;
-
- if (mv_row < 0)
- mv_row -= 1;
- else
- mv_row += 1;
-
- if (mv_col < 0)
- mv_col -= 1;
- else
- mv_col += 1;
-
- mv_row /= 2;
- mv_col /= 2;
-
- offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
- uptr = x->e_mbd.pre.u_buffer + offset;
- vptr = x->e_mbd.pre.v_buffer + offset;
-
- if ((mv_row | mv_col) & 7)
- {
+int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned char *uptr, *vptr;
+ unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
+ unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
+ int uv_stride = x->block[16].src_stride;
+
+ unsigned int sse1 = 0;
+ unsigned int sse2 = 0;
+ int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
+ int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
+ int offset;
+ int pre_stride = x->e_mbd.block[16].pre_stride;
+
+ if (mv_row < 0)
+ mv_row -= 1;
+ else
+ mv_row += 1;
+
+ if (mv_col < 0)
+ mv_col -= 1;
+ else
+ mv_col += 1;
+
+ mv_row /= 2;
+ mv_col /= 2;
+
+ offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
+ uptr = x->e_mbd.pre.u_buffer + offset;
+ vptr = x->e_mbd.pre.v_buffer + offset;
+
+ if ((mv_row | mv_col) & 7) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
- (mv_col & 7)<<1, (mv_row & 7)<<1, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
- (mv_col & 7)<<1, (mv_row & 7)<<1, vpred_ptr, uv_stride, &sse1);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+ (mv_col & 7) << 1, (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+ (mv_col & 7) << 1, (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
#else
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
- mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
- mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
-#endif
- sse2 += sse1;
- }
- else
- {
- VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
- upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
- vpred_ptr, uv_stride, &sse1);
- sse2 += sse1;
- }
- return sse2;
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+ mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+ mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
+#endif
+ sse2 += sse1;
+ } else {
+ VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
+ upred_ptr, uv_stride, &sse2);
+ VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
+ vpred_ptr, uv_stride, &sse1);
+ sse2 += sse1;
+ }
+ return sse2;
}
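The MV manipulation at the top of VP8_UVSSE() is a divide-by-two with away-from-zero rounding, mapping the luma motion vector onto the 2x-subsampled chroma planes. A standalone sketch of one component:

/* Chroma MV component from a luma MV component (both in 1/8 pel):
 * bias away from zero before halving, as VP8_UVSSE() does. */
static int chroma_mv_component(int luma_mv) {
  return ((luma_mv < 0) ? (luma_mv - 1) : (luma_mv + 1)) / 2;
}
/* chroma_mv_component(7) == 4 and chroma_mv_component(-7) == -4;
 * plain truncating division would give 3 and -3. */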
-static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
-{
- int c = !type; /* start at coef 0, unless Y with Y2 */
- int eob = b->eob;
- int pt ; /* surrounding block/prev coef predictor */
- int cost = 0;
- short *qcoeff_ptr = b->qcoeff;
+static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
+ int c = !type; /* start at coef 0, unless Y with Y2 */
+ int eob = b->eob;
+ int pt; /* surrounding block/prev coef predictor */
+ int cost = 0;
+ short *qcoeff_ptr = b->qcoeff;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
# define QC( I) ( qcoeff_ptr [vp8_default_zig_zag1d[I]] )
- for (; c < eob; c++)
- {
- int v = QC(c);
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
- }
+ for (; c < eob; c++) {
+ int v = QC(c);
+ int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
# undef QC
- if (c < 16)
- cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];
+ if (c < 16)
+ cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];
- pt = (c != !type); // is eob first coefficient;
- *a = *l = pt;
+ pt = (c != !type); // is eob at the first coefficient?
+ *a = *l = pt;
- return cost;
+ return cost;
}
-static int vp8_rdcost_mby(MACROBLOCK *mb)
-{
- int cost = 0;
- int b;
- MACROBLOCKD *x = &mb->e_mbd;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+static int vp8_rdcost_mby(MACROBLOCK *mb) {
+ int cost = 0;
+ int b;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 0; b < 16; b++)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ for (b = 0; b < 16; b++)
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
- cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
+ cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
+ ta + vp8_block2above[24], tl + vp8_block2left[24]);
- return cost;
+ return cost;
}
-static void macro_block_yrd( MACROBLOCK *mb,
- int *Rate,
- int *Distortion,
- const VP8_ENCODER_RTCD *rtcd)
-{
- int b;
- MACROBLOCKD *const x = &mb->e_mbd;
- BLOCK *const mb_y2 = mb->block + 24;
- BLOCKD *const x_y2 = x->block + 24;
- short *Y2DCPtr = mb_y2->src_diff;
- BLOCK *beptr;
- int d;
-
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- mb->e_mbd.predictor,
- mb->block[0].src_stride );
-
- // Fdct and building the 2nd order block
- for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
- {
- mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
- *Y2DCPtr++ = beptr->coeff[0];
- *Y2DCPtr++ = beptr->coeff[16];
- }
+static void macro_block_yrd(MACROBLOCK *mb,
+ int *Rate,
+ int *Distortion,
+ const VP8_ENCODER_RTCD *rtcd) {
+ int b;
+ MACROBLOCKD *const x = &mb->e_mbd;
+ BLOCK *const mb_y2 = mb->block + 24;
+ BLOCKD *const x_y2 = x->block + 24;
+ short *Y2DCPtr = mb_y2->src_diff;
+ BLOCK *beptr;
+ int d;
- // 2nd order fdct
- mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
+ mb->src_diff,
+ *(mb->block[0].base_src),
+ mb->e_mbd.predictor,
+ mb->block[0].src_stride);
- // Quantization
- for (b = 0; b < 16; b++)
- {
- mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
- }
+ // Fdct and building the 2nd order block
+ for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
+ mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
+ *Y2DCPtr++ = beptr->coeff[0];
+ *Y2DCPtr++ = beptr->coeff[16];
+ }
+
+ // 2nd order fdct
+ mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
+
+ // Quantization
+ for (b = 0; b < 16; b++) {
+ mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
+ }
- // DC predication and Quantization of 2nd Order block
- mb->quantize_b(mb_y2, x_y2);
+ // DC prediction and quantization of the 2nd order block
+ mb->quantize_b(mb_y2, x_y2);
- // Distortion
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
+ // Distortion
+ d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff);
+ d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff);
- *Distortion = (d >> 2);
- // rate
- *Rate = vp8_rdcost_mby(mb);
+ *Distortion = (d >> 2);
+ // rate
+ *Rate = vp8_rdcost_mby(mb);
}
static int cost_coeffs_2x2(MACROBLOCK *mb,
BLOCKD *b, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
-{
- int c = !type; /* start at coef 0, unless Y with Y2 */
- int eob = b->eob;
- int pt ; /* surrounding block/prev coef predictor */
- int cost = 0;
- short *qcoeff_ptr = b->qcoeff;
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
+ int c = !type; /* start at coef 0, unless Y with Y2 */
+ int eob = b->eob;
+ int pt; /* surrounding block/prev coef predictor */
+ int cost = 0;
+ short *qcoeff_ptr = b->qcoeff;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- assert(eob<=4);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ assert(eob <= 4);
# define QC2X2( I) ( qcoeff_ptr [vp8_default_zig_zag1d[I]] )
- for (; c < eob; c++)
- {
- int v = QC2X2(c);
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs_8x8[type] [vp8_coef_bands[c]] [pt] [t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
- }
+ for (; c < eob; c++) {
+ int v = QC2X2(c);
+ int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs_8x8[type] [vp8_coef_bands[c]] [pt] [t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
# undef QC2X2
- if (c < 4)
- cost += mb->token_costs_8x8 [type][vp8_coef_bands[c]]
- [pt] [DCT_EOB_TOKEN];
+ if (c < 4)
+ cost += mb->token_costs_8x8 [type][vp8_coef_bands[c]]
+ [pt] [DCT_EOB_TOKEN];
- pt = (c != !type); // is eob first coefficient;
- *a = *l = pt;
- return cost;
+ pt = (c != !type); // 0 if no coefficients were coded (eob at first position), else 1
+ *a = *l = pt;
+ return cost;
}
static int cost_coeffs_8x8(MACROBLOCK *mb,
BLOCKD *b, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
-{
- int c = !type; /* start at coef 0, unless Y with Y2 */
- int eob = b->eob;
- int pt ; /* surrounding block/prev coef predictor */
- int cost = 0;
- short *qcoeff_ptr = b->qcoeff;
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
+ int c = !type; /* start at coef 0, unless Y with Y2 */
+ int eob = b->eob;
+ int pt; /* surrounding block/prev coef predictor */
+ int cost = 0;
+ short *qcoeff_ptr = b->qcoeff;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
# define QC8X8( I) ( qcoeff_ptr [vp8_default_zig_zag1d_8x8[I]] )
- for (; c < eob; c++)
- {
- int v = QC8X8(c);
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs_8x8[type] [vp8_coef_bands_8x8[c]] [pt] [t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
- }
+ for (; c < eob; c++) {
+ int v = QC8X8(c);
+ int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs_8x8[type] [vp8_coef_bands_8x8[c]] [pt] [t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
# undef QC8X8
- if (c < 64)
- cost += mb->token_costs_8x8 [type][vp8_coef_bands_8x8[c]]
- [pt] [DCT_EOB_TOKEN];
+ if (c < 64)
+ cost += mb->token_costs_8x8 [type][vp8_coef_bands_8x8[c]]
+ [pt] [DCT_EOB_TOKEN];
- pt = (c != !type); // is eob first coefficient;
- *a = *l = pt;
- return cost;
+ pt = (c != !type); // 0 if no coefficients were coded (eob at first position), else 1
+ *a = *l = pt;
+ return cost;
}
-static int vp8_rdcost_mby_8x8(MACROBLOCK *mb)
-{
- int cost = 0;
- int b;
- MACROBLOCKD *x = &mb->e_mbd;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
-
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- for (b = 0; b < 16; b+=4)
- cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b]);
-
- cost += cost_coeffs_2x2(mb, x->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
- return cost;
+static int vp8_rdcost_mby_8x8(MACROBLOCK *mb) {
+ int cost = 0;
+ int b;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+
+ vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ for (b = 0; b < 16; b += 4)
+ cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b]);
+
+ cost += cost_coeffs_2x2(mb, x->block + 24, PLANE_TYPE_Y2,
+ ta + vp8_block2above[24], tl + vp8_block2left[24]);
+ return cost;
}
-static void macro_block_yrd_8x8( MACROBLOCK *mb,
- int *Rate,
- int *Distortion,
- const VP8_ENCODER_RTCD *rtcd)
-{
- MACROBLOCKD *const x = &mb->e_mbd;
- BLOCK *const mb_y2 = mb->block + 24;
- BLOCKD *const x_y2 = x->block + 24;
- int d;
-
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- mb->e_mbd.predictor,
- mb->block[0].src_stride );
-
- vp8_transform_mby_8x8(mb);
- vp8_quantize_mby_8x8(mb);
-
- /* remove 1st order dc to properly combine 1st/2nd order distortion */
- mb->coeff[0] = 0;
- mb->coeff[64] = 0;
- mb->coeff[128] = 0;
- mb->coeff[192] = 0;
- mb->e_mbd.dqcoeff[0] = 0;
- mb->e_mbd.dqcoeff[64] = 0;
- mb->e_mbd.dqcoeff[128] = 0;
- mb->e_mbd.dqcoeff[192] = 0;
-
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff);
-
- *Distortion = (d >> 2);
- // rate
- *Rate = vp8_rdcost_mby_8x8(mb);
+static void macro_block_yrd_8x8(MACROBLOCK *mb,
+ int *Rate,
+ int *Distortion,
+ const VP8_ENCODER_RTCD *rtcd) {
+ MACROBLOCKD *const x = &mb->e_mbd;
+ BLOCK *const mb_y2 = mb->block + 24;
+ BLOCKD *const x_y2 = x->block + 24;
+ int d;
+
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
+ mb->src_diff,
+ *(mb->block[0].base_src),
+ mb->e_mbd.predictor,
+ mb->block[0].src_stride);
+
+ vp8_transform_mby_8x8(mb);
+ vp8_quantize_mby_8x8(mb);
+
+ /* remove 1st order dc to properly combine 1st/2nd order distortion */
+ mb->coeff[0] = 0;
+ mb->coeff[64] = 0;
+ mb->coeff[128] = 0;
+ mb->coeff[192] = 0;
+ mb->e_mbd.dqcoeff[0] = 0;
+ mb->e_mbd.dqcoeff[64] = 0;
+ mb->e_mbd.dqcoeff[128] = 0;
+ mb->e_mbd.dqcoeff[192] = 0;
+
+ d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
+ d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff);
+
+ *Distortion = (d >> 2);
+ // rate
+ *Rate = vp8_rdcost_mby_8x8(mb);
}
-static void copy_predictor(unsigned char *dst, const unsigned char *predictor)
-{
- const unsigned int *p = (const unsigned int *)predictor;
- unsigned int *d = (unsigned int *)dst;
- d[0] = p[0];
- d[4] = p[4];
- d[8] = p[8];
- d[12] = p[12];
+static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
+ const unsigned int *p = (const unsigned int *)predictor;
+ unsigned int *d = (unsigned int *)dst;
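+ /* Both buffers have a 16-byte stride (four unsigned ints per row), so
+    the four rows of the 4x4 block sit at int offsets 0, 4, 8 and 12;
+    one int (4 pixels) is copied per row. */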
+ d[0] = p[0];
+ d[4] = p[4];
+ d[8] = p[8];
+ d[12] = p[12];
}
-static void copy_predictor_8x8(unsigned char *dst, const unsigned char *predictor)
-{
- const unsigned int *p = (const unsigned int *)predictor;
- unsigned int *d = (unsigned int *)dst;
- d[0] = p[0];
- d[1] = p[1];
- d[4] = p[4];
- d[5] = p[5];
- d[8] = p[8];
- d[9] = p[9];
- d[12] = p[12];
- d[13] = p[13];
- d[16] = p[16];
- d[17] = p[17];
- d[20] = p[20];
- d[21] = p[21];
- d[24] = p[24];
- d[25] = p[25];
- d[28] = p[28];
- d[29] = p[29];
+static void copy_predictor_8x8(unsigned char *dst, const unsigned char *predictor) {
+ const unsigned int *p = (const unsigned int *)predictor;
+ unsigned int *d = (unsigned int *)dst;
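+ /* Same 16-byte (4-int) stride; an 8-pixel row is two ints, so row r
+    of the 8x8 block occupies int offsets 4 * r and 4 * r + 1. */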
+ d[0] = p[0];
+ d[1] = p[1];
+ d[4] = p[4];
+ d[5] = p[5];
+ d[8] = p[8];
+ d[9] = p[9];
+ d[12] = p[12];
+ d[13] = p[13];
+ d[16] = p[16];
+ d[17] = p[17];
+ d[20] = p[20];
+ d[21] = p[21];
+ d[24] = p[24];
+ d[25] = p[25];
+ d[28] = p[28];
+ d[29] = p[29];
}
static int rd_pick_intra4x4block(
- VP8_COMP *cpi,
- MACROBLOCK *x,
- BLOCK *be,
- BLOCKD *b,
- B_PREDICTION_MODE *best_mode,
+ VP8_COMP *cpi,
+ MACROBLOCK *x,
+ BLOCK *be,
+ BLOCKD *b,
+ B_PREDICTION_MODE *best_mode,
#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE *best_second_mode,
- int allow_comp,
+ B_PREDICTION_MODE *best_second_mode,
+ int allow_comp,
#endif
- int *bmode_costs,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
-
- int *bestrate,
- int *bestratey,
- int *bestdistortion)
-{
- B_PREDICTION_MODE mode;
-#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE mode2;
-#endif
- int best_rd = INT_MAX;
- int rate = 0;
- int distortion;
+ int *bmode_costs,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT ta = *a, tempa = *a;
- ENTROPY_CONTEXT tl = *l, templ = *l;
- /*
- * The predictor buffer is a 2d buffer with a stride of 16. Create
- * a temp buffer that meets the stride requirements, but we are only
- * interested in the left 4x4 block
- * */
- DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16*4);
- DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16);
-
- for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++)
- {
+ int *bestrate,
+ int *bestratey,
+ int *bestdistortion) {
+ B_PREDICTION_MODE mode;
#if CONFIG_COMP_INTRA_PRED
- for (mode2 = (allow_comp ? 0 : (B_DC_PRED - 1)); mode2 != (allow_comp ? (mode + 1) : 0); mode2++)
- {
+ B_PREDICTION_MODE mode2;
+#endif
+ int best_rd = INT_MAX;
+ int rate = 0;
+ int distortion;
+
+ ENTROPY_CONTEXT ta = *a, tempa = *a;
+ ENTROPY_CONTEXT tl = *l, templ = *l;
+ /*
+ * The predictor buffer is a 2d buffer with a stride of 16. Create
+ * a temp buffer that meets the stride requirements, but we are only
+ * interested in the left 4x4 block
+ */
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16 * 4);
+ DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16);
+
+ for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++) {
+#if CONFIG_COMP_INTRA_PRED
+ for (mode2 = (allow_comp ? 0 : (B_DC_PRED - 1)); mode2 != (allow_comp ? (mode + 1) : 0); mode2++) {
#endif
- int this_rd;
- int ratey;
+ int this_rd;
+ int ratey;
- // TODO Temporarily ignore modes that need the above-right data. SB
- // encoding means this data is not available for the bottom right MB
- // Do we need to do this for mode2 also?
- if (mode==B_LD_PRED || mode==B_VL_PRED)
- continue;
- rate = bmode_costs[mode];
+ // TODO: Temporarily ignore modes that need the above-right data, since
+ // SB encoding means this data is not available for the bottom-right MB.
+ // Do we need to do this for mode2 also?
+ if (mode == B_LD_PRED || mode == B_VL_PRED)
+ continue;
+ rate = bmode_costs[mode];
#if CONFIG_COMP_INTRA_PRED
- if (mode2 == (B_PREDICTION_MODE) (B_DC_PRED - 1))
- {
+ if (mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
- (b, mode, b->predictor);
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra4x4_predict)
- (b, mode, mode2, b->predictor);
- rate += bmode_costs[mode2];
- }
-#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
-
- tempa = ta;
- templ = tl;
-
- ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ);
- rate += ratey;
- distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
- be->coeff, b->dqcoeff) >> 2;
-
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
-
- if (this_rd < best_rd)
- {
- *bestrate = rate;
- *bestratey = ratey;
- *bestdistortion = distortion;
- best_rd = this_rd;
- *best_mode = mode;
+ } else {
+ RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra4x4_predict)
+ (b, mode, mode2, b->predictor);
+ rate += bmode_costs[mode2];
+ }
+#endif
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, b);
+
+ tempa = ta;
+ templ = tl;
+
+ ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ);
+ rate += ratey;
+ distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
+ be->coeff, b->dqcoeff) >> 2;
+
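+ /* RDCOST folds the token rate and the reconstruction distortion into
+    a single Lagrangian-style cost using the rdmult/rddiv weights. */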
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = ratey;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
#if CONFIG_COMP_INTRA_PRED
- *best_second_mode = mode2;
+ *best_second_mode = mode2;
#endif
- *a = tempa;
- *l = templ;
- copy_predictor(best_predictor, b->predictor);
- vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
+ *a = tempa;
+ *l = templ;
+ copy_predictor(best_predictor, b->predictor);
+ vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
#if CONFIG_COMP_INTRA_PRED
- }
+ }
#endif
- }
}
- b->bmi.as_mode.first = (B_PREDICTION_MODE)(*best_mode);
+ }
+ b->bmi.as_mode.first = (B_PREDICTION_MODE)(*best_mode);
#if CONFIG_COMP_INTRA_PRED
- b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
+ b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
#endif
- IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
- RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
+ RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
- return best_rd;
+ return best_rd;
}
static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
@@ -967,90 +900,84 @@ static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
#if CONFIG_COMP_INTRA_PRED
int allow_comp,
#endif
- int update_contexts)
-{
- int i;
- MACROBLOCKD *const xd = &mb->e_mbd;
- int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
- int distortion = 0;
- int tot_rate_y = 0;
- int64_t total_rd = 0;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
- int *bmode_costs;
-
- if (update_contexts)
- {
- ta = (ENTROPY_CONTEXT *)mb->e_mbd.above_context;
- tl = (ENTROPY_CONTEXT *)mb->e_mbd.left_context;
- }
- else
- {
- vpx_memcpy(&t_above, mb->e_mbd.above_context,
- sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context,
- sizeof(ENTROPY_CONTEXT_PLANES));
+ int update_contexts) {
+ int i;
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
+ int distortion = 0;
+ int tot_rate_y = 0;
+ int64_t total_rd = 0;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+ int *bmode_costs;
+
+ if (update_contexts) {
+ ta = (ENTROPY_CONTEXT *)mb->e_mbd.above_context;
+ tl = (ENTROPY_CONTEXT *)mb->e_mbd.left_context;
+ } else {
+ vpx_memcpy(&t_above, mb->e_mbd.above_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
- }
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+ }
- // TODO(agrange)
- //vp8_intra_prediction_down_copy(xd);
+ // TODO(agrange)
+ // vp8_intra_prediction_down_copy(xd);
- bmode_costs = mb->inter_bmode_costs;
+ bmode_costs = mb->inter_bmode_costs;
- for (i = 0; i < 16; i++)
- {
- MODE_INFO *const mic = xd->mode_info_context;
- const int mis = xd->mode_info_stride;
- B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
+ for (i = 0; i < 16; i++) {
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+ B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_second_mode);
+ B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_second_mode);
#endif
- int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
+ int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
- if (mb->e_mbd.frame_type == KEY_FRAME)
- {
- const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
- const B_PREDICTION_MODE L = left_block_mode(mic, i);
+ if (mb->e_mbd.frame_type == KEY_FRAME) {
+ const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(mic, i);
- bmode_costs = mb->bmode_costs[A][L];
- }
+ bmode_costs = mb->bmode_costs[A][L];
+ }
- total_rd += rd_pick_intra4x4block(
- cpi, mb, mb->block + i, xd->block + i, &best_mode,
+ total_rd += rd_pick_intra4x4block(
+ cpi, mb, mb->block + i, xd->block + i, &best_mode,
#if CONFIG_COMP_INTRA_PRED
- &best_second_mode, allow_comp,
+ &best_second_mode, allow_comp,
#endif
- bmode_costs, ta + vp8_block2above[i],
- tl + vp8_block2left[i], &r, &ry, &d);
+ bmode_costs, ta + vp8_block2above[i],
+ tl + vp8_block2left[i], &r, &ry, &d);
- cost += r;
- distortion += d;
- tot_rate_y += ry;
+ cost += r;
+ distortion += d;
+ tot_rate_y += ry;
- mic->bmi[i].as_mode.first = best_mode;
+ mic->bmi[i].as_mode.first = best_mode;
#if CONFIG_COMP_INTRA_PRED
- mic->bmi[i].as_mode.second = best_second_mode;
+ mic->bmi[i].as_mode.second = best_second_mode;
#endif
- if(total_rd >= (int64_t)best_rd)
- break;
- }
+ if (total_rd >= (int64_t)best_rd)
+ break;
+ }
- if(total_rd >= (int64_t)best_rd)
- return INT_MAX;
+ if (total_rd >= (int64_t)best_rd)
+ return INT_MAX;
#if CONFIG_COMP_INTRA_PRED
- cost += vp8_cost_bit(128, allow_comp);
+ cost += vp8_cost_bit(128, allow_comp);
#endif
- *Rate = cost;
- *rate_y += tot_rate_y;
- *Distortion = distortion;
+ *Rate = cost;
+ *rate_y += tot_rate_y;
+ *Distortion = distortion;
- return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
+ return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
@@ -1058,435 +985,407 @@ static int rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
MACROBLOCK *x,
int *Rate,
int *rate_y,
- int *Distortion)
-{
- MB_PREDICTION_MODE mode;
- MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ int *Distortion) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
#if CONFIG_COMP_INTRA_PRED
- MB_PREDICTION_MODE mode2;
- MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
+ MB_PREDICTION_MODE mode2;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
#endif
- int rate, ratey;
- int distortion;
- int best_rd = INT_MAX;
- int this_rd;
+ int rate, ratey;
+ int distortion;
+ int best_rd = INT_MAX;
+ int this_rd;
- //Y Search for 16x16 intra prediction mode
- for (mode = DC_PRED; mode <= TM_PRED; mode++)
- {
- x->e_mbd.mode_info_context->mbmi.mode = mode;
+ // Y Search for 16x16 intra prediction mode
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ x->e_mbd.mode_info_context->mbmi.mode = mode;
#if CONFIG_COMP_INTRA_PRED
- for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++)
- {
- x->e_mbd.mode_info_context->mbmi.second_mode = mode2;
- if (mode2 == (MB_PREDICTION_MODE) (DC_PRED - 1))
- {
+ for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++) {
+ x->e_mbd.mode_info_context->mbmi.second_mode = mode2;
+ if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
- (&x->e_mbd);
+ (&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- continue; // i.e. disable for now
- RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)(&x->e_mbd);
- }
+ } else {
+ continue; // compound intra prediction disabled for now; the call below is unreachable
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+ }
#endif
- macro_block_yrd_8x8(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd));
- // FIXME add compoundmode cost
- // FIXME add rate for mode2
- rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.mode];
+ macro_block_yrd_8x8(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd));
+ // FIXME add compoundmode cost
+ // FIXME add rate for mode2
+ rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
+ [x->e_mbd.mode_info_context->mbmi.mode];
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- if (this_rd < best_rd)
- {
- mode_selected = mode;
+ if (this_rd < best_rd) {
+ mode_selected = mode;
#if CONFIG_COMP_INTRA_PRED
- mode2_selected = mode2;
+ mode2_selected = mode2;
#endif
- best_rd = this_rd;
- *Rate = rate;
- *rate_y = ratey;
- *Distortion = distortion;
- }
+ best_rd = this_rd;
+ *Rate = rate;
+ *rate_y = ratey;
+ *Distortion = distortion;
+ }
#if CONFIG_COMP_INTRA_PRED
- }
-#endif
}
+#endif
+ }
- x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
+ x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_mode = mode2_selected;
+ x->e_mbd.mode_info_context->mbmi.second_mode = mode2_selected;
#endif
- return best_rd;
+ return best_rd;
}
static int rd_pick_intra8x8block(
- VP8_COMP *cpi,
- MACROBLOCK *x,
- int ib,
- B_PREDICTION_MODE *best_mode,
+ VP8_COMP *cpi,
+ MACROBLOCK *x,
+ int ib,
+ B_PREDICTION_MODE *best_mode,
#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE *best_second_mode,
-#endif
- int *mode_costs,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- int *bestrate,
- int *bestratey,
- int *bestdistortion)
-{
- MB_PREDICTION_MODE mode;
+ B_PREDICTION_MODE *best_second_mode,
+#endif
+ int *mode_costs,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ int *bestrate,
+ int *bestratey,
+ int *bestdistortion) {
+ MB_PREDICTION_MODE mode;
#if CONFIG_COMP_INTRA_PRED
- MB_PREDICTION_MODE mode2;
+ MB_PREDICTION_MODE mode2;
#endif
- MACROBLOCKD *xd = &x->e_mbd;
- int best_rd = INT_MAX;
- int rate = 0;
- int distortion;
- BLOCK *be=x->block + ib;
- BLOCKD *b=x->e_mbd.block + ib;
- ENTROPY_CONTEXT ta0, ta1, besta0 = 0, besta1 = 0;
- ENTROPY_CONTEXT tl0, tl1, bestl0 = 0, bestl1 = 0;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int best_rd = INT_MAX;
+ int rate = 0;
+ int distortion;
+ BLOCK *be = x->block + ib;
+ BLOCKD *b = x->e_mbd.block + ib;
+ ENTROPY_CONTEXT ta0, ta1, besta0 = 0, besta1 = 0;
+ ENTROPY_CONTEXT tl0, tl1, bestl0 = 0, bestl1 = 0;
- /*
- * The predictor buffer is a 2d buffer with a stride of 16. Create
- * a temp buffer that meets the stride requirements, but we are only
- * interested in the left 8x8 block
- * */
+ /*
+ * The predictor buffer is a 2d buffer with a stride of 16. Create
+ * a temp buffer that meets the stride requirements, but we are only
+ * interested in the left 8x8 block
+ */
- DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16*8);
- DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16*4);
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16 * 8);
+ DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16 * 4);
- for (mode = DC_PRED; mode <= TM_PRED; mode++)
- {
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
#if CONFIG_COMP_INTRA_PRED
- for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++)
- {
+ for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++) {
#endif
- int this_rd;
- int rate_t;
+ int this_rd;
+ int rate_t;
- // FIXME rate for compound mode and second intrapred mode
- rate = mode_costs[mode];
+ // FIXME rate for compound mode and second intrapred mode
+ rate = mode_costs[mode];
#if CONFIG_COMP_INTRA_PRED
- if (mode2 == (MB_PREDICTION_MODE) (DC_PRED - 1))
- {
+ if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, intra8x8_predict)
- (b, mode, b->predictor);
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- continue; // i.e. disable for now
- RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra8x8_predict)
- (b, mode, mode2, b->predictor);
- }
-#endif
-
- vp8_subtract_4b_c(be, b, 16);
-
- x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
- x->vp8_short_fdct8x4(be->src_diff + 64, be->coeff + 64, 32);
-
- x->quantize_b_pair(x->block+ib, x->block+ib+1,
- xd->block+ib, xd->block+ib+1);
- x->quantize_b_pair(x->block+ib+4, x->block+ib+5,
- xd->block+ib+4, xd->block+ib+5);
-
- distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
- ((x->block+ib)->coeff,(xd->block+ib)->dqcoeff)>>2;
- distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
- ((x->block+ib+1)->coeff,(xd->block+ib+1)->dqcoeff)>>2;
- distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
- ((x->block+ib+4)->coeff,(xd->block+ib+4)->dqcoeff)>>2;
- distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
- ((x->block+ib+5)->coeff,(xd->block+ib+5)->dqcoeff)>>2;
-
- ta0 = *(a + vp8_block2above[ib]);
- ta1 = *(a + vp8_block2above[ib+1]);
- tl0 = *(l + vp8_block2above[ib]);
- tl1 = *(l + vp8_block2above[ib+4]);
- rate_t = cost_coeffs(x, xd->block+ib, PLANE_TYPE_Y_WITH_DC,
- &ta0, &tl0);
- rate_t += cost_coeffs(x, xd->block+ib+1, PLANE_TYPE_Y_WITH_DC,
- &ta1, &tl0);
- rate_t += cost_coeffs(x, xd->block+ib+4, PLANE_TYPE_Y_WITH_DC,
- &ta0, &tl1);
- rate_t += cost_coeffs(x, xd->block+ib+5, PLANE_TYPE_Y_WITH_DC,
- &ta1, &tl1);
- rate += rate_t;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- if (this_rd < best_rd)
- {
- *bestrate = rate;
- *bestratey = rate_t;
- *bestdistortion = distortion;
- besta0 = ta0;
- besta1 = ta1;
- bestl0 = tl0;
- bestl1 = tl1;
- best_rd = this_rd;
- *best_mode = mode;
+ } else {
+ continue; // compound intra prediction disabled for now; the call below is unreachable
+ RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra8x8_predict)
+ (b, mode, mode2, b->predictor);
+ }
+#endif
+
+ vp8_subtract_4b_c(be, b, 16);
+
+ x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
+ x->vp8_short_fdct8x4(be->src_diff + 64, be->coeff + 64, 32);
+
+ x->quantize_b_pair(x->block + ib, x->block + ib + 1,
+ xd->block + ib, xd->block + ib + 1);
+ x->quantize_b_pair(x->block + ib + 4, x->block + ib + 5,
+ xd->block + ib + 4, xd->block + ib + 5);
+
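+ /* The 8x8 block is coded as four 4x4 transforms (ib, ib + 1, ib + 4,
+    ib + 5); the distortion below sums the coefficient-domain error of
+    all four. */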
+ distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
+ ((x->block + ib)->coeff, (xd->block + ib)->dqcoeff) >> 2;
+ distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
+ ((x->block + ib + 1)->coeff, (xd->block + ib + 1)->dqcoeff) >> 2;
+ distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
+ ((x->block + ib + 4)->coeff, (xd->block + ib + 4)->dqcoeff) >> 2;
+ distortion += ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)
+ ((x->block + ib + 5)->coeff, (xd->block + ib + 5)->dqcoeff) >> 2;
+
+ ta0 = *(a + vp8_block2above[ib]);
+ ta1 = *(a + vp8_block2above[ib + 1]);
+ tl0 = *(l + vp8_block2above[ib]);
+ tl1 = *(l + vp8_block2above[ib + 4]);
+ rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
+ &ta0, &tl0);
+ rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
+ &ta1, &tl0);
+ rate_t += cost_coeffs(x, xd->block + ib + 4, PLANE_TYPE_Y_WITH_DC,
+ &ta0, &tl1);
+ rate_t += cost_coeffs(x, xd->block + ib + 5, PLANE_TYPE_Y_WITH_DC,
+ &ta1, &tl1);
+ rate += rate_t;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = rate_t;
+ *bestdistortion = distortion;
+ besta0 = ta0;
+ besta1 = ta1;
+ bestl0 = tl0;
+ bestl1 = tl1;
+ best_rd = this_rd;
+ *best_mode = mode;
#if CONFIG_COMP_INTRA_PRED
- *best_second_mode = mode2;
+ *best_second_mode = mode2;
#endif
- copy_predictor_8x8(best_predictor, b->predictor);
- vpx_memcpy(best_dqcoeff, b->dqcoeff, 64);
- vpx_memcpy(best_dqcoeff+32, b->dqcoeff+64, 64);
+ copy_predictor_8x8(best_predictor, b->predictor);
+ vpx_memcpy(best_dqcoeff, b->dqcoeff, 64);
+ vpx_memcpy(best_dqcoeff + 32, b->dqcoeff + 64, 64);
#if CONFIG_COMP_INTRA_PRED
- }
+ }
#endif
- }
}
- b->bmi.as_mode.first = (*best_mode);
+ }
+ b->bmi.as_mode.first = (*best_mode);
#if CONFIG_COMP_INTRA_PRED
- b->bmi.as_mode.second = (*best_second_mode);
-#endif
- vp8_encode_intra8x8 (IF_RTCD(&cpi->rtcd), x, ib);
- *(a + vp8_block2above[ib]) = besta0;
- *(a + vp8_block2above[ib+1]) = besta1;
- *(l + vp8_block2above[ib]) = bestl0;
- *(l + vp8_block2above[ib+4]) = bestl1;
- return best_rd;
+ b->bmi.as_mode.second = (*best_second_mode);
+#endif
+ vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
+ *(a + vp8_block2above[ib]) = besta0;
+ *(a + vp8_block2above[ib + 1]) = besta1;
+ *(l + vp8_block2above[ib]) = bestl0;
+ *(l + vp8_block2above[ib + 4]) = bestl1;
+ return best_rd;
}
-const int vp8_i8x8_block[4]={0, 2, 8, 10};
+const int vp8_i8x8_block[4] = {0, 2, 8, 10};
int rd_pick_intra8x8mby_modes(VP8_COMP *cpi,
- MACROBLOCK *mb,
- int *Rate,
- int *rate_y,
- int *Distortion,
- int best_rd)
-{
- MACROBLOCKD *const xd = &mb->e_mbd;
- int i,ib;
- int cost = mb->mbmode_cost [xd->frame_type] [I8X8_PRED];
- int distortion = 0;
- int tot_rate_y = 0;
- long long total_rd = 0;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
- int *i8x8mode_costs;
-
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- i8x8mode_costs = mb->i8x8_mode_costs;
-
- for (i = 0; i < 4; i++)
- {
- MODE_INFO *const mic = xd->mode_info_context;
- B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
+ MACROBLOCK *mb,
+ int *Rate,
+ int *rate_y,
+ int *Distortion,
+ int best_rd) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ int i, ib;
+ int cost = mb->mbmode_cost [xd->frame_type] [I8X8_PRED];
+ int distortion = 0;
+ int tot_rate_y = 0;
+ long long total_rd = 0;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+ int *i8x8mode_costs;
+
+ vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+
+ i8x8mode_costs = mb->i8x8_mode_costs;
+
+ for (i = 0; i < 4; i++) {
+ MODE_INFO *const mic = xd->mode_info_context;
+ B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
#if CONFIG_COMP_INTRA_PRED
- B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_second_mode);
+ B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_second_mode);
#endif
- int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
+ int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
- ib = vp8_i8x8_block[i];
- total_rd += rd_pick_intra8x8block(
- cpi, mb, ib, &best_mode,
+ ib = vp8_i8x8_block[i];
+ total_rd += rd_pick_intra8x8block(
+ cpi, mb, ib, &best_mode,
#if CONFIG_COMP_INTRA_PRED
- &best_second_mode,
+ &best_second_mode,
#endif
- i8x8mode_costs, ta, tl, &r, &ry, &d);
- cost += r;
- distortion += d;
- tot_rate_y += ry;
- mic->bmi[ib].as_mode.first = best_mode;
+ i8x8mode_costs, ta, tl, &r, &ry, &d);
+ cost += r;
+ distortion += d;
+ tot_rate_y += ry;
+ mic->bmi[ib].as_mode.first = best_mode;
#if CONFIG_COMP_INTRA_PRED
- mic->bmi[ib].as_mode.second = best_second_mode;
+ mic->bmi[ib].as_mode.second = best_second_mode;
#endif
- }
- *Rate = cost;
- *rate_y += tot_rate_y;
- *Distortion = distortion;
- return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
+ }
+ *Rate = cost;
+ *rate_y += tot_rate_y;
+ *Distortion = distortion;
+ return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
-static int rd_cost_mbuv(MACROBLOCK *mb)
-{
- int b;
- int cost = 0;
- MACROBLOCKD *x = &mb->e_mbd;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+static int rd_cost_mbuv(MACROBLOCK *mb) {
+ int b;
+ int cost = 0;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 16; b < 24; b++)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ for (b = 16; b < 24; b++)
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
- return cost;
+ return cost;
}
static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel)
-{
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ int *distortion, int fullpixel) {
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
- *rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *rate = rd_cost_mbuv(x);
+ *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+ return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-static int rd_cost_mbuv_8x8(MACROBLOCK *mb)
-{
- int b;
- int cost = 0;
- MACROBLOCKD *x = &mb->e_mbd;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
+static int rd_cost_mbuv_8x8(MACROBLOCK *mb) {
+ int b;
+ int cost = 0;
+ MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 16; b < 24; b+=4)
- cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b],
- tl + vp8_block2left_8x8[b]);
+ for (b = 16; b < 24; b += 4)
+ cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_UV,
+ ta + vp8_block2above_8x8[b],
+ tl + vp8_block2left_8x8[b]);
- return cost;
+ return cost;
}
static int rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel)
-{
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ int *distortion, int fullpixel) {
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- vp8_transform_mbuv_8x8(x);
+ vp8_transform_mbuv_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_quantize_mbuv_8x8(x);
- *rate = rd_cost_mbuv_8x8(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *rate = rd_cost_mbuv_8x8(x);
+ *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+ return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel)
-{
- vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ int *distortion, int fullpixel) {
+ vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
- *rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *rate = rd_cost_mbuv(x);
+ *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+ return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
MACROBLOCK *x,
int *rate,
int *rate_tokenonly,
- int *distortion)
-{
- MB_PREDICTION_MODE mode;
- MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ int *distortion) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
#if CONFIG_COMP_INTRA_PRED
- MB_PREDICTION_MODE mode2;
- MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
+ MB_PREDICTION_MODE mode2;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
#endif
- int best_rd = INT_MAX;
- int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
- int rate_to;
+ int best_rd = INT_MAX;
+ int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
+ int rate_to;
- for (mode = DC_PRED; mode <= TM_PRED; mode++)
- {
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
#if CONFIG_COMP_INTRA_PRED
- for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++)
- {
+ for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++) {
#endif
- int rate;
- int distortion;
- int this_rd;
+ int rate;
+ int distortion;
+ int this_rd;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2;
- if (mode2 == (MB_PREDICTION_MODE) (DC_PRED - 1))
- {
+ x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2;
+ if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
- (&x->e_mbd);
+ (&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
- }
- else
- {
- continue;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_comp_intra_predictors_mbuv)
- (&x->e_mbd);
- }
+ } else {
+ continue; // compound intra prediction disabled for now
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_comp_intra_predictors_mbuv)
+ (&x->e_mbd);
+ }
#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
+ x->src.uv_stride);
+ vp8_transform_mbuv(x);
+ vp8_quantize_mbuv(x);
- rate_to = rd_cost_mbuv(x);
- rate = rate_to
+ rate_to = rd_cost_mbuv(x);
+ rate = rate_to
+ x->intra_uv_mode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.uv_mode];
+ [x->e_mbd.mode_info_context->mbmi.uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- if (this_rd < best_rd)
- {
- best_rd = this_rd;
- d = distortion;
- r = rate;
- *rate_tokenonly = rate_to;
- mode_selected = mode;
+ if (this_rd < best_rd) {
+ best_rd = this_rd;
+ d = distortion;
+ r = rate;
+ *rate_tokenonly = rate_to;
+ mode_selected = mode;
#if CONFIG_COMP_INTRA_PRED
- mode2_selected = mode2;
- }
+ mode2_selected = mode2;
+ }
#endif
- }
}
+ }
- *rate = r;
- *distortion = d;
+ *rate = r;
+ *distortion = d;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2_selected;
+ x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2_selected;
#endif
}
@@ -1494,261 +1393,240 @@ static void rd_pick_intra_mbuv_mode_8x8(VP8_COMP *cpi,
MACROBLOCK *x,
int *rate,
int *rate_tokenonly,
- int *distortion)
-{
- MB_PREDICTION_MODE mode;
- MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
- int best_rd = INT_MAX;
- int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
- int rate_to;
-
- for (mode = DC_PRED; mode <= TM_PRED; mode++)
- {
- int rate;
- int distortion;
- int this_rd;
+ int *distortion) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ int best_rd = INT_MAX;
+ int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
+ int rate_to;
+
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ int rate;
+ int distortion;
+ int this_rd;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
- RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
- (&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
- vp8_transform_mbuv_8x8(x);
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
+ (&x->e_mbd);
+ ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
+ x->src.uv_stride);
+ vp8_transform_mbuv_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_quantize_mbuv_8x8(x);
- rate_to = rd_cost_mbuv_8x8(x);
- rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.uv_mode];
+ rate_to = rd_cost_mbuv_8x8(x);
+ rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type]
+ [x->e_mbd.mode_info_context->mbmi.uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- if (this_rd < best_rd)
- {
- best_rd = this_rd;
- d = distortion;
- r = rate;
- *rate_tokenonly = rate_to;
- mode_selected = mode;
- }
+ if (this_rd < best_rd) {
+ best_rd = this_rd;
+ d = distortion;
+ r = rate;
+ *rate_tokenonly = rate_to;
+ mode_selected = mode;
}
- *rate = r;
- *distortion = d;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+ }
+ *rate = r;
+ *distortion = d;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
}
int vp8_cost_mv_ref(VP8_COMP *cpi,
MB_PREDICTION_MODE m,
- const int near_mv_ref_ct[4])
-{
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- // If the mode coding is done entirely at the segment level
- // we should not account for it at the per mb level in rd code.
- // Note that if the segment level coding is expanded from single mode
- // to multiple mode masks as per reference frame coding we will need
- // to do something different here.
- if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE) )
- {
- VP8_COMMON *pc = &cpi->common;
-
- vp8_prob p [VP8_MVREFS-1];
- assert(NEARESTMV <= m && m <= SPLITMV);
- vp8_mv_ref_probs(pc, p, near_mv_ref_ct);
- return vp8_cost_token(vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
- }
- else
- return 0;
+ const int near_mv_ref_ct[4]) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ // If the mode coding is done entirely at the segment level
+ // we should not account for it at the per mb level in rd code.
+ // Note that if the segment level coding is expanded from single mode
+ // to multiple mode masks as per reference frame coding we will need
+ // to do something different here.
+ if (!segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
+ VP8_COMMON *pc = &cpi->common;
+
+ vp8_prob p [VP8_MVREFS - 1];
+ assert(NEARESTMV <= m && m <= SPLITMV);
+ vp8_mv_ref_probs(pc, p, near_mv_ref_ct);
+ return vp8_cost_token(vp8_mv_ref_tree, p,
+ vp8_mv_ref_encoding_array - NEARESTMV + m);
+ } else
+ return 0;
}
-void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)
-{
- x->e_mbd.mode_info_context->mbmi.mode = mb;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
+void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
+ x->e_mbd.mode_info_context->mbmi.mode = mb;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
}
static int labels2mode(
- MACROBLOCK *x,
- int const *labelings, int which_label,
- B_PREDICTION_MODE this_mode,
- int_mv *this_mv, int_mv *this_second_mv,
- int_mv seg_mvs[MAX_REF_FRAMES - 1],
- int_mv *best_ref_mv,
- int_mv *second_best_ref_mv,
- int *mvcost[2]
-)
-{
- MACROBLOCKD *const xd = & x->e_mbd;
- MODE_INFO *const mic = xd->mode_info_context;
- const int mis = xd->mode_info_stride;
-
- int cost = 0;
- int thismvcost = 0;
-
- /* We have to be careful retrieving previously-encoded motion vectors.
- Ones from this macroblock have to be pulled from the BLOCKD array
- as they have not yet made it to the bmi array in our MB_MODE_INFO. */
-
- int i = 0;
-
- do
- {
- BLOCKD *const d = xd->block + i;
- const int row = i >> 2, col = i & 3;
-
- B_PREDICTION_MODE m;
-
- if (labelings[i] != which_label)
- continue;
-
- if (col && labelings[i] == labelings[i-1])
- m = LEFT4X4;
- else if (row && labelings[i] == labelings[i-4])
- m = ABOVE4X4;
- else
- {
- // the only time we should do costing for new motion vector or mode
- // is when we are on a new label (jbb May 08, 2007)
- switch (m = this_mode)
- {
- case NEW4X4 :
- if (xd->mode_info_context->mbmi.second_ref_frame)
- {
- this_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.ref_frame - 1].as_int;
- this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
- }
+ MACROBLOCK *x,
+ int const *labelings, int which_label,
+ B_PREDICTION_MODE this_mode,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv seg_mvs[MAX_REF_FRAMES - 1],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int *mvcost[2]
+) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+
+ int cost = 0;
+ int thismvcost = 0;
+
+ /* We have to be careful retrieving previously-encoded motion vectors.
+ Ones from this macroblock have to be pulled from the BLOCKD array
+ as they have not yet made it to the bmi array in our MB_MODE_INFO. */
+
+ int i = 0;
+
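+ /* Blocks that continue a same-label run from the left or above inherit
+    that neighbour's motion vector implicitly; only the first block of a
+    label is costed for its mode (and, for NEW4X4, its motion vector). */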
+ do {
+ BLOCKD *const d = xd->block + i;
+ const int row = i >> 2, col = i & 3;
+
+ B_PREDICTION_MODE m;
+
+ if (labelings[i] != which_label)
+ continue;
+
+ if (col && labelings[i] == labelings[i - 1])
+ m = LEFT4X4;
+ else if (row && labelings[i] == labelings[i - 4])
+ m = ABOVE4X4;
+ else {
+ // the only time we should do costing for new motion vector or mode
+ // is when we are on a new label (jbb May 08, 2007)
+ switch (m = this_mode) {
+ case NEW4X4 :
+ if (xd->mode_info_context->mbmi.second_ref_frame) {
+ this_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.ref_frame - 1].as_int;
+ this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
+ }
#if CONFIG_HIGH_PRECISION_MV
- thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
- 102, xd->allow_high_precision_mv);
- if (xd->mode_info_context->mbmi.second_ref_frame)
- {
- thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
- 102, xd->allow_high_precision_mv);
- }
+ thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
+ 102, xd->allow_high_precision_mv);
+ if (xd->mode_info_context->mbmi.second_ref_frame) {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
+ 102, xd->allow_high_precision_mv);
+ }
#else
- thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
- if (xd->mode_info_context->mbmi.second_ref_frame)
- {
- thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
- }
-#endif
- break;
- case LEFT4X4:
- this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
- if (xd->mode_info_context->mbmi.second_ref_frame)
- this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int : left_block_second_mv(mic, i);
- break;
- case ABOVE4X4:
- this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int : above_block_mv(mic, i, mis);
- if (xd->mode_info_context->mbmi.second_ref_frame)
- this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int : above_block_second_mv(mic, i, mis);
- break;
- case ZERO4X4:
- this_mv->as_int = 0;
- if (xd->mode_info_context->mbmi.second_ref_frame)
- this_second_mv->as_int = 0;
- break;
- default:
- break;
- }
+ thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
+ if (xd->mode_info_context->mbmi.second_ref_frame) {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
+ }
+#endif
+ break;
+ case LEFT4X4:
+ this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int : left_block_second_mv(mic, i);
+ break;
+ case ABOVE4X4:
+ this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int : above_block_mv(mic, i, mis);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int : above_block_second_mv(mic, i, mis);
+ break;
+ case ZERO4X4:
+ this_mv->as_int = 0;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = 0;
+ break;
+ default:
+ break;
+ }
- if (m == ABOVE4X4) // replace above with left if same
- {
- int_mv left_mv, left_second_mv;
+ if (m == ABOVE4X4) { // replace above with left if same
+ int_mv left_mv, left_second_mv;
- left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
- left_block_mv(mic, i);
- if (xd->mode_info_context->mbmi.second_ref_frame)
- left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
- left_block_second_mv(mic, i);
+ left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
+ left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
+ left_block_second_mv(mic, i);
- if (left_mv.as_int == this_mv->as_int &&
- (!xd->mode_info_context->mbmi.second_ref_frame ||
- left_second_mv.as_int == this_second_mv->as_int))
- m = LEFT4X4;
- }
+ if (left_mv.as_int == this_mv->as_int &&
+ (!xd->mode_info_context->mbmi.second_ref_frame ||
+ left_second_mv.as_int == this_second_mv->as_int))
+ m = LEFT4X4;
+ }
- cost = x->inter_bmode_costs[ m];
- }
+ cost = x->inter_bmode_costs[m];
+ }
- d->bmi.as_mv.first.as_int = this_mv->as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
- d->bmi.as_mv.second.as_int = this_second_mv->as_int;
+ d->bmi.as_mv.first.as_int = this_mv->as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ d->bmi.as_mv.second.as_int = this_second_mv->as_int;
- x->partition_info->bmi[i].mode = m;
- x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
- x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
- }
- while (++i < 16);
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
+ } while (++i < 16);
- cost += thismvcost ;
- return cost;
+ cost += thismvcost;
+ return cost;
}
static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
int which_label, ENTROPY_CONTEXT *ta,
- ENTROPY_CONTEXT *tl)
-{
- int cost = 0;
- int b;
- MACROBLOCKD *x = &mb->e_mbd;
+ ENTROPY_CONTEXT *tl) {
+ int cost = 0;
+ int b;
+ MACROBLOCKD *x = &mb->e_mbd;
- for (b = 0; b < 16; b++)
- if (labels[ b] == which_label)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[b],
- tl + vp8_block2left[b]);
+ for (b = 0; b < 16; b++)
+ if (labels[b] == which_label)
+ cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[b],
+ tl + vp8_block2left[b]);
- return cost;
+ return cost;
}
static unsigned int vp8_encode_inter_mb_segment(
- MACROBLOCK *x,
- int const *labels,
- int which_label,
- const VP8_ENCODER_RTCD *rtcd)
-{
- int i;
- unsigned int distortion = 0;
-
- for (i = 0; i < 16; i++)
- {
- if (labels[i] == which_label)
- {
- BLOCKD *bd = &x->e_mbd.block[i];
- BLOCK *be = &x->block[i];
- int thisdistortion;
-
- vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- vp8_build_2nd_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict_avg);
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-
- // set to 0 no way to account for 2nd order DC so discount
- //be->coeff[0] = 0;
- x->quantize_b(be, bd);
- thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb,berr)(
- be->coeff,
- bd->dqcoeff)/4;
- distortion += thisdistortion;
- }
- }
- return distortion;
+ MACROBLOCK *x,
+ int const *labels,
+ int which_label,
+ const VP8_ENCODER_RTCD *rtcd) {
+ int i;
+ unsigned int distortion = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (labels[i] == which_label) {
+ BLOCKD *bd = &x->e_mbd.block[i];
+ BLOCK *be = &x->block[i];
+ int thisdistortion;
+
+ vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ vp8_build_2nd_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict_avg);
+ ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+
+ // Zeroing be->coeff[0] would discount the DC term, since there is no
+ // way to account for the 2nd order DC here:
+ // be->coeff[0] = 0;
+ x->quantize_b(be, bd);
+ thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb, berr)(
+ be->coeff,
+ bd->dqcoeff) / 4;
+ distortion += thisdistortion;
+ }
+ }
+ return distortion;
}
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
-typedef struct
-{
+typedef struct {
int_mv *ref_mv, *second_ref_mv;
int_mv mvp;
@@ -1772,312 +1650,288 @@ typedef struct
static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
BEST_SEG_INFO *bsi, unsigned int segmentation,
- int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1])
-{
- int i;
- int const *labels;
- int br = 0;
- int bd = 0;
- B_PREDICTION_MODE this_mode;
-
-
- int label_count;
- int this_segment_rd = 0;
- int label_mv_thresh;
- int rate = 0;
- int sbr = 0;
- int sbd = 0;
- int segmentyrate = 0;
-
- vp8_variance_fn_ptr_t *v_fn_ptr;
-
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
- ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
- ENTROPY_CONTEXT *ta_b;
- ENTROPY_CONTEXT *tl_b;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
- ta_b = (ENTROPY_CONTEXT *)&t_above_b;
- tl_b = (ENTROPY_CONTEXT *)&t_left_b;
-
- br = 0;
- bd = 0;
+ int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
+ int i;
+ int const *labels;
+ int br = 0;
+ int bd = 0;
+ B_PREDICTION_MODE this_mode;
+
+
+ int label_count;
+ int this_segment_rd = 0;
+ int label_mv_thresh;
+ int rate = 0;
+ int sbr = 0;
+ int sbd = 0;
+ int segmentyrate = 0;
+
+ vp8_variance_fn_ptr_t *v_fn_ptr;
+
+ ENTROPY_CONTEXT_PLANES t_above, t_left;
+ ENTROPY_CONTEXT *ta;
+ ENTROPY_CONTEXT *tl;
+ ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
+ ENTROPY_CONTEXT *ta_b;
+ ENTROPY_CONTEXT *tl_b;
+
+ vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta = (ENTROPY_CONTEXT *)&t_above;
+ tl = (ENTROPY_CONTEXT *)&t_left;
+ ta_b = (ENTROPY_CONTEXT *)&t_above_b;
+ tl_b = (ENTROPY_CONTEXT *)&t_left_b;
+
+ br = 0;
+ bd = 0;
+
+ v_fn_ptr = &cpi->fn_ptr[segmentation];
+ labels = vp8_mbsplits[segmentation];
+ label_count = vp8_mbsplit_count[segmentation];
+
+ // A multiplier of 64 makes this threshold very large, so that MVs on
+ // segments are checked only rarely; a multiplier of 1 makes the MV
+ // threshold roughly equal to what it is for macroblocks.
+ label_mv_thresh = 1 * bsi->mvthresh / label_count;
+
+ // Segmentation method overheads
+ rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
+ rate += vp8_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
+ this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
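+ // RDCOST folds rate and distortion into one Lagrangian-style cost
+ // (roughly rdmult-scaled rate plus distortion); the zero distortion
+ // term here charges only the segmentation signalling bits.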
+ br += rate;
+
+ for (i = 0; i < label_count; i++) {
+ int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
+ int best_label_rd = INT_MAX;
+ B_PREDICTION_MODE mode_selected = ZERO4X4;
+ int bestlabelyrate = 0;
+
+ // search for the best motion vector on this segment
+ for (this_mode = LEFT4X4; this_mode <= NEW4X4; this_mode ++) {
+ int this_rd;
+ int distortion;
+ int labelyrate;
+ ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
+ ENTROPY_CONTEXT *ta_s;
+ ENTROPY_CONTEXT *tl_s;
+
+ vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ ta_s = (ENTROPY_CONTEXT *)&t_above_s;
+ tl_s = (ENTROPY_CONTEXT *)&t_left_s;
+
+ // motion search for newmv (single predictor case only)
+ if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4) {
+ int sseshift;
+ int num00;
+ int step_param = 0;
+ int further_steps;
+ int n;
+ int thissme;
+ int bestsme = INT_MAX;
+ int_mv temp_mv;
+ BLOCK *c;
+ BLOCKD *e;
+
+ // Is the best so far sufficiently good that we can't justify a new motion search?
+ if (best_label_rd < label_mv_thresh)
+ break;
+
+ if (cpi->compressor_speed) {
+ if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) {
+ bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
+ if (i == 1 && segmentation == BLOCK_16X8)
+ bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
+
+ step_param = bsi->sv_istep[i];
+ }
+
+ // use previous block's result as next block's MV predictor.
+ if (segmentation == BLOCK_4X4 && i > 0) {
+ bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv.first.as_int;
+ if (i == 4 || i == 8 || i == 12)
+ bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv.first.as_int;
+ step_param = 2;
+ }
+ }
+
+ further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+
+ {
+ int sadpb = x->sadperbit4;
+ int_mv mvp_full;
+
+ mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
+ mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
+
+ // find first label
+ n = vp8_mbsplit_offset[segmentation][i];
+
+ c = &x->block[n];
+ e = &x->e_mbd.block[n];
+
+ {
+ bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full,
+ &mode_mv[NEW4X4], step_param,
+ sadpb, &num00, v_fn_ptr,
+ XMVCOST,
+ bsi->ref_mv);
+
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ thissme = cpi->diamond_search_sad(x, c, e,
+ &mvp_full, &temp_mv,
+ step_param + n, sadpb,
+ &num00, v_fn_ptr,
+ XMVCOST,
+ bsi->ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEW4X4].as_int = temp_mv.as_int;
+ }
+ }
+ }
+ }
- v_fn_ptr = &cpi->fn_ptr[segmentation];
- labels = vp8_mbsplits[segmentation];
- label_count = vp8_mbsplit_count[segmentation];
+ sseshift = segmentation_to_sseshift[segmentation];
- // 64 makes this threshold really big effectively
- // making it so that we very rarely check mvs on
- // segments. setting this to 1 would make mv thresh
- // roughly equal to what it is for macroblocks
- label_mv_thresh = 1 * bsi->mvthresh / label_count ;
+ // Should we do a full search (best quality only)
+ if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
+ /* Check if mvp_full is within the range. */
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
- // Segmentation method overheads
- rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
- rate += vp8_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
- this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
- br += rate;
+ thissme = cpi->full_search_sad(x, c, e, &mvp_full,
+ sadpb, 16, v_fn_ptr,
+ XMVCOST, bsi->ref_mv);
- for (i = 0; i < label_count; i++)
- {
- int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
- int best_label_rd = INT_MAX;
- B_PREDICTION_MODE mode_selected = ZERO4X4;
- int bestlabelyrate = 0;
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
+ } else {
+ // The full search result is actually worse so re-instate the previous best vector
+ e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
+ }
+ }
+ }
- // search for the best motion vector on this segment
- for (this_mode = LEFT4X4; this_mode <= NEW4X4 ; this_mode ++)
- {
- int this_rd;
- int distortion;
- int labelyrate;
- ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
- ENTROPY_CONTEXT *ta_s;
- ENTROPY_CONTEXT *tl_s;
-
- vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta_s = (ENTROPY_CONTEXT *)&t_above_s;
- tl_s = (ENTROPY_CONTEXT *)&t_left_s;
-
- // motion search for newmv (single predictor case only)
- if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
- {
- int sseshift;
- int num00;
- int step_param = 0;
- int further_steps;
- int n;
- int thissme;
- int bestsme = INT_MAX;
- int_mv temp_mv;
- BLOCK *c;
- BLOCKD *e;
-
- // Is the best so far sufficiently good that we cant justify doing and new motion search.
- if (best_label_rd < label_mv_thresh)
- break;
-
- if(cpi->compressor_speed)
- {
- if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8)
- {
- bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
- if (i==1 && segmentation == BLOCK_16X8)
- bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
-
- step_param = bsi->sv_istep[i];
- }
-
- // use previous block's result as next block's MV predictor.
- if (segmentation == BLOCK_4X4 && i>0)
- {
- bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.as_mv.first.as_int;
- if (i==4 || i==8 || i==12)
- bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.as_mv.first.as_int;
- step_param = 2;
- }
- }
+ if (bestsme < INT_MAX) {
+ int distortion;
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
+ bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
+ &distortion, &sse);
- further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
-
- {
- int sadpb = x->sadperbit4;
- int_mv mvp_full;
-
- mvp_full.as_mv.row = bsi->mvp.as_mv.row >>3;
- mvp_full.as_mv.col = bsi->mvp.as_mv.col >>3;
-
- // find first label
- n = vp8_mbsplit_offset[segmentation][i];
-
- c = &x->block[n];
- e = &x->e_mbd.block[n];
-
- {
- bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full,
- &mode_mv[NEW4X4], step_param,
- sadpb, &num00, v_fn_ptr,
- XMVCOST,
- bsi->ref_mv);
-
- n = num00;
- num00 = 0;
-
- while (n < further_steps)
- {
- n++;
-
- if (num00)
- num00--;
- else
- {
- thissme = cpi->diamond_search_sad(x, c, e,
- &mvp_full, &temp_mv,
- step_param + n, sadpb,
- &num00, v_fn_ptr,
- XMVCOST,
- bsi->ref_mv);
-
- if (thissme < bestsme)
- {
- bestsme = thissme;
- mode_mv[NEW4X4].as_int = temp_mv.as_int;
- }
- }
- }
- }
-
- sseshift = segmentation_to_sseshift[segmentation];
-
- // Should we do a full search (best quality only)
- if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
- {
- /* Check if mvp_full is within the range. */
- vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
-
- thissme = cpi->full_search_sad(x, c, e, &mvp_full,
- sadpb, 16, v_fn_ptr,
- XMVCOST, bsi->ref_mv);
-
- if (thissme < bestsme)
- {
- bestsme = thissme;
- mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
- }
- else
- {
- // The full search result is actually worse so re-instate the previous best vector
- e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
- }
- }
- }
+ // save motion search result for use in compound prediction
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
+ }
+ } /* NEW4X4 */
+ else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4) {
+ // motion search not completed? Then skip NEWMV for this block in compound prediction.
+ if (seg_mvs[i][x->e_mbd.mode_info_context->mbmi.second_ref_frame - 1].as_int == INVALID_MV ||
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int == INVALID_MV) {
+ continue;
+ }
+ }
- if (bestsme < INT_MAX)
- {
- int distortion;
- unsigned int sse;
- cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
- &distortion, &sse);
+ rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
+ &second_mode_mv[this_mode], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
- // safe motion search result for use in compound prediction
- seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
- }
- } /* NEW4X4 */
- else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
- {
- // motion search not completed? Then skip newmv for this block with comppred
- if (seg_mvs[i][x->e_mbd.mode_info_context->mbmi.second_ref_frame - 1].as_int == INVALID_MV ||
- seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int == INVALID_MV)
- {
- continue;
- }
- }
+ // Trap vectors that reach beyond the UMV borders
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ if (((second_mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((second_mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+ }
- rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
- &second_mode_mv[this_mode], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
+ distortion = vp8_encode_inter_mb_segment(
+ x, labels, i,
+ IF_RTCD(&cpi->rtcd));
- // Trap vectors that reach beyond the UMV borders
- if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
- {
- continue;
- }
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- if (((second_mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
- ((second_mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
- ((second_mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
- ((second_mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
- {
- continue;
- }
- }
+ labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
+ rate += labelyrate;
- distortion = vp8_encode_inter_mb_segment(
- x, labels, i,
- IF_RTCD(&cpi->rtcd));
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
- rate += labelyrate;
+ if (this_rd < best_label_rd) {
+ sbr = rate;
+ sbd = distortion;
+ bestlabelyrate = labelyrate;
+ mode_selected = this_mode;
+ best_label_rd = this_rd;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
- if (this_rd < best_label_rd)
- {
- sbr = rate;
- sbd = distortion;
- bestlabelyrate = labelyrate;
- mode_selected = this_mode;
- best_label_rd = this_rd;
+ }
+ } /*for each 4x4 mode*/
- vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
- }
- } /*for each 4x4 mode*/
+ labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
+ &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
- vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
+ br += sbr;
+ bd += sbd;
+ segmentyrate += bestlabelyrate;
+ this_segment_rd += best_label_rd;
- labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
-
- br += sbr;
- bd += sbd;
- segmentyrate += bestlabelyrate;
- this_segment_rd += best_label_rd;
+ if (this_segment_rd >= bsi->segment_rd) {
+ break;
+ }
- if (this_segment_rd >= bsi->segment_rd) {
- break;
- }
+ } /* for each label */
- } /* for each label */
+ if (this_segment_rd < bsi->segment_rd) {
+ bsi->r = br;
+ bsi->d = bd;
+ bsi->segment_yrate = segmentyrate;
+ bsi->segment_rd = this_segment_rd;
+ bsi->segment_num = segmentation;
- if (this_segment_rd < bsi->segment_rd)
- {
- bsi->r = br;
- bsi->d = bd;
- bsi->segment_yrate = segmentyrate;
- bsi->segment_rd = this_segment_rd;
- bsi->segment_num = segmentation;
-
- // store everything needed to come back to this!!
- for (i = 0; i < 16; i++)
- {
- BLOCKD *bd = &x->e_mbd.block[i];
+ // store everything needed to come back to this!!
+ for (i = 0; i < 16; i++) {
+ BLOCKD *bd = &x->e_mbd.block[i];
- bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
- bsi->modes[i] = x->partition_info->bmi[i].mode;
- bsi->eobs[i] = bd->eob;
- }
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
+ bsi->eobs[i] = bd->eob;
}
+ }
}
static __inline
-void vp8_cal_step_param(int sr, int *sp)
-{
- int step = 0;
+void vp8_cal_step_param(int sr, int *sp) {
+ int step = 0;
- if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP;
- else if (sr < 1) sr = 1;
+ if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP;
+ else if (sr < 1) sr = 1;
- while (sr>>=1)
- step++;
+ while (sr >>= 1)
+ step++;
- *sp = MAX_MVSEARCH_STEPS - 1 - step;
+ *sp = MAX_MVSEARCH_STEPS - 1 - step;
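+ // step ends up as floor(log2(sr)); e.g. (assumed values) sr == 8 gives
+ // step == 3 and *sp == MAX_MVSEARCH_STEPS - 4, so larger search ranges
+ // start the diamond search at an earlier (larger) step.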
}
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
@@ -2085,1085 +1939,969 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
int *mdcounts, int *returntotrate,
int *returnyrate, int *returndistortion,
int mvthresh,
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1])
-{
- int i;
- BEST_SEG_INFO bsi;
-
- vpx_memset(&bsi, 0, sizeof(bsi));
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
+ int i;
+ BEST_SEG_INFO bsi;
+
+ vpx_memset(&bsi, 0, sizeof(bsi));
+
+ bsi.segment_rd = best_rd;
+ bsi.ref_mv = best_ref_mv;
+ bsi.second_ref_mv = second_best_ref_mv;
+ bsi.mvp.as_int = best_ref_mv->as_int;
+ bsi.mvthresh = mvthresh;
+ bsi.mdcounts = mdcounts;
+
+ for (i = 0; i < 16; i++) {
+ bsi.modes[i] = ZERO4X4;
+ }
+
+ if (cpi->compressor_speed == 0) {
+ /* for now, we will keep the original segmentation order
+ when in best quality mode */
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ } else {
+ int sr;
+
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
+
+
+ if (bsi.segment_rd < best_rd) {
+ int col_min = (best_ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.col & 7) ? 1 : 0);
+ int row_min = (best_ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.row & 7) ? 1 : 0);
+ int col_max = (best_ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (best_ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
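+ // MVs are stored in 1/8-pel units, so >> 3 converts to full pel; the
+ // (x & 7) term shrinks the lower bound by one when the reference MV
+ // has a fractional part, keeping the window within MAX_FULL_PEL_VAL.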
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
+ if (x->mv_col_min < col_min)
+ x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max)
+ x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min)
+ x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max)
+ x->mv_row_max = row_max;
+
+ /* Get 8x8 result */
+ bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
+ bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
+ bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
+ bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
+
+ /* Use the 8x8 result as the 16x8/8x16 predictor MV. Adjust the search range according to the closeness of the 2 MVs. */
+ /* block 8X16 */
+ {
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+
+ sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- bsi.segment_rd = best_rd;
- bsi.ref_mv = best_ref_mv;
- bsi.second_ref_mv = second_best_ref_mv;
- bsi.mvp.as_int = best_ref_mv->as_int;
- bsi.mvthresh = mvthresh;
- bsi.mdcounts = mdcounts;
-
- for(i = 0; i < 16; i++)
- {
- bsi.modes[i] = ZERO4X4;
- }
-
- if(cpi->compressor_speed == 0)
- {
- /* for now, we will keep the original segmentation order
- when in best quality mode */
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
- }
- else
- {
- int sr;
-
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
-
-
- if (bsi.segment_rd < best_rd)
- {
- int col_min = (best_ref_mv->as_mv.col>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.col & 7)?1:0);
- int row_min = (best_ref_mv->as_mv.row>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.row & 7)?1:0);
- int col_max = (best_ref_mv->as_mv.col>>3) + MAX_FULL_PEL_VAL;
- int row_max = (best_ref_mv->as_mv.row>>3) + MAX_FULL_PEL_VAL;
-
- int tmp_col_min = x->mv_col_min;
- int tmp_col_max = x->mv_col_max;
- int tmp_row_min = x->mv_row_min;
- int tmp_row_max = x->mv_row_max;
-
- /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
- if (x->mv_col_min < col_min )
- x->mv_col_min = col_min;
- if (x->mv_col_max > col_max )
- x->mv_col_max = col_max;
- if (x->mv_row_min < row_min )
- x->mv_row_min = row_min;
- if (x->mv_row_max > row_max )
- x->mv_row_max = row_max;
-
- /* Get 8x8 result */
- bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
- bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
- bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
- bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
-
- /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
- /* block 8X16 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col))>>3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
-
- sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
-
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- }
+ }
- /* block 16X8 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col))>>3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+ /* block 16X8 */
+ {
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+ sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
- }
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
+ }
- /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
- /* Not skip 4x4 if speed=0 (good quality) */
- if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
- {
- bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
- }
+ /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
+ /* Do not skip the 4x4 search when speed == 0 (good quality) */
+ if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) { /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
+ bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ }
- /* restore UMV window */
- x->mv_col_min = tmp_col_min;
- x->mv_col_max = tmp_col_max;
- x->mv_row_min = tmp_row_min;
- x->mv_row_max = tmp_row_max;
- }
+ /* restore UMV window */
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
}
+ }
- /* set it to the best */
- for (i = 0; i < 16; i++)
- {
- BLOCKD *bd = &x->e_mbd.block[i];
+ /* set it to the best */
+ for (i = 0; i < 16; i++) {
+ BLOCKD *bd = &x->e_mbd.block[i];
- bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
- bd->eob = bsi.eobs[i];
- }
+ bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
+ bd->eob = bsi.eobs[i];
+ }
- *returntotrate = bsi.r;
- *returndistortion = bsi.d;
- *returnyrate = bsi.segment_yrate;
+ *returntotrate = bsi.r;
+ *returndistortion = bsi.d;
+ *returnyrate = bsi.segment_yrate;
- /* save partitions */
- x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
- x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
+ /* save partitions */
+ x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
+ x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
- for (i = 0; i < x->partition_info->count; i++)
- {
- int j;
+ for (i = 0; i < x->partition_info->count; i++) {
+ int j;
- j = vp8_mbsplit_offset[bsi.segment_num][i];
+ j = vp8_mbsplit_offset[bsi.segment_num][i];
- x->partition_info->bmi[i].mode = bsi.modes[j];
- x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
- }
- /*
- * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
- */
- x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
+ x->partition_info->bmi[i].mode = bsi.modes[j];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;
-
- return bsi.segment_rd;
+ x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
+ }
+ /*
+ * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ */
+ x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;
+
+ return bsi.segment_rd;
}
/* Order arr in increasing order, original position stored in idx */
-static void insertsortmv(int arr[], int len)
-{
- int i, j, k;
+static void insertsortmv(int arr[], int len) {
+ int i, j, k;
- for ( i = 1 ; i <= len-1 ; i++ )
- {
- for ( j = 0 ; j < i ; j++ )
- {
- if ( arr[j] > arr[i] )
- {
- int temp;
+ for (i = 1; i <= len - 1; i++) {
+ for (j = 0; j < i; j++) {
+ if (arr[j] > arr[i]) {
+ int temp;
- temp = arr[i];
+ temp = arr[i];
- for ( k = i; k >j; k--)
- arr[k] = arr[k - 1] ;
+ for (k = i; k > j; k--)
+ arr[k] = arr[k - 1];
- arr[j] = temp ;
- }
- }
+ arr[j] = temp;
+ }
}
+ }
}
-static void insertsortsad(int arr[],int idx[], int len)
-{
- int i, j, k;
+static void insertsortsad(int arr[], int idx[], int len) {
+ int i, j, k;
- for ( i = 1 ; i <= len-1 ; i++ )
- {
- for ( j = 0 ; j < i ; j++ )
- {
- if ( arr[j] > arr[i] )
- {
- int temp, tempi;
-
- temp = arr[i];
- tempi = idx[i];
+ for (i = 1; i <= len - 1; i++) {
+ for (j = 0; j < i; j++) {
+ if (arr[j] > arr[i]) {
+ int temp, tempi;
- for ( k = i; k >j; k--)
- {
- arr[k] = arr[k - 1] ;
- idx[k] = idx[k - 1];
- }
+ temp = arr[i];
+ tempi = idx[i];
- arr[j] = temp ;
- idx[j] = tempi;
- }
+ for (k = i; k > j; k--) {
+ arr[k] = arr[k - 1];
+ idx[k] = idx[k - 1];
}
+
+ arr[j] = temp;
+ idx[j] = tempi;
+ }
}
+ }
}
-//The improved MV prediction
+// The improved MV prediction
void vp8_mv_pred
(
- VP8_COMP *cpi,
- MACROBLOCKD *xd,
- const MODE_INFO *here,
- int_mv *mvp,
- int refframe,
- int *ref_frame_sign_bias,
- int *sr,
- int near_sadidx[]
-)
-{
- const MODE_INFO *above = here - xd->mode_info_stride;
- const MODE_INFO *left = here - 1;
- const MODE_INFO *aboveleft = above - 1;
- int_mv near_mvs[8];
- int near_ref[8];
- int_mv mv;
- int vcnt=0;
- int find=0;
- int mb_offset;
-
- int mvx[8];
- int mvy[8];
- int i;
-
- mv.as_int = 0;
-
- if(here->mbmi.ref_frame != INTRA_FRAME)
- {
- near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
- near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;
-
- // read in 3 nearby block's MVs from current frame as prediction candidates.
- if (above->mbmi.ref_frame != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
- mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = above->mbmi.ref_frame;
- }
- vcnt++;
- if (left->mbmi.ref_frame != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
- mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = left->mbmi.ref_frame;
- }
- vcnt++;
- if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
- mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = aboveleft->mbmi.ref_frame;
- }
- vcnt++;
-
- // read in 5 nearby block's MVs from last frame.
- if(cpi->common.last_frame_type != KEY_FRAME)
- {
- mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ;
-
- // current in last frame
- if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
- mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
- }
- vcnt++;
-
- // above in last frame
- if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int;
- mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride-1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1];
- }
- vcnt++;
-
- // left in last frame
- if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int;
- mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset -1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
- }
- vcnt++;
-
- // right in last frame
- if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int;
- mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = cpi->lf_ref_frame[mb_offset +1];
- }
- vcnt++;
-
- // below in last frame
- if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME)
- {
- near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int;
- mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
- near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1];
- }
- vcnt++;
- }
-
- for(i=0; i< vcnt; i++)
- {
- if(near_ref[near_sadidx[i]] != INTRA_FRAME)
- {
- if(here->mbmi.ref_frame == near_ref[near_sadidx[i]])
- {
- mv.as_int = near_mvs[near_sadidx[i]].as_int;
- find = 1;
- if (i < 3)
- *sr = 3;
- else
- *sr = 2;
- break;
- }
- }
- }
-
- if(!find)
- {
- for(i=0; i<vcnt; i++)
- {
- mvx[i] = near_mvs[i].as_mv.row;
- mvy[i] = near_mvs[i].as_mv.col;
- }
-
- insertsortmv(mvx, vcnt);
- insertsortmv(mvy, vcnt);
- mv.as_mv.row = mvx[vcnt/2];
- mv.as_mv.col = mvy[vcnt/2];
-
- find = 1;
- //sr is set to 0 to allow calling function to decide the search range.
- *sr = 0;
- }
- }
-
- /* Set up return values */
- mvp->as_int = mv.as_int;
- vp8_clamp_mv2(mvp, xd);
+ VP8_COMP *cpi,
+ MACROBLOCKD *xd,
+ const MODE_INFO *here,
+ int_mv *mvp,
+ int refframe,
+ int *ref_frame_sign_bias,
+ int *sr,
+ int near_sadidx[]
+) {
+ const MODE_INFO *above = here - xd->mode_info_stride;
+ const MODE_INFO *left = here - 1;
+ const MODE_INFO *aboveleft = above - 1;
+ int_mv near_mvs[8];
+ int near_ref[8];
+ int_mv mv;
+ int vcnt = 0;
+ int find = 0;
+ int mb_offset;
+
+ int mvx[8];
+ int mvy[8];
+ int i;
+
+ mv.as_int = 0;
+
+ if (here->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
+ near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;
+
+ // read in 3 nearby blocks' MVs from the current frame as prediction candidates.
+ if (above->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = above->mbmi.ref_frame;
+ }
+ vcnt++;
+ if (left->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = left->mbmi.ref_frame;
+ }
+ vcnt++;
+ if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
+ mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = aboveleft->mbmi.ref_frame;
+ }
+ vcnt++;
+
+ // read in 5 nearby blocks' MVs from the last frame.
+ if (cpi->common.last_frame_type != KEY_FRAME) {
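+ // mb_to_top/left_edge are in 1/8th-pel units (16 pixels * 8 == 128 per
+ // MB), so -edge / 128 recovers the MB row/col; the +1 terms appear to
+ // step past the one-MB border of the mode-info arrays.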
+ mb_offset = (-xd->mb_to_top_edge / 128 + 1) * (xd->mode_info_stride + 1) + (-xd->mb_to_left_edge / 128 + 1);
+
+ // current in last frame
+ if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
+ }
+ vcnt++;
+
+ // above in last frame
+ if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride - 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride - 1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1];
+ }
+ vcnt++;
+
+ // left in last frame
+ if (cpi->lf_ref_frame[mb_offset - 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - 1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
+ }
+ vcnt++;
+
+ // right in last frame
+ if (cpi->lf_ref_frame[mb_offset + 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + 1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + 1];
+ }
+ vcnt++;
+
+ // below in last frame
+ if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1] != INTRA_FRAME) {
+ near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride + 1].as_int;
+ mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride + 1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+ near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1];
+ }
+ vcnt++;
+ }
+
+ for (i = 0; i < vcnt; i++) {
+ if (near_ref[near_sadidx[i]] != INTRA_FRAME) {
+ if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) {
+ mv.as_int = near_mvs[near_sadidx[i]].as_int;
+ find = 1;
+ if (i < 3)
+ *sr = 3;
+ else
+ *sr = 2;
+ break;
+ }
+ }
+ }
+
+ if (!find) {
+ for (i = 0; i < vcnt; i++) {
+ mvx[i] = near_mvs[i].as_mv.row;
+ mvy[i] = near_mvs[i].as_mv.col;
+ }
+
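+ // Sort each component and take the middle element: a component-wise
+ // (approximate, for even vcnt) median of the candidate MVs.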
+ insertsortmv(mvx, vcnt);
+ insertsortmv(mvy, vcnt);
+ mv.as_mv.row = mvx[vcnt / 2];
+ mv.as_mv.col = mvy[vcnt / 2];
+
+ find = 1;
+ // sr is set to 0 to allow the calling function to decide the search range.
+ *sr = 0;
+ }
+ }
+
+ /* Set up return values */
+ mvp->as_int = mv.as_int;
+ vp8_clamp_mv2(mvp, xd);
}
-void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
-{
-
- int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
- BLOCK *b = &x->block[0];
- unsigned char *src_y_ptr = *(b->base_src);
-
- //calculate sad for current frame 3 nearby MBs.
- if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0)
- {
- near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
- }else if(xd->mb_to_top_edge==0)
- { //only has left MB for sad calculation.
- near_sad[0] = near_sad[2] = INT_MAX;
- near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
- }else if(xd->mb_to_left_edge ==0)
- { //only has left MB for sad calculation.
- near_sad[1] = near_sad[2] = INT_MAX;
- near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
- }else
- {
- near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
- near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
- near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16 -16,xd->dst.y_stride, 0x7fffffff);
- }
-
- if(cpi->common.last_frame_type != KEY_FRAME)
- {
- //calculate sad for last frame 5 nearby MBs.
- unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
- int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
-
- if(xd->mb_to_top_edge==0) near_sad[4] = INT_MAX;
- if(xd->mb_to_left_edge ==0) near_sad[5] = INT_MAX;
- if(xd->mb_to_right_edge ==0) near_sad[6] = INT_MAX;
- if(xd->mb_to_bottom_edge==0) near_sad[7] = INT_MAX;
-
- if(near_sad[4] != INT_MAX)
- near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride *16, pre_y_stride, 0x7fffffff);
- if(near_sad[5] != INT_MAX)
- near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride, 0x7fffffff);
- near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer, pre_y_stride, 0x7fffffff);
- if(near_sad[6] != INT_MAX)
- near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride, 0x7fffffff);
- if(near_sad[7] != INT_MAX)
- near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride *16, pre_y_stride, 0x7fffffff);
- }
-
- if(cpi->common.last_frame_type != KEY_FRAME)
- {
- insertsortsad(near_sad, near_sadidx, 8);
- }else
- {
- insertsortsad(near_sad, near_sadidx, 3);
- }
+void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]) {
+
+ int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
+ BLOCK *b = &x->block[0];
+ unsigned char *src_y_ptr = *(b->base_src);
+
+ // calculate sad for current frame 3 nearby MBs.
+ if (xd->mb_to_top_edge == 0 && xd->mb_to_left_edge == 0) {
+ near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
+ } else if (xd->mb_to_top_edge == 0) {
+ // only has left MB for sad calculation.
+ near_sad[0] = near_sad[2] = INT_MAX;
+ near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride, 0x7fffffff);
+ } else if (xd->mb_to_left_edge == 0) {
+ // only has above MB for sad calculation.
+ near_sad[1] = near_sad[2] = INT_MAX;
+ near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16, xd->dst.y_stride, 0x7fffffff);
+ } else {
+ near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16, xd->dst.y_stride, 0x7fffffff);
+ near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride, 0x7fffffff);
+ near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16 - 16, xd->dst.y_stride, 0x7fffffff);
+ }
+
+ if (cpi->common.last_frame_type != KEY_FRAME) {
+ // calculate sad for last frame 5 nearby MBs.
+ unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
+ int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
+
+ if (xd->mb_to_top_edge == 0) near_sad[4] = INT_MAX;
+ if (xd->mb_to_left_edge == 0) near_sad[5] = INT_MAX;
+ if (xd->mb_to_right_edge == 0) near_sad[6] = INT_MAX;
+ if (xd->mb_to_bottom_edge == 0) near_sad[7] = INT_MAX;
+
+ if (near_sad[4] != INT_MAX)
+ near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride * 16, pre_y_stride, 0x7fffffff);
+ if (near_sad[5] != INT_MAX)
+ near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride, 0x7fffffff);
+ near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer, pre_y_stride, 0x7fffffff);
+ if (near_sad[6] != INT_MAX)
+ near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride, 0x7fffffff);
+ if (near_sad[7] != INT_MAX)
+ near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride * 16, pre_y_stride, 0x7fffffff);
+ }
+
+ if (cpi->common.last_frame_type != KEY_FRAME) {
+ insertsortsad(near_sad, near_sadidx, 8);
+ } else {
+ insertsortsad(near_sad, near_sadidx, 3);
+ }
}
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int_mv *second_best_ref_mv)
-{
- if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
- {
- int i;
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
+ if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) {
+ int i;
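+ // Update the MV entropy stats: each chosen MV's residual relative to
+ // the (second) best reference MV is binned around mv_max[_hp]; the
+ // low-precision tables are indexed at half resolution (>> 1).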
- for (i = 0; i < x->partition_info->count; i++)
- {
- if (x->partition_info->bmi[i].mode == NEW4X4)
- {
+ for (i = 0; i < x->partition_info->count; i++) {
+ if (x->partition_info->bmi[i].mode == NEW4X4) {
#if CONFIG_HIGH_PRECISION_MV
- if (x->e_mbd.allow_high_precision_mv)
- {
- cpi->MVcount_hp[0][mv_max_hp+(x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp+(x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col)]++;
+ if (x->e_mbd.allow_high_precision_mv) {
+ cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row)]++;
+ cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col)]++;
#if CONFIG_ADAPTIVE_ENTROPY
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- cpi->MVcount_hp[0][mv_max_hp+(x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp+(x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row)]++;
+ cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col)]++;
+ }
#endif
- }
- else
+ } else
#endif
- {
- cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
+ {
+ cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
#if CONFIG_ADAPTIVE_ENTROPY
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col) >> 1)]++;
+ }
#endif
- }
- }
}
+ }
}
- else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
- {
+ } else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
#if CONFIG_HIGH_PRECISION_MV
- if (x->e_mbd.allow_high_precision_mv)
- {
- cpi->MVcount_hp[0][mv_max_hp+(x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp+(x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
- - best_ref_mv->as_mv.col)]++;
+ if (x->e_mbd.allow_high_precision_mv) {
+ cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ - best_ref_mv->as_mv.row)]++;
+ cpi->MVcount_hp[1][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ - best_ref_mv->as_mv.col)]++;
#if CONFIG_ADAPTIVE_ENTROPY
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- cpi->MVcount_hp[0][mv_max_hp+(x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp+(x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row)]++;
+ cpi->MVcount_hp[1][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col)]++;
+ }
#endif
- }
- else
+ } else
#endif
- {
- cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
+ {
+ cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
#if CONFIG_ADAPTIVE_ENTROPY
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col) >> 1)]++;
+ }
#endif
- }
}
+ }
}
-static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4])
-{
- int i;
- MACROBLOCKD *xd = &x->e_mbd;
- for(i=0;i<4;i++)
- {
- int ib = vp8_i8x8_block[i];
- x->e_mbd.mode_info_context->bmi[ib+0].as_mode.first= modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib+1].as_mode.first= modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib+4].as_mode.first= modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib+5].as_mode.first= modes[0][i];
+static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) {
+ int i;
+ MACROBLOCKD *xd = &x->e_mbd;
+ for (i = 0; i < 4; i++) {
+ int ib = vp8_i8x8_block[i];
+ x->e_mbd.mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i];
+ x->e_mbd.mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i];
+ x->e_mbd.mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i];
+ x->e_mbd.mode_info_context->bmi[ib + 5].as_mode.first = modes[0][i];
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->bmi[ib+0].as_mode.second= modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib+1].as_mode.second= modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib+4].as_mode.second= modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib+5].as_mode.second= modes[1][i];
-#endif
- //printf("%d,%d,%d,%d %d,%d,%d,%d\n",
- // modes[0][0], modes[0][1], modes[0][2], modes[0][3],
- // modes[1][0], modes[1][1], modes[1][2], modes[1][3]);
- }
-
- for (i = 0; i < 16; i++)
- {
- xd->block[i].bmi = xd->mode_info_context->bmi[i];
- }
+ x->e_mbd.mode_info_context->bmi[ib + 0].as_mode.second = modes[1][i];
+ x->e_mbd.mode_info_context->bmi[ib + 1].as_mode.second = modes[1][i];
+ x->e_mbd.mode_info_context->bmi[ib + 4].as_mode.second = modes[1][i];
+ x->e_mbd.mode_info_context->bmi[ib + 5].as_mode.second = modes[1][i];
+#endif
+ // printf("%d,%d,%d,%d %d,%d,%d,%d\n",
+ // modes[0][0], modes[0][1], modes[0][2], modes[0][3],
+ // modes[1][0], modes[1][1], modes[1][2], modes[1][3]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ xd->block[i].bmi = xd->mode_info_context->bmi[i];
+ }
}
-extern void calc_ref_probs( int * count, vp8_prob * probs );
-static void estimate_curframe_refprobs(VP8_COMP *cpi, vp8_prob mod_refprobs[3], int pred_ref)
-{
- int norm_cnt[MAX_REF_FRAMES];
- const int *const rfct = cpi->count_mb_ref_frame_usage;
- int intra_count = rfct[INTRA_FRAME];
- int last_count = rfct[LAST_FRAME];
- int gf_count = rfct[GOLDEN_FRAME];
- int arf_count = rfct[ALTREF_FRAME];
-
- // Work out modified reference frame probabilities to use where prediction
- // of the reference frame fails
- if (pred_ref == INTRA_FRAME)
- {
- norm_cnt[0] = 0;
- norm_cnt[1] = last_count;
- norm_cnt[2] = gf_count;
- norm_cnt[3] = arf_count;
- calc_ref_probs( norm_cnt, mod_refprobs );
- mod_refprobs[0] = 0; // This branch implicit
- }
- else if (pred_ref == LAST_FRAME)
- {
- norm_cnt[0] = intra_count;
- norm_cnt[1] = 0;
- norm_cnt[2] = gf_count;
- norm_cnt[3] = arf_count;
- calc_ref_probs( norm_cnt, mod_refprobs);
- mod_refprobs[1] = 0; // This branch implicit
- }
- else if (pred_ref == GOLDEN_FRAME)
- {
- norm_cnt[0] = intra_count;
- norm_cnt[1] = last_count;
- norm_cnt[2] = 0;
- norm_cnt[3] = arf_count;
- calc_ref_probs( norm_cnt, mod_refprobs );
- mod_refprobs[2] = 0; // This branch implicit
- }
- else
- {
- norm_cnt[0] = intra_count;
- norm_cnt[1] = last_count;
- norm_cnt[2] = gf_count;
- norm_cnt[3] = 0;
- calc_ref_probs( norm_cnt, mod_refprobs );
- mod_refprobs[2] = 0; // This branch implicit
- }
+extern void calc_ref_probs(int *count, vp8_prob *probs);
+static void estimate_curframe_refprobs(VP8_COMP *cpi, vp8_prob mod_refprobs[3], int pred_ref) {
+ int norm_cnt[MAX_REF_FRAMES];
+ const int *const rfct = cpi->count_mb_ref_frame_usage;
+ int intra_count = rfct[INTRA_FRAME];
+ int last_count = rfct[LAST_FRAME];
+ int gf_count = rfct[GOLDEN_FRAME];
+ int arf_count = rfct[ALTREF_FRAME];
+
+ // Work out modified reference frame probabilities to use where prediction
+ // of the reference frame fails
+ if (pred_ref == INTRA_FRAME) {
+ norm_cnt[0] = 0;
+ norm_cnt[1] = last_count;
+ norm_cnt[2] = gf_count;
+ norm_cnt[3] = arf_count;
+ calc_ref_probs(norm_cnt, mod_refprobs);
+ mod_refprobs[0] = 0; // This branch implicit
+ } else if (pred_ref == LAST_FRAME) {
+ norm_cnt[0] = intra_count;
+ norm_cnt[1] = 0;
+ norm_cnt[2] = gf_count;
+ norm_cnt[3] = arf_count;
+ calc_ref_probs(norm_cnt, mod_refprobs);
+ mod_refprobs[1] = 0; // This branch implicit
+ } else if (pred_ref == GOLDEN_FRAME) {
+ norm_cnt[0] = intra_count;
+ norm_cnt[1] = last_count;
+ norm_cnt[2] = 0;
+ norm_cnt[3] = arf_count;
+ calc_ref_probs(norm_cnt, mod_refprobs);
+ mod_refprobs[2] = 0; // This branch implicit
+ } else {
+ norm_cnt[0] = intra_count;
+ norm_cnt[1] = last_count;
+ norm_cnt[2] = gf_count;
+ norm_cnt[3] = 0;
+ calc_ref_probs(norm_cnt, mod_refprobs);
+ mod_refprobs[2] = 0; // This branch implicit
+ }
}
-static __inline unsigned weighted_cost(vp8_prob *tab0, vp8_prob *tab1, int idx, int val, int weight)
-{
- unsigned cost0 = tab0[idx] ? vp8_cost_bit(tab0[idx], val) : 0;
- unsigned cost1 = tab1[idx] ? vp8_cost_bit(tab1[idx], val) : 0;
- // weight is 16-bit fixed point, so this basically calculates:
- // 0.5 + weight * cost1 + (1.0 - weight) * cost0
- return (0x8000 + weight * cost1 + (0x10000 - weight) * cost0) >> 16;
+static __inline unsigned weighted_cost(vp8_prob *tab0, vp8_prob *tab1, int idx, int val, int weight) {
+ unsigned cost0 = tab0[idx] ? vp8_cost_bit(tab0[idx], val) : 0;
+ unsigned cost1 = tab1[idx] ? vp8_cost_bit(tab1[idx], val) : 0;
+ // weight is 16-bit fixed point, so this basically calculates:
+ // 0.5 + weight * cost1 + (1.0 - weight) * cost0
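+ // For illustration (hypothetical values): weight == 0x4000 (0.25),
+ // cost0 == 100 and cost1 == 200 give
+ // (0x8000 + 0x4000 * 200 + 0xC000 * 100) >> 16 == 125,
+ // i.e. 0.25 * 200 + 0.75 * 100.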
+ return (0x8000 + weight * cost1 + (0x10000 - weight) * cost0) >> 16;
}
-static void vp8_estimate_ref_frame_costs(VP8_COMP *cpi, int segment_id, unsigned int * ref_costs )
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- vp8_prob * mod_refprobs;
-
- unsigned int cost;
- int pred_ref ;
- int pred_flag;
- int pred_ctx ;
- int i;
- int tot_count;
-
- vp8_prob pred_prob, new_pred_prob;
- int seg_ref_active;
- int seg_ref_count = 0;
- seg_ref_active = segfeature_active( xd,
- segment_id,
- SEG_LVL_REF_FRAME );
-
- if ( seg_ref_active )
- {
- seg_ref_count = check_segref( xd, segment_id, INTRA_FRAME ) +
- check_segref( xd, segment_id, LAST_FRAME ) +
- check_segref( xd, segment_id, GOLDEN_FRAME ) +
- check_segref( xd, segment_id, ALTREF_FRAME );
- }
-
- // Get the predicted reference for this mb
- pred_ref = get_pred_ref( cm, xd );
-
- // Get the context probability for the prediction flag (based on last frame)
- pred_prob = get_pred_prob( cm, xd, PRED_REF );
-
- // Predict probability for current frame based on stats so far
- pred_ctx = get_pred_context(cm, xd, PRED_REF);
- tot_count = cpi->ref_pred_count[pred_ctx][0] + cpi->ref_pred_count[pred_ctx][1];
- if ( tot_count )
- {
- new_pred_prob =
- ( cpi->ref_pred_count[pred_ctx][0] * 255 + (tot_count >> 1)) / tot_count;
- new_pred_prob += !new_pred_prob;
- }
- else
- new_pred_prob = 128;
-
- // Get the set of probabilities to use if prediction fails
- mod_refprobs = cm->mod_refprobs[pred_ref];
-
- // For each possible selected reference frame work out a cost.
- for ( i = 0; i < MAX_REF_FRAMES; i++ )
- {
- if (seg_ref_active && seg_ref_count == 1)
- {
- cost = 0;
- }
- else
- {
- pred_flag = (i == pred_ref);
-
- // Get the prediction for the current mb
- cost = weighted_cost(&pred_prob, &new_pred_prob, 0,
- pred_flag, cpi->seg0_progress);
- if (cost > 1024) cost = 768; // i.e. account for 4 bits max.
-
- // for incorrectly predicted cases
- if ( ! pred_flag )
- {
- vp8_prob curframe_mod_refprobs[3];
-
- if (cpi->seg0_progress)
- {
- estimate_curframe_refprobs(cpi, curframe_mod_refprobs, pred_ref);
- }
- else
- {
- vpx_memset(curframe_mod_refprobs, 0, sizeof(curframe_mod_refprobs));
- }
-
- cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 0,
- (i != INTRA_FRAME), cpi->seg0_progress);
- if (i != INTRA_FRAME)
- {
- cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 1,
- (i != LAST_FRAME), cpi->seg0_progress);
- if (i != LAST_FRAME)
- {
- cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 2,
- (i != GOLDEN_FRAME), cpi->seg0_progress);
- }
- }
- }
- }
-
- ref_costs[i] = cost;
- }
+static void vp8_estimate_ref_frame_costs(VP8_COMP *cpi, int segment_id, unsigned int *ref_costs) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ vp8_prob *mod_refprobs;
+
+ unsigned int cost;
+ int pred_ref;
+ int pred_flag;
+ int pred_ctx;
+ int i;
+ int tot_count;
+
+ vp8_prob pred_prob, new_pred_prob;
+ int seg_ref_active;
+ int seg_ref_count = 0;
+ seg_ref_active = segfeature_active(xd,
+ segment_id,
+ SEG_LVL_REF_FRAME);
+
+ if (seg_ref_active) {
+ seg_ref_count = check_segref(xd, segment_id, INTRA_FRAME) +
+ check_segref(xd, segment_id, LAST_FRAME) +
+ check_segref(xd, segment_id, GOLDEN_FRAME) +
+ check_segref(xd, segment_id, ALTREF_FRAME);
+ }
+
+ // Get the predicted reference for this mb
+ pred_ref = get_pred_ref(cm, xd);
+
+ // Get the context probability for the prediction flag (based on last frame)
+ pred_prob = get_pred_prob(cm, xd, PRED_REF);
+
+ // Predict probability for current frame based on stats so far
+ pred_ctx = get_pred_context(cm, xd, PRED_REF);
+ tot_count = cpi->ref_pred_count[pred_ctx][0] + cpi->ref_pred_count[pred_ctx][1];
+ if (tot_count) {
+ new_pred_prob =
+ (cpi->ref_pred_count[pred_ctx][0] * 255 + (tot_count >> 1)) / tot_count;
+ new_pred_prob += !new_pred_prob;
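+ // Rounded division yields a probability in [0, 255]; the += !p above
+ // bumps a zero result up to 1, since 0 is not a valid vp8_prob.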
+ } else
+ new_pred_prob = 128;
+
+ // Get the set of probabilities to use if prediction fails
+ mod_refprobs = cm->mod_refprobs[pred_ref];
+
+ // For each possible selected reference frame work out a cost.
+ for (i = 0; i < MAX_REF_FRAMES; i++) {
+ if (seg_ref_active && seg_ref_count == 1) {
+ cost = 0;
+ } else {
+ pred_flag = (i == pred_ref);
+
+ // Get the prediction for the current mb
+ cost = weighted_cost(&pred_prob, &new_pred_prob, 0,
+ pred_flag, cpi->seg0_progress);
+ if (cost > 1024) cost = 768; // i.e. account for 4 bits max.
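+ // (Costs here are in 1/256-bit units, so 1024 is ~4 bits and the clamp
+ // charges at most ~3 bits -- an assumption about the cost scaling.)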
+
+ // for incorrectly predicted cases
+ if (!pred_flag) {
+ vp8_prob curframe_mod_refprobs[3];
+
+ if (cpi->seg0_progress) {
+ estimate_curframe_refprobs(cpi, curframe_mod_refprobs, pred_ref);
+ } else {
+ vpx_memset(curframe_mod_refprobs, 0, sizeof(curframe_mod_refprobs));
+ }
+
+ cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 0,
+ (i != INTRA_FRAME), cpi->seg0_progress);
+ if (i != INTRA_FRAME) {
+ cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 1,
+ (i != LAST_FRAME), cpi->seg0_progress);
+ if (i != LAST_FRAME) {
+ cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 2,
+ (i != GOLDEN_FRAME), cpi->seg0_progress);
+ }
+ }
+ }
+ }
+
+ ref_costs[i] = cost;
+ }
}
-static void store_coding_context (MACROBLOCK *x, int mb_index,
- int mode_index,
- PARTITION_INFO *partition,
- int_mv *ref_mv,
- int_mv *second_ref_mv)
-{
- MACROBLOCKD *xd = &x->e_mbd;
-
- // Take a snapshot of the coding context so it can be
- // restored if we decide to encode this way
- x->mb_context[mb_index].best_mode_index = mode_index;
- vpx_memcpy(&x->mb_context[mb_index].mic, xd->mode_info_context,
- sizeof(MODE_INFO));
- vpx_memcpy(&x->mb_context[mb_index].partition_info, partition,
- sizeof(PARTITION_INFO));
- x->mb_context[mb_index].best_ref_mv.as_int = ref_mv->as_int;
- x->mb_context[mb_index].second_best_ref_mv.as_int = second_ref_mv->as_int;
-
- //x->mb_context[mb_index].rddiv = x->rddiv;
- //x->mb_context[mb_index].rdmult = x->rdmult;
+static void store_coding_context(MACROBLOCK *x, int mb_index,
+ int mode_index,
+ PARTITION_INFO *partition,
+ int_mv *ref_mv,
+ int_mv *second_ref_mv) {
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ // Take a snapshot of the coding context so it can be
+ // restored if we decide to encode this way
+ x->mb_context[mb_index].best_mode_index = mode_index;
+ vpx_memcpy(&x->mb_context[mb_index].mic, xd->mode_info_context,
+ sizeof(MODE_INFO));
+ vpx_memcpy(&x->mb_context[mb_index].partition_info, partition,
+ sizeof(PARTITION_INFO));
+ x->mb_context[mb_index].best_ref_mv.as_int = ref_mv->as_int;
+ x->mb_context[mb_index].second_best_ref_mv.as_int = second_ref_mv->as_int;
+
+ // x->mb_context[mb_index].rddiv = x->rddiv;
+ // x->mb_context[mb_index].rdmult = x->rdmult;
}
void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion, int *returnintra,
int *best_single_rd_diff, int *best_comp_rd_diff,
- int *best_hybrid_rd_diff)
-{
- VP8_COMMON *cm = &cpi->common;
- BLOCK *b = &x->block[0];
- BLOCKD *d = &x->e_mbd.block[0];
- MACROBLOCKD *xd = &x->e_mbd;
- union b_mode_info best_bmodes[16];
- MB_MODE_INFO best_mbmode;
- PARTITION_INFO best_partition;
- int_mv best_ref_mv, second_best_ref_mv;
- int_mv mode_mv[MB_MODE_COUNT];
- MB_PREDICTION_MODE this_mode;
- int num00;
- int best_mode_index = 0;
- int mode8x8[2][4];
- unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
- int mb_index = xd->mb_index;
-
- int i;
- int mode_index;
- int mdcounts[4];
- int rate;
- int distortion;
- int best_rd = INT_MAX;
- int best_intra_rd = INT_MAX;
- int best_comp_rd = INT_MAX;
- int best_single_rd = INT_MAX;
- int best_hybrid_rd = INT_MAX;
+ int *best_hybrid_rd_diff) {
+ VP8_COMMON *cm = &cpi->common;
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ MACROBLOCKD *xd = &x->e_mbd;
+ union b_mode_info best_bmodes[16];
+ MB_MODE_INFO best_mbmode;
+ PARTITION_INFO best_partition;
+ int_mv best_ref_mv, second_best_ref_mv;
+ int_mv mode_mv[MB_MODE_COUNT];
+ MB_PREDICTION_MODE this_mode;
+ int num00;
+ int best_mode_index = 0;
+ int mode8x8[2][4];
+ unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
+ int mb_index = xd->mb_index;
+
+ int i;
+ int mode_index;
+ int mdcounts[4];
+ int rate;
+ int distortion;
+ int best_rd = INT_MAX;
+ int best_intra_rd = INT_MAX;
+ int best_comp_rd = INT_MAX;
+ int best_single_rd = INT_MAX;
+ int best_hybrid_rd = INT_MAX;
#if CONFIG_PRED_FILTER
- int best_overall_rd = INT_MAX;
-#endif
- int rate2, distortion2;
- int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
- int uv_intra_skippable = 0;
- int uv_intra_rate_8x8 = 0, uv_intra_distortion_8x8 = 0, uv_intra_rate_tokenonly_8x8 = 0;
- int uv_intra_skippable_8x8=0;
- int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
- int distortion_uv;
- int best_yrd = INT_MAX;
+ int best_overall_rd = INT_MAX;
+#endif
+ int rate2, distortion2;
+ int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
+ int uv_intra_skippable = 0;
+ int uv_intra_rate_8x8 = 0, uv_intra_distortion_8x8 = 0, uv_intra_rate_tokenonly_8x8 = 0;
+ int uv_intra_skippable_8x8 = 0;
+ int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
+ int distortion_uv;
+ int best_yrd = INT_MAX;
#if CONFIG_PRED_FILTER
- int best_filter_state;
+ int best_filter_state;
#endif
- //int all_rds[MAX_MODES]; // Experimental debug code.
- //int all_rates[MAX_MODES];
- //int all_dist[MAX_MODES];
- //int intermodecost[MAX_MODES];
-
- MB_PREDICTION_MODE uv_intra_mode;
- MB_PREDICTION_MODE uv_intra_mode_8x8 = 0;
-
- int_mv mvp;
- int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
- int saddone=0;
- int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
-
- int_mv frame_nearest_mv[4];
- int_mv frame_near_mv[4];
- int_mv frame_best_ref_mv[4];
- int_mv mc_search_result[4];
- int frame_mdcounts[4][4];
- unsigned char *y_buffer[4];
- unsigned char *u_buffer[4];
- unsigned char *v_buffer[4];
-
- unsigned int ref_costs[MAX_REF_FRAMES];
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
-
- vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
- vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
- vpx_memset(&x->mb_context[mb_index], 0, sizeof(PICK_MODE_CONTEXT));
-
- for (i = 0; i < 4; i++)
- {
- mc_search_result[i].as_int = INVALID_MV;
- }
-
- for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++)
- {
- int j;
-
- for (j = 0; j < 16; j++)
- {
- int k;
-
- for (k = 0; k < MAX_REF_FRAMES - 1; k++)
- {
- seg_mvs[i][j][k].as_int = INVALID_MV;
- }
- }
- }
-
- if (cpi->ref_frame_flags & VP8_LAST_FLAG)
- {
- YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
-
- vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
- x->e_mbd.prev_mode_info_context,
- &frame_nearest_mv[LAST_FRAME], &frame_near_mv[LAST_FRAME],
- &frame_best_ref_mv[LAST_FRAME], frame_mdcounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);
+ // int all_rds[MAX_MODES]; // Experimental debug code.
+ // int all_rates[MAX_MODES];
+ // int all_dist[MAX_MODES];
+ // int intermodecost[MAX_MODES];
- y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
- u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
- v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
- }
-
- if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
- {
- YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];
-
- vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
- x->e_mbd.prev_mode_info_context,
- &frame_nearest_mv[GOLDEN_FRAME], &frame_near_mv[GOLDEN_FRAME],
- &frame_best_ref_mv[GOLDEN_FRAME], frame_mdcounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);
-
- y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
- u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
- v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
- }
-
- if (cpi->ref_frame_flags & VP8_ALT_FLAG)
- {
- YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
-
- vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
- x->e_mbd.prev_mode_info_context,
- &frame_nearest_mv[ALTREF_FRAME], &frame_near_mv[ALTREF_FRAME],
- &frame_best_ref_mv[ALTREF_FRAME], frame_mdcounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);
-
- y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
- u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
- v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
- }
+ MB_PREDICTION_MODE uv_intra_mode;
+ MB_PREDICTION_MODE uv_intra_mode_8x8 = 0;
- *returnintra = INT_MAX;
-
- x->skip = 0;
-
- vpx_memset(mode_mv, 0, sizeof(mode_mv));
-
- x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
-
- /* Initialize zbin mode boost for uv costing */
- cpi->zbin_mode_boost = 0;
- vp8_update_zbin_extra(cpi, x);
-
- rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate,
- &uv_intra_rate_tokenonly, &uv_intra_distortion);
- uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
- uv_intra_skippable = mbuv_is_skippable(&x->e_mbd);
+ int_mv mvp;
+ int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int saddone = 0;
+  int sr = 0;  // search range obtained from mv_pred(); uses step_param levels (0-7)
+
+ int_mv frame_nearest_mv[4];
+ int_mv frame_near_mv[4];
+ int_mv frame_best_ref_mv[4];
+ int_mv mc_search_result[4];
+ int frame_mdcounts[4][4];
+ unsigned char *y_buffer[4];
+ unsigned char *u_buffer[4];
+ unsigned char *v_buffer[4];
+
+ unsigned int ref_costs[MAX_REF_FRAMES];
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
+
+ vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+ vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
+ vpx_memset(&x->mb_context[mb_index], 0, sizeof(PICK_MODE_CONTEXT));
+
+ for (i = 0; i < 4; i++) {
+ mc_search_result[i].as_int = INVALID_MV;
+ }
+
+ for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++) {
+ int j;
+
+ for (j = 0; j < 16; j++) {
+ int k;
+
+ for (k = 0; k < MAX_REF_FRAMES - 1; k++) {
+ seg_mvs[i][j][k].as_int = INVALID_MV;
+ }
+ }
+ }
+
+ if (cpi->ref_frame_flags & VP8_LAST_FLAG) {
+ YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+
+ vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
+ x->e_mbd.prev_mode_info_context,
+ &frame_nearest_mv[LAST_FRAME], &frame_near_mv[LAST_FRAME],
+ &frame_best_ref_mv[LAST_FRAME], frame_mdcounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);
+
+ y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
+ u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
+ v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
+ }
+
+ if (cpi->ref_frame_flags & VP8_GOLD_FLAG) {
+ YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];
+
+ vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
+ x->e_mbd.prev_mode_info_context,
+ &frame_nearest_mv[GOLDEN_FRAME], &frame_near_mv[GOLDEN_FRAME],
+ &frame_best_ref_mv[GOLDEN_FRAME], frame_mdcounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);
+
+ y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
+ u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
+ v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
+ }
+
+ if (cpi->ref_frame_flags & VP8_ALT_FLAG) {
+ YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
+
+ vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context,
+ x->e_mbd.prev_mode_info_context,
+ &frame_nearest_mv[ALTREF_FRAME], &frame_near_mv[ALTREF_FRAME],
+ &frame_best_ref_mv[ALTREF_FRAME], frame_mdcounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);
+
+ y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
+ u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
+ v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
+ }
+
+ *returnintra = INT_MAX;
+
+ x->skip = 0;
+
+ vpx_memset(mode_mv, 0, sizeof(mode_mv));
+
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ /* Initialize zbin mode boost for uv costing */
+ cpi->zbin_mode_boost = 0;
+ vp8_update_zbin_extra(cpi, x);
+
+ rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate,
+ &uv_intra_rate_tokenonly, &uv_intra_distortion);
+ uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
+ uv_intra_skippable = mbuv_is_skippable(&x->e_mbd);
+
+ /* rough estimate for now */
+ if (cpi->common.txfm_mode == ALLOW_8X8) {
+ rd_pick_intra_mbuv_mode_8x8(cpi, x, &uv_intra_rate_8x8,
+ &uv_intra_rate_tokenonly_8x8,
+ &uv_intra_distortion_8x8);
+ uv_intra_mode_8x8 = x->e_mbd.mode_info_context->mbmi.uv_mode;
+ uv_intra_skippable_8x8 = mbuv_is_skippable_8x8(&x->e_mbd);
+ }
+
+ // Get estimates of reference frame costs for each reference frame
+ // that depend on the current prediction etc.
+ vp8_estimate_ref_frame_costs(cpi, segment_id, ref_costs);
+
+ for (mode_index = 0; mode_index < MAX_MODES; mode_index++) {
+ int this_rd = INT_MAX;
+ int disable_skip = 0;
+ int other_cost = 0;
+ int compmode_cost = 0;
+ int mode_excluded = 0;
- /* rough estimate for now */
- if(cpi->common.txfm_mode==ALLOW_8X8)
- {
- rd_pick_intra_mbuv_mode_8x8(cpi, x, &uv_intra_rate_8x8,
- &uv_intra_rate_tokenonly_8x8,
- &uv_intra_distortion_8x8);
- uv_intra_mode_8x8 = x->e_mbd.mode_info_context->mbmi.uv_mode;
- uv_intra_skippable_8x8 = mbuv_is_skippable_8x8(&x->e_mbd);
+ // Test best rd so far against threshold for trying this mode.
+ if (best_rd <= cpi->rd_threshes[mode_index]) {
+ continue;
}
-
- // Get estimates of reference frame costs for each reference frame
- // that depend on the current prediction etc.
- vp8_estimate_ref_frame_costs( cpi, segment_id, ref_costs );
-
- for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
- {
- int this_rd = INT_MAX;
- int disable_skip = 0;
- int other_cost = 0;
- int compmode_cost = 0;
- int mode_excluded = 0;
-
- // Test best rd so far against threshold for trying this mode.
- if (best_rd <= cpi->rd_threshes[mode_index])
- {
- continue;
- }
-
- // These variables hold are rolling total cost and distortion for this mode
- rate2 = 0;
- distortion2 = 0;
- rate_y = 0;
- rate_uv =0;
-
- this_mode = vp8_mode_order[mode_index].mode;
- xd->mode_info_context->mbmi.mode = this_mode;
- xd->mode_info_context->mbmi.uv_mode = DC_PRED;
- xd->mode_info_context->mbmi.ref_frame =
- vp8_mode_order[mode_index].ref_frame;
- xd->mode_info_context->mbmi.second_ref_frame =
- vp8_mode_order[mode_index].second_ref_frame;
+
+    // These variables hold our rolling total cost and distortion for this mode
+ rate2 = 0;
+ distortion2 = 0;
+ rate_y = 0;
+ rate_uv = 0;
+
+ this_mode = vp8_mode_order[mode_index].mode;
+ xd->mode_info_context->mbmi.mode = this_mode;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+ xd->mode_info_context->mbmi.ref_frame =
+ vp8_mode_order[mode_index].ref_frame;
+ xd->mode_info_context->mbmi.second_ref_frame =
+ vp8_mode_order[mode_index].second_ref_frame;
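+    // A non-zero second_ref_frame (i.e. anything but INTRA_FRAME) marks this as a compound prediction mode.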
#if CONFIG_PRED_FILTER
- xd->mode_info_context->mbmi.pred_filter_enabled = 0;
+ xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
#if CONFIG_COMP_INTRA_PRED
- xd->mode_info_context->mbmi.second_mode = (MB_PREDICTION_MODE) (DC_PRED - 1);
- xd->mode_info_context->mbmi.second_uv_mode = (MB_PREDICTION_MODE) (DC_PRED - 1);
-#endif
-
- // If the segment reference frame feature is enabled....
- // then do nothing if the current ref frame is not allowed..
- if ( segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) &&
- !check_segref( xd, segment_id,
- xd->mode_info_context->mbmi.ref_frame ) )
- {
- continue;
- }
- // If the segment mode feature is enabled....
- // then do nothing if the current mode is not allowed..
- else if ( segfeature_active( xd, segment_id, SEG_LVL_MODE ) &&
- ( this_mode !=
- get_segdata( xd, segment_id, SEG_LVL_MODE ) ) )
- {
- continue;
- }
-
- // Disable this drop out case if either the mode or ref frame
- // segment level feature is enabled for this segment. This is to
- // prevent the possibility that the we end up unable to pick any mode.
- else if ( !segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) &&
- !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
- {
- // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
- // unless ARNR filtering is enabled in which case we want
- // an unfiltered alternative
- if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
- {
- if (this_mode != ZEROMV ||
- x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
- {
- continue;
- }
- }
- }
-
- /* everything but intra */
- if (x->e_mbd.mode_info_context->mbmi.ref_frame)
- {
- int ref = x->e_mbd.mode_info_context->mbmi.ref_frame;
-
- x->e_mbd.pre.y_buffer = y_buffer[ref];
- x->e_mbd.pre.u_buffer = u_buffer[ref];
- x->e_mbd.pre.v_buffer = v_buffer[ref];
- mode_mv[NEARESTMV] = frame_nearest_mv[ref];
- mode_mv[NEARMV] = frame_near_mv[ref];
- best_ref_mv = frame_best_ref_mv[ref];
- vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
- }
-
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- {
- int ref = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
-
- x->e_mbd.second_pre.y_buffer = y_buffer[ref];
- x->e_mbd.second_pre.u_buffer = u_buffer[ref];
- x->e_mbd.second_pre.v_buffer = v_buffer[ref];
- second_best_ref_mv = frame_best_ref_mv[ref];
- }
-
- // Experimental code. Special case for gf and arf zeromv modes.
- // Increase zbin size to suppress noise
- if (cpi->zbin_mode_boost_enabled)
- {
- if ( vp8_mode_order[mode_index].ref_frame == INTRA_FRAME )
- cpi->zbin_mode_boost = 0;
- else
- {
- if (vp8_mode_order[mode_index].mode == ZEROMV)
- {
- if (vp8_mode_order[mode_index].ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
- else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- }
- else if (vp8_mode_order[mode_index].mode == SPLITMV)
- cpi->zbin_mode_boost = 0;
- else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
- }
+ xd->mode_info_context->mbmi.second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
+ xd->mode_info_context->mbmi.second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
+#endif
+
+    // If the segment reference frame feature is enabled,
+    // then do nothing if the current ref frame is not allowed.
+ if (segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ !check_segref(xd, segment_id,
+ xd->mode_info_context->mbmi.ref_frame)) {
+ continue;
+ }
+    // If the segment mode feature is enabled,
+    // then do nothing if the current mode is not allowed.
+ else if (segfeature_active(xd, segment_id, SEG_LVL_MODE) &&
+ (this_mode !=
+ get_segdata(xd, segment_id, SEG_LVL_MODE))) {
+ continue;
+ }
+
+    // Disable this drop-out case if either the mode or ref frame
+    // segment level feature is enabled for this segment. This is to
+    // prevent the possibility that we end up unable to pick any mode.
+ else if (!segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ !segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
+ // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+ // unless ARNR filtering is enabled in which case we want
+ // an unfiltered alternative
+ if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+ if (this_mode != ZEROMV ||
+ x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
+ continue;
+ }
+ }
+ }
+
+ /* everything but intra */
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
+ int ref = x->e_mbd.mode_info_context->mbmi.ref_frame;
+
+ x->e_mbd.pre.y_buffer = y_buffer[ref];
+ x->e_mbd.pre.u_buffer = u_buffer[ref];
+ x->e_mbd.pre.v_buffer = v_buffer[ref];
+ mode_mv[NEARESTMV] = frame_nearest_mv[ref];
+ mode_mv[NEARMV] = frame_near_mv[ref];
+ best_ref_mv = frame_best_ref_mv[ref];
+ vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
+ }
+
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ int ref = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+
+ x->e_mbd.second_pre.y_buffer = y_buffer[ref];
+ x->e_mbd.second_pre.u_buffer = u_buffer[ref];
+ x->e_mbd.second_pre.v_buffer = v_buffer[ref];
+ second_best_ref_mv = frame_best_ref_mv[ref];
+ }
+
+ // Experimental code. Special case for gf and arf zeromv modes.
+ // Increase zbin size to suppress noise
+ if (cpi->zbin_mode_boost_enabled) {
+ if (vp8_mode_order[mode_index].ref_frame == INTRA_FRAME)
+ cpi->zbin_mode_boost = 0;
+ else {
+ if (vp8_mode_order[mode_index].mode == ZEROMV) {
+ if (vp8_mode_order[mode_index].ref_frame != LAST_FRAME)
+ cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ else
+ cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ } else if (vp8_mode_order[mode_index].mode == SPLITMV)
+ cpi->zbin_mode_boost = 0;
+ else
+ cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
- vp8_update_zbin_extra(cpi, x);
- }
+ vp8_update_zbin_extra(cpi, x);
+ }
- if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame)
- switch (this_mode)
- {
- case B_PRED:
- {
- int tmp_rd;
+ if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ switch (this_mode) {
+ case B_PRED: {
+ int tmp_rd;
- // Note the rate value returned here includes the cost of coding the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
- tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y, &distortion, best_yrd,
+          // Note the rate value returned here includes the cost of coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED];
+ tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y, &distortion, best_yrd,
#if CONFIG_COMP_INTRA_PRED
- 0,
-#endif
- 0);
- rate2 += rate;
- distortion2 += distortion;
-
- if(tmp_rd < best_yrd)
- {
- rate2 += uv_intra_rate;
- rate_uv = uv_intra_rate_tokenonly;
- distortion2 += uv_intra_distortion;
- distortion_uv = uv_intra_distortion;
- }
- else
- {
- this_rd = INT_MAX;
- disable_skip = 1;
- }
+ 0,
+#endif
+ 0);
+ rate2 += rate;
+ distortion2 += distortion;
+
+ if (tmp_rd < best_yrd) {
+ rate2 += uv_intra_rate;
+ rate_uv = uv_intra_rate_tokenonly;
+ distortion2 += uv_intra_distortion;
+ distortion_uv = uv_intra_distortion;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
}
break;
- case I8X8_PRED:
- {
- int tmp_rd;
- tmp_rd = rd_pick_intra8x8mby_modes(cpi,
- x, &rate, &rate_y, &distortion, best_yrd);
- rate2 += rate;
- distortion2 += distortion;
-
- mode8x8[0][0]= x->e_mbd.mode_info_context->bmi[0].as_mode.first;
- mode8x8[0][1]= x->e_mbd.mode_info_context->bmi[2].as_mode.first;
- mode8x8[0][2]= x->e_mbd.mode_info_context->bmi[8].as_mode.first;
- mode8x8[0][3]= x->e_mbd.mode_info_context->bmi[10].as_mode.first;
+ case I8X8_PRED: {
+ int tmp_rd;
+ tmp_rd = rd_pick_intra8x8mby_modes(cpi,
+ x, &rate, &rate_y, &distortion, best_yrd);
+ rate2 += rate;
+ distortion2 += distortion;
+
+ mode8x8[0][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- mode8x8[1][0]= x->e_mbd.mode_info_context->bmi[0].as_mode.second;
- mode8x8[1][1]= x->e_mbd.mode_info_context->bmi[2].as_mode.second;
- mode8x8[1][2]= x->e_mbd.mode_info_context->bmi[8].as_mode.second;
- mode8x8[1][3]= x->e_mbd.mode_info_context->bmi[10].as_mode.second;
-#endif
-
- /* TODO: uv rate maybe over-estimated here since there is UV intra
- mode coded in I8X8_PRED prediction */
- if(tmp_rd < best_yrd)
- {
- rate2 += uv_intra_rate;
- rate_uv = uv_intra_rate_tokenonly;
- distortion2 += uv_intra_distortion;
- distortion_uv = uv_intra_distortion;
- }
- else
- {
- this_rd = INT_MAX;
- disable_skip = 1;
- }
+ mode8x8[1][0] = x->e_mbd.mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = x->e_mbd.mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = x->e_mbd.mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
+#endif
+
+          /* TODO: uv rate may be over-estimated here since there is UV intra
+             mode coded in I8X8_PRED prediction */
+ if (tmp_rd < best_yrd) {
+ rate2 += uv_intra_rate;
+ rate_uv = uv_intra_rate_tokenonly;
+ distortion2 += uv_intra_distortion;
+ distortion_uv = uv_intra_distortion;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
}
break;
- case SPLITMV:
- {
- int tmp_rd;
- int this_rd_thresh;
-
- this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
- this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
-
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, NULL,
- best_yrd, mdcounts,
- &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs) ;
-
- rate2 += rate;
- distortion2 += distortion;
-
- // If even the 'Y' rd value of split is higher than best so far then dont bother looking at UV
- if (tmp_rd < best_yrd)
- {
- // Now work out UV cost and add it in
- rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
- }
- else
- {
- this_rd = INT_MAX;
- disable_skip = 1;
- }
- mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
- compmode_cost =
- vp8_cost_bit( get_pred_prob( cm, xd, PRED_COMP ), 0 );
+ case SPLITMV: {
+ int tmp_rd;
+ int this_rd_thresh;
+
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
+
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, NULL,
+ best_yrd, mdcounts,
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs);
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+          // If even the 'Y' rd value of split is higher than best so far then don't bother looking at UV
+ if (tmp_rd < best_yrd) {
+ // Now work out UV cost and add it in
+ rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ compmode_cost =
+ vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 0);
}
break;
#if CONFIG_NEWINTRAMODES
@@ -3178,976 +2916,895 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
case V_PRED:
case H_PRED:
case TM_PRED:
- x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
- // FIXME compound intra prediction
- RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
- (&x->e_mbd);
- if(cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd)) ;
- else
- macro_block_yrd(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd)) ;
- rate2 += rate_y;
- distortion2 += distortion;
- rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
- if(cpi->common.txfm_mode == ALLOW_8X8)
- {
- rate2 += uv_intra_rate_8x8;
- rate_uv = uv_intra_rate_tokenonly_8x8;
- distortion2 += uv_intra_distortion_8x8;
- distortion_uv = uv_intra_distortion_8x8;
- }
- else
- {
- rate2 += uv_intra_rate;
- rate_uv = uv_intra_rate_tokenonly;
- distortion2 += uv_intra_distortion;
- distortion_uv = uv_intra_distortion;
- }
- break;
-
- case NEWMV:
- {
- int thissme;
- int bestsme = INT_MAX;
- int step_param = cpi->sf.first_step;
- int further_steps;
- int n;
- int do_refine=1; /* If last step (1-away) of n-step search doesn't pick the center point as the best match,
- we will do a final 1-away diamond refining search */
-
- int sadpb = x->sadperbit16;
- int_mv mvp_full;
-
- int col_min = (best_ref_mv.as_mv.col>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.col & 7)?1:0);
- int row_min = (best_ref_mv.as_mv.row>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.row & 7)?1:0);
- int col_max = (best_ref_mv.as_mv.col>>3) + MAX_FULL_PEL_VAL;
- int row_max = (best_ref_mv.as_mv.row>>3) + MAX_FULL_PEL_VAL;
-
- int tmp_col_min = x->mv_col_min;
- int tmp_col_max = x->mv_col_max;
- int tmp_row_min = x->mv_row_min;
- int tmp_row_max = x->mv_row_max;
-
- if(!saddone)
- {
- vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] );
- saddone = 1;
- }
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+ // FIXME compound intra prediction
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
+ (&x->e_mbd);
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ macro_block_yrd_8x8(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ else
+ macro_block_yrd(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ rate2 += rate_y;
+ distortion2 += distortion;
+ rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
+ if (cpi->common.txfm_mode == ALLOW_8X8) {
+ rate2 += uv_intra_rate_8x8;
+ rate_uv = uv_intra_rate_tokenonly_8x8;
+ distortion2 += uv_intra_distortion_8x8;
+ distortion_uv = uv_intra_distortion_8x8;
+ } else {
+ rate2 += uv_intra_rate;
+ rate_uv = uv_intra_rate_tokenonly;
+ distortion2 += uv_intra_distortion;
+ distortion_uv = uv_intra_distortion;
+ }
+ break;
+
+ case NEWMV: {
+ int thissme;
+ int bestsme = INT_MAX;
+ int step_param = cpi->sf.first_step;
+ int further_steps;
+ int n;
+ int do_refine = 1; /* If last step (1-away) of n-step search doesn't pick the center point as the best match,
+ we will do a final 1-away diamond refining search */
+
+ int sadpb = x->sadperbit16;
+ int_mv mvp_full;
+
+ int col_min = (best_ref_mv.as_mv.col >> 3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.col & 7) ? 1 : 0);
+ int row_min = (best_ref_mv.as_mv.row >> 3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.row & 7) ? 1 : 0);
+ int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ if (!saddone) {
+ vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
+ saddone = 1;
+ }
+
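+          // Predict a starting MV (mvp) and a search range (sr) from the SAD ordering of neighbouring blocks.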
+ vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
+ x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
+
+ mvp_full.as_mv.col = mvp.as_mv.col >> 3;
+ mvp_full.as_mv.row = mvp.as_mv.row >> 3;
+
+ // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
+ if (x->mv_col_min < col_min)
+ x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max)
+ x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min)
+ x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max)
+ x->mv_row_max = row_max;
+
+ // adjust search range according to sr from mv prediction
+ if (sr > step_param)
+ step_param = sr;
+
+ // Initial step/diamond search
+ {
+ bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.as_mv.first,
+ step_param, sadpb, &num00,
+ &cpi->fn_ptr[BLOCK_16X16],
+ XMVCOST, &best_ref_mv);
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
- vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
- x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
-
- mvp_full.as_mv.col = mvp.as_mv.col>>3;
- mvp_full.as_mv.row = mvp.as_mv.row>>3;
-
- // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
- if (x->mv_col_min < col_min )
- x->mv_col_min = col_min;
- if (x->mv_col_max > col_max )
- x->mv_col_max = col_max;
- if (x->mv_row_min < row_min )
- x->mv_row_min = row_min;
- if (x->mv_row_max > row_max )
- x->mv_row_max = row_max;
-
- //adjust search range according to sr from mv prediction
- if(sr > step_param)
- step_param = sr;
-
- // Initial step/diamond search
- {
- bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.as_mv.first,
- step_param, sadpb, &num00,
- &cpi->fn_ptr[BLOCK_16X16],
- XMVCOST, &best_ref_mv);
- mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
-
- // Further step/diamond searches as necessary
- n = 0;
- further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
-
- n = num00;
- num00 = 0;
-
- /* If there won't be more n-step search, check to see if refining search is needed. */
- if (n > further_steps)
- do_refine = 0;
-
- while (n < further_steps)
- {
- n++;
-
- if (num00)
- num00--;
- else
- {
- thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
- &d->bmi.as_mv.first, step_param + n, sadpb, &num00,
- &cpi->fn_ptr[BLOCK_16X16],
- XMVCOST, &best_ref_mv);
-
- /* check to see if refining search is needed. */
- if (num00 > (further_steps-n))
- do_refine = 0;
-
- if (thissme < bestsme)
- {
- bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
- }
- else
- {
- d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
- }
- }
+ // Further step/diamond searches as necessary
+ n = 0;
+ further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+
+ n = num00;
+ num00 = 0;
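+          /* num00 reported by the previous search counts upcoming step sizes
+             already found to be centred on the same point; the loop below
+             skips those repeat searches. */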
+
+          /* If there won't be any more n-step searches, check whether a refining search is needed. */
+ if (n > further_steps)
+ do_refine = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
+ &d->bmi.as_mv.first, step_param + n, sadpb, &num00,
+ &cpi->fn_ptr[BLOCK_16X16],
+ XMVCOST, &best_ref_mv);
+
+ /* check to see if refining search is needed. */
+ if (num00 > (further_steps - n))
+ do_refine = 0;
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
+ } else {
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
+ }
}
+ }
- /* final 1-away diamond refining search */
- if (do_refine == 1)
- {
- int search_range;
+ /* final 1-away diamond refining search */
+ if (do_refine == 1) {
+ int search_range;
- //It seems not a good way to set search_range. Need further investigation.
- //search_range = MAXF(abs((mvp.row>>3) - d->bmi.mv.as_mv.row), abs((mvp.col>>3) - d->bmi.mv.as_mv.col));
- search_range = 8;
+        // This does not seem to be a good way to set search_range; it needs further investigation.
+ // search_range = MAXF(abs((mvp.row>>3) - d->bmi.mv.as_mv.row), abs((mvp.col>>3) - d->bmi.mv.as_mv.col));
+ search_range = 8;
- //thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
- thissme = cpi->refining_search_sad(x, b, d, &d->bmi.as_mv.first, sadpb,
- search_range, &cpi->fn_ptr[BLOCK_16X16],
- XMVCOST, &best_ref_mv);
+ // thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
+ thissme = cpi->refining_search_sad(x, b, d, &d->bmi.as_mv.first, sadpb,
+ search_range, &cpi->fn_ptr[BLOCK_16X16],
+ XMVCOST, &best_ref_mv);
- if (thissme < bestsme)
- {
- bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
- }
- else
- {
- d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
- }
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
+ } else {
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
+ }
- x->mv_col_min = tmp_col_min;
- x->mv_col_max = tmp_col_max;
- x->mv_row_min = tmp_row_min;
- x->mv_row_max = tmp_row_max;
-
- if (bestsme < INT_MAX)
- {
- int dis; /* TODO: use dis in distortion calculation later. */
- unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first, &best_ref_mv,
- x->errorperbit,
- &cpi->fn_ptr[BLOCK_16X16],
- XMVCOST, &dis, &sse);
- }
- mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.as_mv.first.as_int;
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
- mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first, &best_ref_mv,
+ x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16],
+ XMVCOST, &dis, &sse);
+ }
+ mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.as_mv.first.as_int;
- // Add the new motion vector cost to our rolling cost variable
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
+
+ // Add the new motion vector cost to our rolling cost variable
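+          // (vp8_mv_bit_cost applies the weight of 96; assuming its usual
+          // >> 7 normalisation this scales the raw MV cost by 96/128 = 0.75.)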
#if CONFIG_HIGH_PRECISION_MV
- rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
- XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
+ rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
#else
- rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
- XMVCOST, 96);
+ rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
+ XMVCOST, 96);
#endif
}
case NEARESTMV:
case NEARMV:
- // Clip "next_nearest" so that it does not extend to far out of image
- vp8_clamp_mv2(&mode_mv[this_mode], xd);
+        // Clip "next_nearest" so that it does not extend too far out of the image
+ vp8_clamp_mv2(&mode_mv[this_mode], xd);
- // Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
- if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
- {
- continue;
- }
+        // Do not bother proceeding if the vector (from newmv, nearest or near) is 0,0, as this should then be coded using the zeromv mode.
+ if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0)) {
+ continue;
+ }
case ZEROMV:
- // Trap vectors that reach beyond the UMV borders
- // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
- // because of the lack of break statements in the previous two cases.
- if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
- {
- continue;
- }
+ // Trap vectors that reach beyond the UMV borders
+        // Note that ALL New MV, Nearest MV, Near MV and Zero MV code drops through to this point
+ // because of the lack of break statements in the previous two cases.
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
- vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
+ vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
#if CONFIG_PRED_FILTER
- // Filtered prediction:
- xd->mode_info_context->mbmi.pred_filter_enabled =
- vp8_mode_order[mode_index].pred_filter_flag;
- rate2 += vp8_cost_bit( cpi->common.prob_pred_filter_off,
- xd->mode_info_context->mbmi.pred_filter_enabled);
-#endif
-
- vp8_build_inter16x16_predictors_mby(&x->e_mbd);
-
- compmode_cost =
- vp8_cost_bit( get_pred_prob( cm, xd, PRED_COMP ), 0 );
-
- if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
- x->skip = 1;
- }
- else if (x->encode_breakout)
- {
- unsigned int sse;
- unsigned int var;
- int threshold = (xd->block[0].dequant[1]
- * xd->block[0].dequant[1] >>4);
-
- if(threshold < x->encode_breakout)
- threshold = x->encode_breakout;
-
- var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
- (*(b->base_src), b->src_stride,
- x->e_mbd.predictor, 16, &sse);
-
- if (sse < threshold)
- {
- unsigned int q2dc = xd->block[24].dequant[0];
- /* If there is no codeable 2nd order dc
- or a very small uniform pixel change change */
- if ((sse - var < q2dc * q2dc >>4) ||
- (sse /2 > var && sse-var < 64))
- {
- // Check u and v to make sure skip is ok
- int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
- if (sse2 * 2 < threshold)
- {
- x->skip = 1;
- distortion2 = sse + sse2;
- rate2 = 500;
-
- /* for best_yrd calculation */
- rate_uv = 0;
- distortion_uv = sse2;
-
- disable_skip = 1;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
-
- break;
- }
- }
+ // Filtered prediction:
+ xd->mode_info_context->mbmi.pred_filter_enabled =
+ vp8_mode_order[mode_index].pred_filter_flag;
+ rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
+ xd->mode_info_context->mbmi.pred_filter_enabled);
+#endif
+
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd);
+
+ compmode_cost =
+ vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 0);
+
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
+ x->skip = 1;
+ } else if (x->encode_breakout) {
+ unsigned int sse;
+ unsigned int var;
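+          // Breakout threshold: the larger of encode_breakout and the squared AC dequantizer value >> 4.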
+ int threshold = (xd->block[0].dequant[1]
+ * xd->block[0].dequant[1] >> 4);
+
+ if (threshold < x->encode_breakout)
+ threshold = x->encode_breakout;
+
+ var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+ (*(b->base_src), b->src_stride,
+ x->e_mbd.predictor, 16, &sse);
+
+ if (sse < threshold) {
+ unsigned int q2dc = xd->block[24].dequant[0];
+ /* If there is no codeable 2nd order dc
+               or a very small uniform pixel change */
+            if ((sse - var < q2dc * q2dc >> 4) ||
+ (sse / 2 > var && sse - var < 64)) {
+ // Check u and v to make sure skip is ok
+ int sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+ if (sse2 * 2 < threshold) {
+ x->skip = 1;
+ distortion2 = sse + sse2;
+ rate2 = 500;
+
+ /* for best_yrd calculation */
+ rate_uv = 0;
+ distortion_uv = sse2;
+
+ disable_skip = 1;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+
+ break;
}
+ }
}
- //intermodecost[mode_index] = vp8_cost_mv_ref(cpi, this_mode, mdcounts); // Experimental debug code
+ }
+ // intermodecost[mode_index] = vp8_cost_mv_ref(cpi, this_mode, mdcounts); // Experimental debug code
- // Add in the Mv/mode cost
- rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
+ // Add in the Mv/mode cost
+ rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
- // Y cost and distortion
- if(cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, &rate_y, &distortion,
+ // Y cost and distortion
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
-
- rate2 += rate_y;
- distortion2 += distortion;
-
- // UV cost and distortion
- vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
-
- if(cpi->common.txfm_mode == ALLOW_8X8)
- rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- else
- rd_inter16x16_uv(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
- mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
- break;
+ else
+ macro_block_yrd(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+
+ rate2 += rate_y;
+ distortion2 += distortion;
+
+ // UV cost and distortion
+ vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
+
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ else
+ rd_inter16x16_uv(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ break;
default:
- break;
- }
- else /* x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0 */
- {
- int ref1 = x->e_mbd.mode_info_context->mbmi.ref_frame;
- int ref2 = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
-
- mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
- switch (this_mode)
- {
- case NEWMV:
- if (mc_search_result[ref1].as_int == INVALID_MV ||
- mc_search_result[ref2].as_int == INVALID_MV)
- continue;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
+ break;
+ }
+ else { /* x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0 */
+ int ref1 = x->e_mbd.mode_info_context->mbmi.ref_frame;
+ int ref2 = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+
+ mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ switch (this_mode) {
+ case NEWMV:
+ if (mc_search_result[ref1].as_int == INVALID_MV ||
+ mc_search_result[ref2].as_int == INVALID_MV)
+ continue;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
#if CONFIG_HIGH_PRECISION_MV
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
- &frame_best_ref_mv[ref1],
- XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
- &frame_best_ref_mv[ref2],
- XMVCOST, 96,
- x->e_mbd.allow_high_precision_mv);
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
+ &frame_best_ref_mv[ref1],
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
+ &frame_best_ref_mv[ref2],
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
#else
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
- &frame_best_ref_mv[ref1],
- XMVCOST, 96);
- rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
- &frame_best_ref_mv[ref2],
- XMVCOST, 96);
-#endif
- break;
- case ZEROMV:
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = 0;
- break;
- case NEARMV:
- if (frame_near_mv[ref1].as_int == 0 || frame_near_mv[ref2].as_int == 0)
- {
- continue;
- }
- x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_near_mv[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_near_mv[ref2].as_int;
- break;
- case NEARESTMV:
- if (frame_nearest_mv[ref1].as_int == 0 || frame_nearest_mv[ref2].as_int == 0)
- {
- continue;
- }
- x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_nearest_mv[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_nearest_mv[ref2].as_int;
- break;
- case SPLITMV:
- {
- int tmp_rd;
- int this_rd_thresh;
-
- this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
- this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
-
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, &second_best_ref_mv,
- best_yrd, mdcounts,
- &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs) ;
-
- rate2 += rate;
- distortion2 += distortion;
-
- // If even the 'Y' rd value of split is higher than best so far then dont bother looking at UV
- if (tmp_rd < best_yrd)
- {
- // Now work out UV cost and add it in
- rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
- }
- else
- {
- this_rd = INT_MAX;
- disable_skip = 1;
- }
- }
- break;
- default:
- break;
- }
-
- if (this_mode != SPLITMV)
- {
- /* Add in the Mv/mode cost */
- rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
-
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
- if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max))
- {
- continue;
- }
-
- /* build first and second prediction */
- vp8_build_inter16x16_predictors_mby(&x->e_mbd);
- vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
- /* do second round and average the results */
- vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
- &x->e_mbd.predictor[256],
- &x->e_mbd.predictor[320], 16, 8);
-
- /* Y cost and distortion */
- if (cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
-
- rate2 += rate_y;
- distortion2 += distortion;
-
- /* UV cost and distortion */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- else
- rd_inter16x16_uv(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
- }
-
- /* don't bother w/ skip, we would never have come here if skip were enabled */
- x->e_mbd.mode_info_context->mbmi.mode = this_mode;
-
- /* We don't include the cost of the second reference here, because there are only
- * three options: Last/Golden, ARF/Last or Golden/ARF, or in other words if you
- * present them in that order, the second one is always known if the first is known */
- compmode_cost =
- vp8_cost_bit( get_pred_prob( cm, xd, PRED_COMP ), 1 );
- }
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- rate2 += compmode_cost;
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
+ &frame_best_ref_mv[ref1],
+ XMVCOST, 96);
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
+ &frame_best_ref_mv[ref2],
+ XMVCOST, 96);
+#endif
+ break;
+ case ZEROMV:
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int = 0;
+ break;
+ case NEARMV:
+ if (frame_near_mv[ref1].as_int == 0 || frame_near_mv[ref2].as_int == 0) {
+ continue;
+ }
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_near_mv[ref1].as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_near_mv[ref2].as_int;
+ break;
+ case NEARESTMV:
+ if (frame_nearest_mv[ref1].as_int == 0 || frame_nearest_mv[ref2].as_int == 0) {
+ continue;
+ }
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_nearest_mv[ref1].as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_nearest_mv[ref2].as_int;
+ break;
+ case SPLITMV: {
+ int tmp_rd;
+ int this_rd_thresh;
+
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
+
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, &second_best_ref_mv,
+ best_yrd, mdcounts,
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs);
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+          // If even the 'Y' rd value of split is higher than best so far then don't bother looking at UV
+ if (tmp_rd < best_yrd) {
+ // Now work out UV cost and add it in
+ rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ } else {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
}
+ break;
+ default:
+ break;
+ }
+
+ if (this_mode != SPLITMV) {
+ /* Add in the Mv/mode cost */
+ rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
+
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
+ if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+
+ /* build first and second prediction */
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd);
+ vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
+ /* do second round and average the results */
+ vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
+ &x->e_mbd.predictor[256],
+ &x->e_mbd.predictor[320], 16, 8);
+
+ /* Y cost and distortion */
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ macro_block_yrd_8x8(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ else
+ macro_block_yrd(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ rate2 += rate_y;
+ distortion2 += distortion;
- // Estimate the reference frame signaling cost and add it
- // to the rolling cost variable.
- rate2 += ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
-
- if (!disable_skip)
- {
- // Test for the condition where skip block will be activated
- // because there are no non zero coefficients and make any
- // necessary adjustment for rate. Ignore if skip is coded at
- // segment level as the cost wont have been added in.
- if ( cpi->common.mb_no_coeff_skip )
- {
- int mb_skippable;
- int mb_skip_allowed;
- int has_y2 = ( this_mode!=SPLITMV
- &&this_mode!=B_PRED
- &&this_mode!=I8X8_PRED);
-
- if((cpi->common.txfm_mode == ALLOW_8X8) && has_y2)
- {
- if(x->e_mbd.mode_info_context->mbmi.ref_frame!=INTRA_FRAME)
- mb_skippable = mb_is_skippable_8x8(&x->e_mbd);
- else
- mb_skippable = uv_intra_skippable_8x8
- & mby_is_skippable_8x8(&x->e_mbd);
- }
- else
- {
- if(x->e_mbd.mode_info_context->mbmi.ref_frame!=INTRA_FRAME)
- mb_skippable = mb_is_skippable(&x->e_mbd, has_y2);
- else
- mb_skippable = uv_intra_skippable
- & mby_is_skippable(&x->e_mbd, has_y2);
- }
-
- // Is Mb level skip allowed for this mb.
- mb_skip_allowed =
- !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
- get_segdata( xd, segment_id, SEG_LVL_EOB );
-
- if (mb_skippable)
- {
- // Back out the coefficient coding costs
- rate2 -= (rate_y + rate_uv);
- //for best_yrd calculation
- rate_uv = 0;
-
- if ( mb_skip_allowed )
- {
- int prob_skip_cost;
-
- // Cost the skip mb case
+ /* UV cost and distortion */
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ else
+ rd_inter16x16_uv(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ }
+
+ /* don't bother w/ skip, we would never have come here if skip were enabled */
+ x->e_mbd.mode_info_context->mbmi.mode = this_mode;
+
+  /* We don't include the cost of the second reference here, because there are
+   * only three options: Last/Golden, ARF/Last or Golden/ARF; in other words,
+   * if you present them in that order, the second one is always known once
+   * the first is known */
+ compmode_cost =
+ vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 1);
+ }
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ rate2 += compmode_cost;
+ }
+
+
+ // Estimate the reference frame signaling cost and add it
+ // to the rolling cost variable.
+ rate2 += ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
+
+ if (!disable_skip) {
+ // Test for the condition where skip block will be activated
+    // because there are no non-zero coefficients and make any
+    // necessary adjustment for rate. Ignore if skip is coded at
+    // segment level as the cost won't have been added in.
+ if (cpi->common.mb_no_coeff_skip) {
+ int mb_skippable;
+ int mb_skip_allowed;
+ int has_y2 = (this_mode != SPLITMV
+ && this_mode != B_PRED
+ && this_mode != I8X8_PRED);
+
+ if ((cpi->common.txfm_mode == ALLOW_8X8) && has_y2) {
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
+ mb_skippable = mb_is_skippable_8x8(&x->e_mbd);
+ else
+ mb_skippable = uv_intra_skippable_8x8
+ & mby_is_skippable_8x8(&x->e_mbd);
+ } else {
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
+ mb_skippable = mb_is_skippable(&x->e_mbd, has_y2);
+ else
+ mb_skippable = uv_intra_skippable
+ & mby_is_skippable(&x->e_mbd, has_y2);
+ }
+
+      // Is MB-level skip allowed for this mb?
+ mb_skip_allowed =
+ !segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ get_segdata(xd, segment_id, SEG_LVL_EOB);
+
+ if (mb_skippable) {
+ // Back out the coefficient coding costs
+ rate2 -= (rate_y + rate_uv);
+ // for best_yrd calculation
+ rate_uv = 0;
+
+ if (mb_skip_allowed) {
+ int prob_skip_cost;
+
+ // Cost the skip mb case
#if CONFIG_NEWENTROPY
- vp8_prob skip_prob =
- get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP);
-
- if (skip_prob)
- {
- prob_skip_cost = vp8_cost_bit(skip_prob, 1);
- rate2 += prob_skip_cost;
- other_cost += prob_skip_cost;
- }
+ vp8_prob skip_prob =
+ get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP);
+
+ if (skip_prob) {
+ prob_skip_cost = vp8_cost_bit(skip_prob, 1);
+ rate2 += prob_skip_cost;
+ other_cost += prob_skip_cost;
+ }
#else
- if (cpi->prob_skip_false)
- {
- prob_skip_cost =
- vp8_cost_bit(cpi->prob_skip_false, 1);
- rate2 += prob_skip_cost;
- other_cost += prob_skip_cost;
- }
-#endif
- }
- }
- // Add in the cost of the no skip flag.
- else if ( mb_skip_allowed )
- {
- #if CONFIG_NEWENTROPY
- int prob_skip_cost = vp8_cost_bit(
- get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
- #else
- int prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 0);
- #endif
- rate2 += prob_skip_cost;
- other_cost += prob_skip_cost;
- }
+ if (cpi->prob_skip_false) {
+ prob_skip_cost =
+ vp8_cost_bit(cpi->prob_skip_false, 1);
+ rate2 += prob_skip_cost;
+ other_cost += prob_skip_cost;
}
-
- // Calculate the final RD estimate for this mode.
- this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
- }
-
- // Experimental debug code.
- //all_rds[mode_index] = this_rd;
- //all_rates[mode_index] = rate2;
- //all_dist[mode_index] = distortion2;
-
- // Keep record of best intra distortion
- if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
- (this_rd < best_intra_rd) )
- {
- best_intra_rd = this_rd;
- *returnintra = distortion2 ;
- }
-
- if (!disable_skip && x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
- {
- if (this_rd < best_comp_rd)
- best_comp_rd = this_rd;
- if (this_rd < best_single_rd)
- best_single_rd = this_rd;
- if (this_rd < best_hybrid_rd)
- best_hybrid_rd = this_rd;
- }
-
-#if CONFIG_PRED_FILTER
- // Keep track of the best mode irrespective of prediction filter state
- if (this_rd < best_overall_rd)
- {
- best_overall_rd = this_rd;
- best_filter_state = xd->mode_info_context->mbmi.pred_filter_enabled;
- }
-
- // Ignore modes where the prediction filter state doesn't
- // match the state signaled at the frame level
- if ((cm->pred_filter_mode == 2) ||
- (cm->pred_filter_mode ==
- xd->mode_info_context->mbmi.pred_filter_enabled))
- {
#endif
- // Did this mode help.. i.e. is it the new best mode
- if (this_rd < best_rd || x->skip)
- {
- if (!mode_excluded)
- {
- // Note index of best mode so far
- best_mode_index = mode_index;
-
- if (this_mode <= B_PRED)
- {
- if( cpi->common.txfm_mode == ALLOW_8X8
- && this_mode != B_PRED
- && this_mode != I8X8_PRED)
- x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode_8x8;
- else
- x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
- /* required for left and above block mv */
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
- }
-
- other_cost +=
- ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
-
- /* Calculate the final y RD estimate for this mode */
- best_yrd = RDCOST(x->rdmult, x->rddiv, (rate2-rate_uv-other_cost),
- (distortion2-distortion_uv));
-
- *returnrate = rate2;
- *returndistortion = distortion2;
- best_rd = this_rd;
- vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
- vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
-
- if ((this_mode == B_PRED)
- ||(this_mode == I8X8_PRED)
- || (this_mode == SPLITMV))
- for (i = 0; i < 16; i++)
- {
- best_bmodes[i] = x->e_mbd.block[i].bmi;
- }
- }
-
- // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
- cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ }
}
- // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
- else
- {
- cpi->rd_thresh_mult[mode_index] += 4;
-
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
-
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ // Add in the cost of the no skip flag.
+ else if (mb_skip_allowed) {
+#if CONFIG_NEWENTROPY
+ int prob_skip_cost = vp8_cost_bit(
+ get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
+#else
+ int prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 0);
+#endif
+ rate2 += prob_skip_cost;
+ other_cost += prob_skip_cost;
}
+ }
- /* keep record of best compound/single-only prediction */
- if (!disable_skip &&
- x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
- {
- int single_rd, hybrid_rd, single_rate, hybrid_rate;
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- single_rate = rate2 - compmode_cost;
- hybrid_rate = rate2;
- }
- else
- {
- single_rate = rate2;
- hybrid_rate = rate2 + compmode_cost;
- }
-
- single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
- hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
+ // Calculate the final RD estimate for this mode.
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ }
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame == INTRA_FRAME &&
- single_rd < best_single_rd)
- {
- best_single_rd = single_rd;
- }
- else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame != INTRA_FRAME &&
- single_rd < best_comp_rd)
- {
- best_comp_rd = single_rd;
- }
- if (hybrid_rd < best_hybrid_rd)
- {
- best_hybrid_rd = hybrid_rd;
- }
- }
-#if CONFIG_PRED_FILTER
- }
-#endif
+ // Experimental debug code.
+ // all_rds[mode_index] = this_rd;
+ // all_rates[mode_index] = rate2;
+ // all_dist[mode_index] = distortion2;
- if (x->skip)
- break;
+ // Keep record of best intra distortion
+ if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
+ (this_rd < best_intra_rd)) {
+ best_intra_rd = this_rd;
+ *returnintra = distortion2;
+ }
+ if (!disable_skip && x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ if (this_rd < best_comp_rd)
+ best_comp_rd = this_rd;
+ if (this_rd < best_single_rd)
+ best_single_rd = this_rd;
+ if (this_rd < best_hybrid_rd)
+ best_hybrid_rd = this_rd;
}
#if CONFIG_PRED_FILTER
- // Update counts for prediction filter usage
- if (best_filter_state != 0)
- ++cpi->pred_filter_on_count;
- else
- ++cpi->pred_filter_off_count;
-#endif
+ // Keep track of the best mode irrespective of prediction filter state
+ if (this_rd < best_overall_rd) {
+ best_overall_rd = this_rd;
+ best_filter_state = xd->mode_info_context->mbmi.pred_filter_enabled;
+ }
+
+ // Ignore modes where the prediction filter state doesn't
+ // match the state signaled at the frame level
+ if ((cm->pred_filter_mode == 2) ||
+ (cm->pred_filter_mode ==
+ xd->mode_info_context->mbmi.pred_filter_enabled)) {
+#endif
+      // Did this mode help, i.e. is it the new best mode so far?
+ if (this_rd < best_rd || x->skip) {
+ if (!mode_excluded) {
+ // Note index of best mode so far
+ best_mode_index = mode_index;
+
+ if (this_mode <= B_PRED) {
+ if (cpi->common.txfm_mode == ALLOW_8X8
+ && this_mode != B_PRED
+ && this_mode != I8X8_PRED)
+ x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode_8x8;
+ else
+ x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
+ /* required for left and above block mv */
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ }
- // Reduce the activation RD thresholds for the best choice mode
- if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
- {
- int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
+ other_cost +=
+ ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
- cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
- cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
+ /* Calculate the final y RD estimate for this mode */
+ best_yrd = RDCOST(x->rdmult, x->rddiv, (rate2 - rate_uv - other_cost),
+ (distortion2 - distortion_uv));
- // If we chose a split mode then reset the new MV thresholds as well
- /*if ( vp8_mode_order[best_mode_index].mode == SPLITMV )
- {
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWMV] >> 4);
- cpi->rd_thresh_mult[THR_NEWMV] = (cpi->rd_thresh_mult[THR_NEWMV] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWMV]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWMV] = (cpi->rd_baseline_thresh[THR_NEWMV] >> 7) * cpi->rd_thresh_mult[THR_NEWMV];
+ *returnrate = rate2;
+ *returndistortion = distortion2;
+ best_rd = this_rd;
+ vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
+ vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWG] >> 4);
- cpi->rd_thresh_mult[THR_NEWG] = (cpi->rd_thresh_mult[THR_NEWG] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWG]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWG] = (cpi->rd_baseline_thresh[THR_NEWG] >> 7) * cpi->rd_thresh_mult[THR_NEWG];
+ if ((this_mode == B_PRED)
+ || (this_mode == I8X8_PRED)
+ || (this_mode == SPLITMV))
+ for (i = 0; i < 16; i++) {
+ best_bmodes[i] = x->e_mbd.block[i].bmi;
+ }
+ }
- best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWA] >> 4);
- cpi->rd_thresh_mult[THR_NEWA] = (cpi->rd_thresh_mult[THR_NEWA] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWA]-best_adjustment: MIN_THRESHMULT;
- cpi->rd_threshes[THR_NEWA] = (cpi->rd_baseline_thresh[THR_NEWA] >> 7) * cpi->rd_thresh_mult[THR_NEWA];
- }*/
+        // Testing this mode gave rise to an improvement in the best error score; lower the threshold a bit for next time
+ cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
+ cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ }
+ // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
+ else {
+ cpi->rd_thresh_mult[mode_index] += 4;
- }
+ if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- // This code force Altref,0,0 and skip for the frame that overlays a
- // an alrtef unless Altref is filtered. However, this is unsafe if
- // segment level coding of ref frame or mode is enabled for this
- // segment.
- if (!segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) &&
- !segfeature_active( xd, segment_id, SEG_LVL_MODE ) &&
- cpi->is_src_frame_alt_ref &&
- (cpi->oxcf.arnr_max_frames == 0) &&
- (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
- {
- x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
- x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
- x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
- x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
- (cpi->common.mb_no_coeff_skip) ? 1 : 0;
- x->e_mbd.mode_info_context->mbmi.partitioning = 0;
-
- *best_single_rd_diff = *best_comp_rd_diff = *best_hybrid_rd_diff = 0;
-
- store_coding_context (x, mb_index, best_mode_index, &best_partition,
- &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
- &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
- return;
- }
+ cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ }
- // macroblock modes
- vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
+ /* keep record of best compound/single-only prediction */
+ if (!disable_skip &&
+ x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
+ int single_rd, hybrid_rd, single_rate, hybrid_rate;
- if (best_mbmode.mode == B_PRED)
- {
- for (i = 0; i < 16; i++)
- {
- xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
- xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ single_rate = rate2 - compmode_cost;
+ hybrid_rate = rate2;
+ } else {
+ single_rate = rate2;
+ hybrid_rate = rate2 + compmode_cost;
}
- }
- if (best_mbmode.mode == I8X8_PRED)
- {
- set_i8x8_block_modes(x, mode8x8);
- }
-
- if (best_mbmode.mode == SPLITMV)
- {
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
- for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
-
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+ single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
+ hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
- x->e_mbd.mode_info_context->mbmi.mv.as_int =
- x->partition_info->bmi[15].mv.as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int =
- x->partition_info->bmi[15].second_mv.as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame == INTRA_FRAME &&
+ single_rd < best_single_rd) {
+ best_single_rd = single_rd;
+ } else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame != INTRA_FRAME &&
+ single_rd < best_comp_rd) {
+ best_comp_rd = single_rd;
+ }
+ if (hybrid_rd < best_hybrid_rd) {
+ best_hybrid_rd = hybrid_rd;
+ }
+ }
+#if CONFIG_PRED_FILTER
}
+#endif
- if (best_single_rd == INT_MAX)
- *best_single_rd_diff = INT_MIN;
- else
- *best_single_rd_diff = best_rd - best_single_rd;
- if (best_comp_rd == INT_MAX)
- *best_comp_rd_diff = INT_MIN;
- else
- *best_comp_rd_diff = best_rd - best_comp_rd;
- if (best_hybrid_rd == INT_MAX)
- *best_hybrid_rd_diff = INT_MIN;
- else
- *best_hybrid_rd_diff = best_rd - best_hybrid_rd;
+ if (x->skip)
+ break;
+
+ }
- store_coding_context (x, mb_index, best_mode_index, &best_partition,
- &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
- &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
+#if CONFIG_PRED_FILTER
+ // Update counts for prediction filter usage
+ if (best_filter_state != 0)
+ ++cpi->pred_filter_on_count;
+ else
+ ++cpi->pred_filter_off_count;
+#endif
+
+ // Reduce the activation RD thresholds for the best choice mode
+ if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
+ int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
+
+ cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
+ cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
+
+ // If we chose a split mode then reset the new MV thresholds as well
+ /*if ( vp8_mode_order[best_mode_index].mode == SPLITMV )
+ {
+ best_adjustment = 4; // (cpi->rd_thresh_mult[THR_NEWMV] >> 4);
+ cpi->rd_thresh_mult[THR_NEWMV] = (cpi->rd_thresh_mult[THR_NEWMV] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWMV]-best_adjustment: MIN_THRESHMULT;
+ cpi->rd_threshes[THR_NEWMV] = (cpi->rd_baseline_thresh[THR_NEWMV] >> 7) * cpi->rd_thresh_mult[THR_NEWMV];
+
+ best_adjustment = 4; // (cpi->rd_thresh_mult[THR_NEWG] >> 4);
+ cpi->rd_thresh_mult[THR_NEWG] = (cpi->rd_thresh_mult[THR_NEWG] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWG]-best_adjustment: MIN_THRESHMULT;
+ cpi->rd_threshes[THR_NEWG] = (cpi->rd_baseline_thresh[THR_NEWG] >> 7) * cpi->rd_thresh_mult[THR_NEWG];
+
+ best_adjustment = 4; // (cpi->rd_thresh_mult[THR_NEWA] >> 4);
+ cpi->rd_thresh_mult[THR_NEWA] = (cpi->rd_thresh_mult[THR_NEWA] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWA]-best_adjustment: MIN_THRESHMULT;
+ cpi->rd_threshes[THR_NEWA] = (cpi->rd_baseline_thresh[THR_NEWA] >> 7) * cpi->rd_thresh_mult[THR_NEWA];
+ }*/
+
+ }
+
+  // This code forces Altref,0,0 and skip for the frame that overlays
+  // an altref unless Altref is filtered. However, this is unsafe if
+ // segment level coding of ref frame or mode is enabled for this
+ // segment.
+ if (!segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ !segfeature_active(xd, segment_id, SEG_LVL_MODE) &&
+ cpi->is_src_frame_alt_ref &&
+ (cpi->oxcf.arnr_max_frames == 0) &&
+ (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
+ x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
+ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
+ (cpi->common.mb_no_coeff_skip) ? 1 : 0;
+ x->e_mbd.mode_info_context->mbmi.partitioning = 0;
+
+ *best_single_rd_diff = *best_comp_rd_diff = *best_hybrid_rd_diff = 0;
+
+ store_coding_context(x, mb_index, best_mode_index, &best_partition,
+ &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
+ &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
+ return;
+ }
+
+ // macroblock modes
+ vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
+
+ if (best_mbmode.mode == B_PRED) {
+ for (i = 0; i < 16; i++) {
+ xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
+ xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
+ }
+ }
+
+ if (best_mbmode.mode == I8X8_PRED) {
+ set_i8x8_block_modes(x, mode8x8);
+ }
+
+ if (best_mbmode.mode == SPLITMV) {
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
+
+ vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+
+ x->e_mbd.mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
+ }
+
+ if (best_single_rd == INT_MAX)
+ *best_single_rd_diff = INT_MIN;
+ else
+ *best_single_rd_diff = best_rd - best_single_rd;
+ if (best_comp_rd == INT_MAX)
+ *best_comp_rd_diff = INT_MIN;
+ else
+ *best_comp_rd_diff = best_rd - best_comp_rd;
+ if (best_hybrid_rd == INT_MAX)
+ *best_hybrid_rd_diff = INT_MIN;
+ else
+ *best_hybrid_rd_diff = best_rd - best_hybrid_rd;
+
+ store_coding_context(x, mb_index, best_mode_index, &best_partition,
+ &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
+ &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
}
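
Throughout the loop above, this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2) folds bit cost and distortion into a single Lagrangian score, and best_yrd re-applies it to the luma-only terms. A minimal sketch of such a macro, assuming the conventional fixed-point form (the rounding constant and shift here are illustrative, not a quote of the shipped header):

/* Weight rate R by the multiplier RM (fixed point: +128 rounds, >>8
 * renormalizes) and distortion D by DM; the smallest value wins. */
#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + (DM) * (D))
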
-int vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x)
-{
- MACROBLOCKD *xd = &x->e_mbd;
- int error4x4, error16x16;
+int vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ int error4x4, error16x16;
#if CONFIG_COMP_INTRA_PRED
- int error4x4d, rate4x4d, dist4x4d;
-#endif
- int rate4x4, rate16x16 = 0, rateuv;
- int dist4x4, dist16x16, distuv;
- int rate;
- int rate4x4_tokenonly = 0;
- int rate16x16_tokenonly = 0;
- int rateuv_tokenonly = 0;
- int error8x8, rate8x8_tokenonly=0;
- int rate8x8, dist8x8;
- int mode16x16;
- int mode8x8[2][4];
-
- xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
-
- rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
- rate = rateuv;
-
- error16x16 = rd_pick_intra16x16mby_mode(cpi, x,
- &rate16x16, &rate16x16_tokenonly,
- &dist16x16);
- mode16x16 = xd->mode_info_context->mbmi.mode;
-
- error8x8 = rd_pick_intra8x8mby_modes(cpi, x,
- &rate8x8, &rate8x8_tokenonly,
- &dist8x8, error16x16);
- mode8x8[0][0]= xd->mode_info_context->bmi[0].as_mode.first;
- mode8x8[0][1]= xd->mode_info_context->bmi[2].as_mode.first;
- mode8x8[0][2]= xd->mode_info_context->bmi[8].as_mode.first;
- mode8x8[0][3]= xd->mode_info_context->bmi[10].as_mode.first;
+ int error4x4d, rate4x4d, dist4x4d;
+#endif
+ int rate4x4, rate16x16 = 0, rateuv;
+ int dist4x4, dist16x16, distuv;
+ int rate;
+ int rate4x4_tokenonly = 0;
+ int rate16x16_tokenonly = 0;
+ int rateuv_tokenonly = 0;
+ int error8x8, rate8x8_tokenonly = 0;
+ int rate8x8, dist8x8;
+ int mode16x16;
+ int mode8x8[2][4];
+
+ xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+
+ rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
+ rate = rateuv;
+
+ error16x16 = rd_pick_intra16x16mby_mode(cpi, x,
+ &rate16x16, &rate16x16_tokenonly,
+ &dist16x16);
+ mode16x16 = xd->mode_info_context->mbmi.mode;
+
+ error8x8 = rd_pick_intra8x8mby_modes(cpi, x,
+ &rate8x8, &rate8x8_tokenonly,
+ &dist8x8, error16x16);
+ mode8x8[0][0] = xd->mode_info_context->bmi[0].as_mode.first;
+ mode8x8[0][1] = xd->mode_info_context->bmi[2].as_mode.first;
+ mode8x8[0][2] = xd->mode_info_context->bmi[8].as_mode.first;
+ mode8x8[0][3] = xd->mode_info_context->bmi[10].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
- mode8x8[1][0]= xd->mode_info_context->bmi[0].as_mode.second;
- mode8x8[1][1]= xd->mode_info_context->bmi[2].as_mode.second;
- mode8x8[1][2]= xd->mode_info_context->bmi[8].as_mode.second;
- mode8x8[1][3]= xd->mode_info_context->bmi[10].as_mode.second;
+ mode8x8[1][0] = xd->mode_info_context->bmi[0].as_mode.second;
+ mode8x8[1][1] = xd->mode_info_context->bmi[2].as_mode.second;
+ mode8x8[1][2] = xd->mode_info_context->bmi[8].as_mode.second;
+ mode8x8[1][3] = xd->mode_info_context->bmi[10].as_mode.second;
#endif
- error4x4 = rd_pick_intra4x4mby_modes(cpi, x,
- &rate4x4, &rate4x4_tokenonly,
- &dist4x4, error16x16,
+ error4x4 = rd_pick_intra4x4mby_modes(cpi, x,
+ &rate4x4, &rate4x4_tokenonly,
+ &dist4x4, error16x16,
#if CONFIG_COMP_INTRA_PRED
- 0,
+ 0,
#endif
- 0);
+ 0);
#if CONFIG_COMP_INTRA_PRED
- error4x4d = rd_pick_intra4x4mby_modes(cpi, x,
- &rate4x4d, &rate4x4_tokenonly,
- &dist4x4d, error16x16, 1, 0);
+ error4x4d = rd_pick_intra4x4mby_modes(cpi, x,
+ &rate4x4d, &rate4x4_tokenonly,
+ &dist4x4d, error16x16, 1, 0);
#endif
- if(error8x8> error16x16)
- {
- if (error4x4 < error16x16)
- {
+ if (error8x8 > error16x16) {
+ if (error4x4 < error16x16) {
#if CONFIG_COMP_INTRA_PRED
- rate += (error4x4d < error4x4) ? rate4x4d : rate4x4;
- if (error4x4d >= error4x4) // FIXME save original modes etc.
- error4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4,
- &rate4x4_tokenonly,
- &dist4x4, error16x16, 0,
- cpi->update_context);
+ rate += (error4x4d < error4x4) ? rate4x4d : rate4x4;
+ if (error4x4d >= error4x4) // FIXME save original modes etc.
+ error4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4,
+ &rate4x4_tokenonly,
+ &dist4x4, error16x16, 0,
+ cpi->update_context);
#else
- rate += rate4x4;
+ rate += rate4x4;
#endif
- xd->mode_info_context->mbmi.mode = B_PRED;
- }
- else
- {
- xd->mode_info_context->mbmi.mode = mode16x16;
- rate += rate16x16;
+ xd->mode_info_context->mbmi.mode = B_PRED;
+ } else {
+ xd->mode_info_context->mbmi.mode = mode16x16;
+ rate += rate16x16;
- }
}
- else
- {
- if (error4x4 < error8x8)
- {
+ } else {
+ if (error4x4 < error8x8) {
#if CONFIG_COMP_INTRA_PRED
- rate += (error4x4d < error4x4) ? rate4x4d : rate4x4;
- if (error4x4d >= error4x4) // FIXME save original modes etc.
- error4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4,
- &rate4x4_tokenonly,
- &dist4x4, error16x16, 0,
- cpi->update_context);
+ rate += (error4x4d < error4x4) ? rate4x4d : rate4x4;
+ if (error4x4d >= error4x4) // FIXME save original modes etc.
+ error4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4,
+ &rate4x4_tokenonly,
+ &dist4x4, error16x16, 0,
+ cpi->update_context);
#else
- rate += rate4x4;
+ rate += rate4x4;
#endif
- xd->mode_info_context->mbmi.mode = B_PRED;
- }
- else
- {
+ xd->mode_info_context->mbmi.mode = B_PRED;
+ } else {
- xd->mode_info_context->mbmi.mode = I8X8_PRED;
- set_i8x8_block_modes(x, mode8x8);
- rate += rate8x8;
- }
+ xd->mode_info_context->mbmi.mode = I8X8_PRED;
+ set_i8x8_block_modes(x, mode8x8);
+ rate += rate8x8;
}
- return rate;
+ }
+ return rate;
}
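
Condensing the #if-laden ladder above: the intra block size is chosen by pairwise RD comparison, with 4x4 (B_PRED) able to override whichever of 16x16 or 8x8 survives the first test. An illustrative restatement, not a refactor of the shipped code:

/* If 8x8 loses to 16x16, the contest is 4x4 vs 16x16;
 * otherwise it is 4x4 vs 8x8. */
if (error8x8 > error16x16)
  best = (error4x4 < error16x16) ? B_PRED : mode16x16;
else
  best = (error4x4 < error8x8) ? B_PRED : I8X8_PRED;
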
int vp8cx_pick_mode_inter_macroblock
(
- VP8_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset
-)
-{
- VP8_COMMON *cm = &cpi->common;
- MACROBLOCKD *const xd = &x->e_mbd;
- int rate;
- int distortion;
- int intra_error = 0;
- unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
+ VP8_COMP *cpi, MACROBLOCK *x,
+ int recon_yoffset, int recon_uvoffset
+) {
+ VP8_COMMON *cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int rate;
+ int distortion;
+ int intra_error = 0;
+ unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
#if CONFIG_COMPRED
- unsigned char ref_pred_flag;
-#endif
-
- if (xd->segmentation_enabled)
- x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
- else
- x->encode_breakout = cpi->oxcf.encode_breakout;
-
- //if (cpi->sf.RD)
- // For now this codebase is limited to a single rd encode path
- {
- int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
- int single, compound, hybrid;
-
- vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
- &distortion, &intra_error, &single, &compound,
- &hybrid);
-
- // TODO Save these to add in only if MB coding mode is selected?
- cpi->rd_single_diff += single;
- cpi->rd_comp_diff += compound;
- cpi->rd_hybrid_diff += hybrid;
- if (xd->mode_info_context->mbmi.ref_frame)
- {
- unsigned char pred_context;
-
- pred_context = get_pred_context( cm, xd, PRED_COMP );
-
- if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
- cpi->single_pred_count[pred_context]++;
- else
- cpi->comp_pred_count[pred_context]++;
- }
-
- /* test code: set transform size based on mode selection */
- if( cpi->common.txfm_mode == ALLOW_8X8
- && xd->mode_info_context->mbmi.mode != I8X8_PRED
- && xd->mode_info_context->mbmi.mode != B_PRED
- && xd->mode_info_context->mbmi.mode != SPLITMV)
- {
- xd->mode_info_context->mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count ++;
- }
- else
- {
- xd->mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
- /* restore cpi->zbin_mode_boost_enabled */
- cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
- }
- //else
- // The non rd encode path has been deleted from this code base
- // to simplify development
- // vp8_pick_inter_mode
-
- // Store metrics so they can be added in to totals if this mode is picked
- x->mb_context[xd->mb_index].distortion = distortion;
- x->mb_context[xd->mb_index].intra_error = intra_error;
-
- return rate;
+ unsigned char ref_pred_flag;
+#endif
+
+ if (xd->segmentation_enabled)
+ x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
+ else
+ x->encode_breakout = cpi->oxcf.encode_breakout;
+
+ // if (cpi->sf.RD)
+ // For now this codebase is limited to a single rd encode path
+ {
+ int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+ int single, compound, hybrid;
+
+ vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
+ &distortion, &intra_error, &single, &compound,
+ &hybrid);
+
+ // TODO Save these to add in only if MB coding mode is selected?
+ cpi->rd_single_diff += single;
+ cpi->rd_comp_diff += compound;
+ cpi->rd_hybrid_diff += hybrid;
+ if (xd->mode_info_context->mbmi.ref_frame) {
+ unsigned char pred_context;
+
+ pred_context = get_pred_context(cm, xd, PRED_COMP);
+
+ if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
+ cpi->single_pred_count[pred_context]++;
+ else
+ cpi->comp_pred_count[pred_context]++;
+ }
+
+ /* test code: set transform size based on mode selection */
+ if (cpi->common.txfm_mode == ALLOW_8X8
+ && xd->mode_info_context->mbmi.mode != I8X8_PRED
+ && xd->mode_info_context->mbmi.mode != B_PRED
+ && xd->mode_info_context->mbmi.mode != SPLITMV) {
+ xd->mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count++;
+ } else {
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count++;
+ }
+
+ /* restore cpi->zbin_mode_boost_enabled */
+ cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+ }
+ // else
+ // The non rd encode path has been deleted from this code base
+ // to simplify development
+ // vp8_pick_inter_mode
+
+ // Store metrics so they can be added in to totals if this mode is picked
+ x->mb_context[xd->mb_index].distortion = distortion;
+ x->mb_context[xd->mb_index].intra_error = intra_error;
+
+ return rate;
}
diff --git a/vp8/encoder/rdopt.h b/vp8/encoder/rdopt.h
index 5e11c8d49..0077411d0 100644
--- a/vp8/encoder/rdopt.h
+++ b/vp8/encoder/rdopt.h
@@ -23,14 +23,14 @@ extern int vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_mv_pred
(
- VP8_COMP *cpi,
- MACROBLOCKD *xd,
- const MODE_INFO *here,
- int_mv *mvp,
- int refframe,
- int *ref_frame_sign_bias,
- int *sr,
- int near_sadidx[]
+ VP8_COMP *cpi,
+ MACROBLOCKD *xd,
+ const MODE_INFO *here,
+ int_mv *mvp,
+ int refframe,
+ int *ref_frame_sign_bias,
+ int *sr,
+ int near_sadidx[]
);
extern void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]);
extern void vp8_init_me_luts();
diff --git a/vp8/encoder/sad_c.c b/vp8/encoder/sad_c.c
index c734458a9..5ce13ec12 100644
--- a/vp8/encoder/sad_c.c
+++ b/vp8/encoder/sad_c.c
@@ -14,389 +14,362 @@
#include "vpx/vpx_integer.h"
unsigned int vp8_sad16x16_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad)
-{
-
- int r, c;
- unsigned int sad = 0;
-
- for (r = 0; r < 16; r++)
- {
- for (c = 0; c < 16; c++)
- {
- sad += abs(src_ptr[c] - ref_ptr[c]);
- }
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int max_sad) {
+
+ int r, c;
+ unsigned int sad = 0;
+
+ for (r = 0; r < 16; r++) {
+ for (c = 0; c < 16; c++) {
+ sad += abs(src_ptr[c] - ref_ptr[c]);
}
- return sad;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ }
+
+ return sad;
}
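
Note that this reference implementation accepts max_sad but never reads it; optimized variants may use it to abandon a losing candidate early. A hedged sketch of such an early exit (an assumption about how an optimized version could behave, not the shipped code; abs() comes from <stdlib.h> as in the functions here):

/* Row-wise SAD with early termination once the running sum can no
 * longer beat the best candidate found so far. */
static unsigned int sad16x16_cutoff(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr, int ref_stride,
                                    unsigned int max_sad) {
  int r, c;
  unsigned int sad = 0;

  for (r = 0; r < 16; r++) {
    for (c = 0; c < 16; c++)
      sad += abs(src_ptr[c] - ref_ptr[c]);
    if (sad > max_sad)
      break;                 /* caller only needs "worse than best" */
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  return sad;
}
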
static __inline
unsigned int sad_mx_n_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int m,
- int n)
-{
-
- int r, c;
- unsigned int sad = 0;
-
- for (r = 0; r < n; r++)
- {
- for (c = 0; c < m; c++)
- {
- sad += abs(src_ptr[c] - ref_ptr[c]);
- }
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int m,
+ int n) {
+
+ int r, c;
+ unsigned int sad = 0;
+
+ for (r = 0; r < n; r++) {
+ for (c = 0; c < m; c++) {
+ sad += abs(src_ptr[c] - ref_ptr[c]);
}
- return sad;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ }
+
+ return sad;
}
unsigned int vp8_sad8x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad)
-{
-
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 8);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int max_sad) {
+
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 8);
}
unsigned int vp8_sad16x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad)
-{
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int max_sad) {
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 8);
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 8);
}
unsigned int vp8_sad8x16_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad)
-{
-
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 16);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int max_sad) {
+
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 16);
}
unsigned int vp8_sad4x4_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad)
-{
-
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int max_sad) {
+
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
}
void vp8_sad16x16x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad16x16x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned short *sad_array
-)
-{
- sad_array[0] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
- sad_array[3] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 3 , ref_stride, 0x7fffffff);
- sad_array[4] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
- sad_array[5] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
- sad_array[6] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 6 , ref_stride, 0x7fffffff);
- sad_array[7] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array
+) {
+ sad_array[0] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ sad_array[3] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
+ sad_array[4] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
+ sad_array[5] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
+ sad_array[6] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
+ sad_array[7] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
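
The x3/x8 variants batch SADs at consecutive horizontal offsets so a search can score a run of neighbouring candidates in one call. Behaviourally they match this caller-side loop (illustrative; src and ref are assumed to point at the current block and the left-most candidate):

unsigned short sads[8];
int i;
for (i = 0; i < 8; i++)
  sads[i] = (unsigned short)vp8_sad16x16_c(src, src_stride,
                                           ref + i, ref_stride, 0x7fffffff);
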
void vp8_sad16x8x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad16x8x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned short *sad_array
-)
-{
- sad_array[0] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
- sad_array[3] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 3 , ref_stride, 0x7fffffff);
- sad_array[4] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
- sad_array[5] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
- sad_array[6] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 6 , ref_stride, 0x7fffffff);
- sad_array[7] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array
+) {
+ sad_array[0] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ sad_array[3] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
+ sad_array[4] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
+ sad_array[5] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
+ sad_array[6] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
+ sad_array[7] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad8x8x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad8x8x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned short *sad_array
-)
-{
- sad_array[0] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
- sad_array[3] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 3 , ref_stride, 0x7fffffff);
- sad_array[4] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
- sad_array[5] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
- sad_array[6] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 6 , ref_stride, 0x7fffffff);
- sad_array[7] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array
+) {
+ sad_array[0] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ sad_array[3] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
+ sad_array[4] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
+ sad_array[5] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
+ sad_array[6] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
+ sad_array[7] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad8x16x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad8x16x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned short *sad_array
-)
-{
- sad_array[0] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
- sad_array[3] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 3 , ref_stride, 0x7fffffff);
- sad_array[4] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
- sad_array[5] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
- sad_array[6] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 6 , ref_stride, 0x7fffffff);
- sad_array[7] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array
+) {
+ sad_array[0] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ sad_array[3] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
+ sad_array[4] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
+ sad_array[5] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
+ sad_array[6] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
+ sad_array[7] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad4x4x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp8_sad4x4x8_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- unsigned short *sad_array
-)
-{
- sad_array[0] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr , ref_stride, 0x7fffffff);
- sad_array[1] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
- sad_array[2] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
- sad_array[3] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 3 , ref_stride, 0x7fffffff);
- sad_array[4] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
- sad_array[5] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
- sad_array[6] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 6 , ref_stride, 0x7fffffff);
- sad_array[7] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array
+) {
+ sad_array[0] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, 0x7fffffff);
+ sad_array[3] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, 0x7fffffff);
+ sad_array[4] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, 0x7fffffff);
+ sad_array[5] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, 0x7fffffff);
+ sad_array[6] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, 0x7fffffff);
+ sad_array[7] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, 0x7fffffff);
}
void vp8_sad16x16x4d_c(
- const unsigned char *src_ptr,
- int src_stride,
- unsigned char *ref_ptr[],
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
- sad_array[3] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad16x8x4d_c(
- const unsigned char *src_ptr,
- int src_stride,
- unsigned char *ref_ptr[],
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
- sad_array[3] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad8x8x4d_c(
- const unsigned char *src_ptr,
- int src_stride,
- unsigned char *ref_ptr[],
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
- sad_array[3] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad8x16x4d_c(
- const unsigned char *src_ptr,
- int src_stride,
- unsigned char *ref_ptr[],
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
- sad_array[3] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp8_sad4x4x4d_c(
- const unsigned char *src_ptr,
- int src_stride,
- unsigned char *ref_ptr[],
- int ref_stride,
- unsigned int *sad_array
-)
-{
- sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
- sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
- sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
- sad_array[3] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
+ const unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array
+) {
+ sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, 0x7fffffff);
}
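
The x4d variants instead take four independent reference pointers, which fits diamond/star search patterns whose candidates are not contiguous in memory. Illustrative usage (the candidate offsets and the ref pointer are hypothetical):

unsigned char *cand[4] = {
  ref - ref_stride,   /* up    */
  ref - 1,            /* left  */
  ref + 1,            /* right */
  ref + ref_stride    /* down  */
};
unsigned int sads[4];
vp8_sad16x16x4d_c(src, src_stride, cand, ref_stride, sads);
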
/* Copy 2 macroblocks to a buffer */
void vp8_copy32xn_c(
- unsigned char *src_ptr,
- int src_stride,
- unsigned char *dst_ptr,
- int dst_stride,
- int height)
-{
- int r;
-
- for (r = 0; r < height; r++)
- {
+ unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *dst_ptr,
+ int dst_stride,
+ int height) {
+ int r;
+
+ for (r = 0; r < height; r++) {
#if !(CONFIG_FAST_UNALIGNED)
- dst_ptr[0] = src_ptr[0];
- dst_ptr[1] = src_ptr[1];
- dst_ptr[2] = src_ptr[2];
- dst_ptr[3] = src_ptr[3];
- dst_ptr[4] = src_ptr[4];
- dst_ptr[5] = src_ptr[5];
- dst_ptr[6] = src_ptr[6];
- dst_ptr[7] = src_ptr[7];
- dst_ptr[8] = src_ptr[8];
- dst_ptr[9] = src_ptr[9];
- dst_ptr[10] = src_ptr[10];
- dst_ptr[11] = src_ptr[11];
- dst_ptr[12] = src_ptr[12];
- dst_ptr[13] = src_ptr[13];
- dst_ptr[14] = src_ptr[14];
- dst_ptr[15] = src_ptr[15];
- dst_ptr[16] = src_ptr[16];
- dst_ptr[17] = src_ptr[17];
- dst_ptr[18] = src_ptr[18];
- dst_ptr[19] = src_ptr[19];
- dst_ptr[20] = src_ptr[20];
- dst_ptr[21] = src_ptr[21];
- dst_ptr[22] = src_ptr[22];
- dst_ptr[23] = src_ptr[23];
- dst_ptr[24] = src_ptr[24];
- dst_ptr[25] = src_ptr[25];
- dst_ptr[26] = src_ptr[26];
- dst_ptr[27] = src_ptr[27];
- dst_ptr[28] = src_ptr[28];
- dst_ptr[29] = src_ptr[29];
- dst_ptr[30] = src_ptr[30];
- dst_ptr[31] = src_ptr[31];
+ dst_ptr[0] = src_ptr[0];
+ dst_ptr[1] = src_ptr[1];
+ dst_ptr[2] = src_ptr[2];
+ dst_ptr[3] = src_ptr[3];
+ dst_ptr[4] = src_ptr[4];
+ dst_ptr[5] = src_ptr[5];
+ dst_ptr[6] = src_ptr[6];
+ dst_ptr[7] = src_ptr[7];
+ dst_ptr[8] = src_ptr[8];
+ dst_ptr[9] = src_ptr[9];
+ dst_ptr[10] = src_ptr[10];
+ dst_ptr[11] = src_ptr[11];
+ dst_ptr[12] = src_ptr[12];
+ dst_ptr[13] = src_ptr[13];
+ dst_ptr[14] = src_ptr[14];
+ dst_ptr[15] = src_ptr[15];
+ dst_ptr[16] = src_ptr[16];
+ dst_ptr[17] = src_ptr[17];
+ dst_ptr[18] = src_ptr[18];
+ dst_ptr[19] = src_ptr[19];
+ dst_ptr[20] = src_ptr[20];
+ dst_ptr[21] = src_ptr[21];
+ dst_ptr[22] = src_ptr[22];
+ dst_ptr[23] = src_ptr[23];
+ dst_ptr[24] = src_ptr[24];
+ dst_ptr[25] = src_ptr[25];
+ dst_ptr[26] = src_ptr[26];
+ dst_ptr[27] = src_ptr[27];
+ dst_ptr[28] = src_ptr[28];
+ dst_ptr[29] = src_ptr[29];
+ dst_ptr[30] = src_ptr[30];
+ dst_ptr[31] = src_ptr[31];
#else
- ((uint32_t *)dst_ptr)[0] = ((uint32_t *)src_ptr)[0] ;
- ((uint32_t *)dst_ptr)[1] = ((uint32_t *)src_ptr)[1] ;
- ((uint32_t *)dst_ptr)[2] = ((uint32_t *)src_ptr)[2] ;
- ((uint32_t *)dst_ptr)[3] = ((uint32_t *)src_ptr)[3] ;
- ((uint32_t *)dst_ptr)[4] = ((uint32_t *)src_ptr)[4] ;
- ((uint32_t *)dst_ptr)[5] = ((uint32_t *)src_ptr)[5] ;
- ((uint32_t *)dst_ptr)[6] = ((uint32_t *)src_ptr)[6] ;
- ((uint32_t *)dst_ptr)[7] = ((uint32_t *)src_ptr)[7] ;
+ ((uint32_t *)dst_ptr)[0] = ((uint32_t *)src_ptr)[0];
+ ((uint32_t *)dst_ptr)[1] = ((uint32_t *)src_ptr)[1];
+ ((uint32_t *)dst_ptr)[2] = ((uint32_t *)src_ptr)[2];
+ ((uint32_t *)dst_ptr)[3] = ((uint32_t *)src_ptr)[3];
+ ((uint32_t *)dst_ptr)[4] = ((uint32_t *)src_ptr)[4];
+ ((uint32_t *)dst_ptr)[5] = ((uint32_t *)src_ptr)[5];
+ ((uint32_t *)dst_ptr)[6] = ((uint32_t *)src_ptr)[6];
+ ((uint32_t *)dst_ptr)[7] = ((uint32_t *)src_ptr)[7];
#endif
- src_ptr += src_stride;
- dst_ptr += dst_stride;
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
- }
+ }
}
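
The CONFIG_FAST_UNALIGNED branch above casts byte pointers to uint32_t, which presumes the target tolerates unaligned 32-bit accesses and sidesteps strict aliasing. A portable sketch that usually compiles to the same wide loads (an alternative formulation, not the shipped code):

#include <string.h>

/* Fixed-size memcpy per row: alignment- and aliasing-safe, and
 * typically lowered by the compiler to a few wide loads/stores. */
static void copy_row32(unsigned char *dst_ptr, const unsigned char *src_ptr) {
  memcpy(dst_ptr, src_ptr, 32);
}
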
diff --git a/vp8/encoder/satd_c.c b/vp8/encoder/satd_c.c
index 88c304b1f..2ce1b9937 100644
--- a/vp8/encoder/satd_c.c
+++ b/vp8/encoder/satd_c.c
@@ -16,38 +16,33 @@ unsigned int vp8_satd16x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
- unsigned int *psatd)
-{
- int r, c, i;
- unsigned int satd = 0;
- DECLARE_ALIGNED(16, short, diff_in[256]);
- DECLARE_ALIGNED(16, short, diff_out[16]);
- short *in;
+ unsigned int *psatd) {
+ int r, c, i;
+ unsigned int satd = 0;
+ DECLARE_ALIGNED(16, short, diff_in[256]);
+ DECLARE_ALIGNED(16, short, diff_out[16]);
+ short *in;
- for (r = 0; r < 16; r++)
- {
- for (c = 0; c < 16; c++)
- {
- diff_in[r * 16 + c] = src_ptr[c] - ref_ptr[c];
- }
- src_ptr += src_stride;
- ref_ptr += ref_stride;
+ for (r = 0; r < 16; r++) {
+ for (c = 0; c < 16; c++) {
+ diff_in[r * 16 + c] = src_ptr[c] - ref_ptr[c];
}
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ }
- in = diff_in;
- for (r = 0; r < 16; r += 4)
- {
- for (c = 0; c < 16; c+=4)
- {
- vp8_short_walsh4x4_c(in + c, diff_out, 32);
- for(i = 0; i < 16; i++)
- satd += abs(diff_out[i]);
- }
- in += 64;
+ in = diff_in;
+ for (r = 0; r < 16; r += 4) {
+ for (c = 0; c < 16; c += 4) {
+ vp8_short_walsh4x4_c(in + c, diff_out, 32);
+ for (i = 0; i < 16; i++)
+ satd += abs(diff_out[i]);
}
+ in += 64;
+ }
- if (psatd)
- *psatd = satd;
+ if (psatd)
+ *psatd = satd;
- return satd;
+ return satd;
}
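
vp8_satd16x16_c computes a sum of absolute transformed differences: the 16x16 residual is cut into 4x4 blocks, each is run through vp8_short_walsh4x4_c, and the absolute coefficients are accumulated. A self-contained sketch for one 4x4 block, using a plain unscaled Hadamard butterfly rather than the library's exact walsh4x4 scaling:

#include <stdlib.h>

/* SATD of one 4x4 residual block d[16] (row-major): 4-point Hadamard
 * on rows, then on columns, then sum of absolute coefficients. */
static unsigned int satd4x4(const short d[16]) {
  short t[16], o[16];
  unsigned int satd = 0;
  int i;

  for (i = 0; i < 4; i++) {   /* rows */
    short a = d[i * 4 + 0] + d[i * 4 + 3], b = d[i * 4 + 1] + d[i * 4 + 2];
    short c = d[i * 4 + 1] - d[i * 4 + 2], e = d[i * 4 + 0] - d[i * 4 + 3];
    t[i * 4 + 0] = a + b;  t[i * 4 + 1] = a - b;
    t[i * 4 + 2] = e + c;  t[i * 4 + 3] = e - c;
  }
  for (i = 0; i < 4; i++) {   /* columns */
    short a = t[0 + i] + t[12 + i], b = t[4 + i] + t[8 + i];
    short c = t[4 + i] - t[8 + i],  e = t[0 + i] - t[12 + i];
    o[0 + i] = a + b;  o[4 + i]  = a - b;
    o[8 + i] = e + c;  o[12 + i] = e - c;
  }
  for (i = 0; i < 16; i++)
    satd += abs(o[i]);
  return satd;
}
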
diff --git a/vp8/encoder/segmentation.c b/vp8/encoder/segmentation.c
index c36246a02..83b27a6e9 100644
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -14,304 +14,276 @@
#include "segmentation.h"
#include "vp8/common/pred_common.h"
-void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
-{
- int mb_row, mb_col;
+void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
+ int mb_row, mb_col;
+
+ MODE_INFO *this_mb_mode_info = cm->mi;
+
+ x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
+
+ if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame)) {
+    // Reset GF usage monitors
+ vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
+ cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
+ } else {
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+
+        // If using golden or altref then set the GF active flag if not
+        // already set. If using last frame 0,0 mode then leave the flag
+        // as it is; else, if using non 0,0 motion or intra modes, clear
+        // the flag if it is currently set.
+ if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) ||
+ (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME)) {
+ if (*(x->gf_active_ptr) == 0) {
+ *(x->gf_active_ptr) = 1;
+ cpi->gf_active_count++;
+ }
+ } else if ((this_mb_mode_info->mbmi.mode != ZEROMV) &&
+ *(x->gf_active_ptr)) {
+ *(x->gf_active_ptr) = 0;
+ cpi->gf_active_count--;
+ }
- MODE_INFO *this_mb_mode_info = cm->mi;
+ x->gf_active_ptr++; // Step onto next entry
+ this_mb_mode_info++; // skip to next mb
- x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
+ }
- if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame))
- {
- // Reset Gf useage monitors
- vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
- cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
- }
- else
- {
- // for each macroblock row in image
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
-
- // If using golden then set GF active flag if not already set.
- // If using last frame 0,0 mode then leave flag as it is
- // else if using non 0,0 motion or intra modes then clear
- // flag if it is currently set
- if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) ||
- (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME))
- {
- if (*(x->gf_active_ptr) == 0)
- {
- *(x->gf_active_ptr) = 1;
- cpi->gf_active_count ++;
- }
- }
- else if ((this_mb_mode_info->mbmi.mode != ZEROMV) &&
- *(x->gf_active_ptr))
- {
- *(x->gf_active_ptr) = 0;
- cpi->gf_active_count--;
- }
-
- x->gf_active_ptr++; // Step onto next entry
- this_mb_mode_info++; // skip to next mb
-
- }
-
- // this is to account for the border
- this_mb_mode_info++;
- }
+ // this is to account for the border
+ this_mb_mode_info++;
}
+ }
}
-void vp8_enable_segmentation(VP8_PTR ptr)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+void vp8_enable_segmentation(VP8_PTR ptr) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- // Set the appropriate feature bit
- cpi->mb.e_mbd.segmentation_enabled = 1;
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ // Set the appropriate feature bit
+ cpi->mb.e_mbd.segmentation_enabled = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
-void vp8_disable_segmentation(VP8_PTR ptr)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+void vp8_disable_segmentation(VP8_PTR ptr) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- // Clear the appropriate feature bit
- cpi->mb.e_mbd.segmentation_enabled = 0;
+ // Clear the appropriate feature bit
+ cpi->mb.e_mbd.segmentation_enabled = 0;
}
void vp8_set_segmentation_map(VP8_PTR ptr,
- unsigned char *segmentation_map)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ unsigned char *segmentation_map) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- // Copy in the new segmentation map
- vpx_memcpy( cpi->segmentation_map, segmentation_map,
- (cpi->common.mb_rows * cpi->common.mb_cols) );
+ // Copy in the new segmentation map
+ vpx_memcpy(cpi->segmentation_map, segmentation_map,
+ (cpi->common.mb_rows * cpi->common.mb_cols));
- // Signal that the map should be updated.
- cpi->mb.e_mbd.update_mb_segmentation_map = 1;
- cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+ // Signal that the map should be updated.
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
void vp8_set_segment_data(VP8_PTR ptr,
signed char *feature_data,
- unsigned char abs_delta)
-{
- VP8_COMP *cpi = (VP8_COMP *)(ptr);
+ unsigned char abs_delta) {
+ VP8_COMP *cpi = (VP8_COMP *)(ptr);
- cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
+ cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
- vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
- sizeof(cpi->mb.e_mbd.segment_feature_data));
+ vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
+ sizeof(cpi->mb.e_mbd.segment_feature_data));
- // TBD ?? Set the feature mask
- // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
- // sizeof(cpi->mb.e_mbd.segment_feature_mask));
+ // TBD ?? Set the feature mask
+ // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
+ // sizeof(cpi->mb.e_mbd.segment_feature_mask));
}
// Based on a set of segment counts, calculate a probability tree
-static void calc_segtree_probs( MACROBLOCKD * xd,
- int * segcounts,
- vp8_prob * segment_tree_probs )
-{
- int count1,count2;
- int tot_count;
- int i;
-
- // Blank the strtucture to start with
- vpx_memset(segment_tree_probs, 0, sizeof(segment_tree_probs));
-
- // Total count for all segments
- count1 = segcounts[0] + segcounts[1];
- count2 = segcounts[2] + segcounts[3];
- tot_count = count1 + count2;
-
- // Work out probabilities of each segment
- if (tot_count)
- segment_tree_probs[0] = (count1 * 255) / tot_count;
- if (count1 > 0)
- segment_tree_probs[1] = (segcounts[0] * 255) / count1;
- if (count2 > 0)
- segment_tree_probs[2] = (segcounts[2] * 255) / count2;
-
- // Clamp probabilities to minimum allowed value
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
- {
- if (segment_tree_probs[i] == 0)
- segment_tree_probs[i] = 1;
- }
+static void calc_segtree_probs(MACROBLOCKD *xd,
+ int *segcounts,
+ vp8_prob *segment_tree_probs) {
+ int count1, count2;
+ int tot_count;
+ int i;
+
+  // Blank the structure to start with. (sizeof on the pointer
+  // parameter would clear only a pointer's worth of bytes.)
+  vpx_memset(segment_tree_probs, 0,
+             MB_FEATURE_TREE_PROBS * sizeof(*segment_tree_probs));
+
+ // Total count for all segments
+ count1 = segcounts[0] + segcounts[1];
+ count2 = segcounts[2] + segcounts[3];
+ tot_count = count1 + count2;
+
+ // Work out probabilities of each segment
+ if (tot_count)
+ segment_tree_probs[0] = (count1 * 255) / tot_count;
+ if (count1 > 0)
+ segment_tree_probs[1] = (segcounts[0] * 255) / count1;
+ if (count2 > 0)
+ segment_tree_probs[2] = (segcounts[2] * 255) / count2;
+
+ // Clamp probabilities to minimum allowed value
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ if (segment_tree_probs[i] == 0)
+ segment_tree_probs[i] = 1;
+ }
}
// Based on a set of segment counts and probabilities, calculate a cost estimate
-static int cost_segmap( MACROBLOCKD * xd,
- int * segcounts,
- vp8_prob * probs )
-{
- int cost;
- int count1,count2;
-
- // Cost the top node of the tree
- count1 = segcounts[0] + segcounts[1];
- count2 = segcounts[2] + segcounts[3];
- cost = count1 * vp8_cost_zero(probs[0]) +
- count2 * vp8_cost_one(probs[0]);
-
- // Now add the cost of each individual segment branch
- if (count1 > 0)
- cost += segcounts[0] * vp8_cost_zero(probs[1]) +
- segcounts[1] * vp8_cost_one(probs[1]);
-
- if (count2 > 0)
- cost += segcounts[2] * vp8_cost_zero(probs[2]) +
- segcounts[3] * vp8_cost_one(probs[2]) ;
-
- return cost;
+static int cost_segmap(MACROBLOCKD *xd,
+ int *segcounts,
+ vp8_prob *probs) {
+ int cost;
+ int count1, count2;
-}
+ // Cost the top node of the tree
+ count1 = segcounts[0] + segcounts[1];
+ count2 = segcounts[2] + segcounts[3];
+ cost = count1 * vp8_cost_zero(probs[0]) +
+ count2 * vp8_cost_one(probs[0]);
-void choose_segmap_coding_method( VP8_COMP *cpi )
-{
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
-
- int i;
- int tot_count;
- int no_pred_cost;
- int t_pred_cost = INT_MAX;
- int pred_context;
-
- int mb_row, mb_col;
- int segmap_index = 0;
- unsigned char segment_id;
-
- int temporal_predictor_count[PREDICTION_PROBS][2];
- int no_pred_segcounts[MAX_MB_SEGMENTS];
- int t_unpred_seg_counts[MAX_MB_SEGMENTS];
-
- vp8_prob no_pred_tree[MB_FEATURE_TREE_PROBS];
- vp8_prob t_pred_tree[MB_FEATURE_TREE_PROBS];
- vp8_prob t_nopred_prob[PREDICTION_PROBS];
-
- // Set default state for the segment tree probabilities and the
- // temporal coding probabilities
- vpx_memset(xd->mb_segment_tree_probs, 255,
- sizeof(xd->mb_segment_tree_probs));
- vpx_memset(cm->segment_pred_probs, 255,
- sizeof(cm->segment_pred_probs));
-
- vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
- vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
- vpx_memset(temporal_predictor_count, 0, sizeof(temporal_predictor_count));
-
- // First of all generate stats regarding how well the last segment map
- // predicts this one
-
- // Initialize macroblock decoder mode info context for the first mb
- // in the frame
- xd->mode_info_context = cm->mi;
-
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
- {
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
- {
- segment_id = xd->mode_info_context->mbmi.segment_id;
-
- // Count the number of hits on each segment with no prediction
- no_pred_segcounts[segment_id]++;
-
- // Temporal prediction not allowed on key frames
- if (cm->frame_type != KEY_FRAME)
- {
- // Test to see if the segment id matches the predicted value.
- int seg_predicted =
- (segment_id == get_pred_mb_segid( cm, segmap_index ));
-
- // Get the segment id prediction context
- pred_context =
- get_pred_context( cm, xd, PRED_SEG_ID );
-
- // Store the prediction status for this mb and update counts
- // as appropriate
- set_pred_flag( xd, PRED_SEG_ID, seg_predicted );
- temporal_predictor_count[pred_context][seg_predicted]++;
-
- if ( !seg_predicted )
- // Update the "unpredicted" segment count
- t_unpred_seg_counts[segment_id]++;
- }
-
- // Step on to the next mb
- xd->mode_info_context++;
-
- // Step on to the next entry in the segment maps
- segmap_index++;
- }
+ // Now add the cost of each individual segment branch
+ if (count1 > 0)
+ cost += segcounts[0] * vp8_cost_zero(probs[1]) +
+ segcounts[1] * vp8_cost_one(probs[1]);
- // this is to account for the border in mode_info_context
- xd->mode_info_context++;
- }
+ if (count2 > 0)
+ cost += segcounts[2] * vp8_cost_zero(probs[2]) +
+ segcounts[3] * vp8_cost_one(probs[2]);
- // Work out probability tree for coding segments without prediction
- // and the cost.
- calc_segtree_probs( xd, no_pred_segcounts, no_pred_tree );
- no_pred_cost = cost_segmap( xd, no_pred_segcounts, no_pred_tree );
-
- // Key frames cannot use temporal prediction
- if (cm->frame_type != KEY_FRAME)
- {
- // Work out probability tree for coding those segments not
- // predicted using the temporal method and the cost.
- calc_segtree_probs( xd, t_unpred_seg_counts, t_pred_tree );
- t_pred_cost = cost_segmap( xd, t_unpred_seg_counts, t_pred_tree );
-
- // Add in the cost of the signalling for each prediction context
- for ( i = 0; i < PREDICTION_PROBS; i++ )
- {
- tot_count = temporal_predictor_count[i][0] +
- temporal_predictor_count[i][1];
-
- // Work out the context probabilities for the segment
- // prediction flag
- if ( tot_count )
- {
- t_nopred_prob[i] = ( temporal_predictor_count[i][0] * 255 ) /
- tot_count;
-
- // Clamp to minimum allowed value
- if ( t_nopred_prob[i] < 1 )
- t_nopred_prob[i] = 1;
- }
- else
- t_nopred_prob[i] = 1;
-
- // Add in the predictor signaling cost
- t_pred_cost += ( temporal_predictor_count[i][0] *
- vp8_cost_zero(t_nopred_prob[i]) ) +
- ( temporal_predictor_count[i][1] *
- vp8_cost_one(t_nopred_prob[i]) );
- }
- }
+ return cost;
- // Now choose which coding method to use.
- if ( t_pred_cost < no_pred_cost )
- {
- cm->temporal_update = 1;
- vpx_memcpy( xd->mb_segment_tree_probs,
- t_pred_tree, sizeof(t_pred_tree) );
- vpx_memcpy( &cm->segment_pred_probs,
- t_nopred_prob, sizeof(t_nopred_prob) );
+}
+
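On the units here: vp8_cost_zero(p) and vp8_cost_one(p) return the price of coding a 0 or 1 branch with probability p, in roughly 1/256-bit units (256 == one bit), so cost_segmap() estimates the coded size of the segment map in those units. For example, with probs[0] = 128 each top-node decision costs 256 units, i.e. one bit, as expected for an even split. (The unit convention is recalled from the treewriter tables, not restated in this file.)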
+void choose_segmap_coding_method(VP8_COMP *cpi) {
+ VP8_COMMON *const cm = & cpi->common;
+ MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+
+ int i;
+ int tot_count;
+ int no_pred_cost;
+ int t_pred_cost = INT_MAX;
+ int pred_context;
+
+ int mb_row, mb_col;
+ int segmap_index = 0;
+ unsigned char segment_id;
+
+ int temporal_predictor_count[PREDICTION_PROBS][2];
+ int no_pred_segcounts[MAX_MB_SEGMENTS];
+ int t_unpred_seg_counts[MAX_MB_SEGMENTS];
+
+ vp8_prob no_pred_tree[MB_FEATURE_TREE_PROBS];
+ vp8_prob t_pred_tree[MB_FEATURE_TREE_PROBS];
+ vp8_prob t_nopred_prob[PREDICTION_PROBS];
+
+ // Set default state for the segment tree probabilities and the
+ // temporal coding probabilities
+ vpx_memset(xd->mb_segment_tree_probs, 255,
+ sizeof(xd->mb_segment_tree_probs));
+ vpx_memset(cm->segment_pred_probs, 255,
+ sizeof(cm->segment_pred_probs));
+
+ vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
+ vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
+ vpx_memset(temporal_predictor_count, 0, sizeof(temporal_predictor_count));
+
+ // First of all generate stats regarding how well the last segment map
+ // predicts this one
+
+ // Initialize macroblock decoder mode info context for the first mb
+ // in the frame
+ xd->mode_info_context = cm->mi;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ // Count the number of hits on each segment with no prediction
+ no_pred_segcounts[segment_id]++;
+
+ // Temporal prediction not allowed on key frames
+ if (cm->frame_type != KEY_FRAME) {
+ // Test to see if the segment id matches the predicted value.
+ int seg_predicted =
+ (segment_id == get_pred_mb_segid(cm, segmap_index));
+
+ // Get the segment id prediction context
+ pred_context =
+ get_pred_context(cm, xd, PRED_SEG_ID);
+
+ // Store the prediction status for this mb and update counts
+ // as appropriate
+ set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
+ temporal_predictor_count[pred_context][seg_predicted]++;
+
+ if (!seg_predicted)
+ // Update the "unpredicted" segment count
+ t_unpred_seg_counts[segment_id]++;
+ }
+
+ // Step on to the next mb
+ xd->mode_info_context++;
+
+ // Step on to the next entry in the segment maps
+ segmap_index++;
}
- else
- {
- cm->temporal_update = 0;
- vpx_memcpy( xd->mb_segment_tree_probs,
- no_pred_tree, sizeof(no_pred_tree) );
+
+ // this is to account for the border in mode_info_context
+ xd->mode_info_context++;
+ }
+
+ // Work out probability tree for coding segments without prediction
+ // and the cost.
+ calc_segtree_probs(xd, no_pred_segcounts, no_pred_tree);
+ no_pred_cost = cost_segmap(xd, no_pred_segcounts, no_pred_tree);
+
+ // Key frames cannot use temporal prediction
+ if (cm->frame_type != KEY_FRAME) {
+ // Work out probability tree for coding those segments not
+ // predicted using the temporal method and the cost.
+ calc_segtree_probs(xd, t_unpred_seg_counts, t_pred_tree);
+ t_pred_cost = cost_segmap(xd, t_unpred_seg_counts, t_pred_tree);
+
+ // Add in the cost of the signalling for each prediction context
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ tot_count = temporal_predictor_count[i][0] +
+ temporal_predictor_count[i][1];
+
+ // Work out the context probabilities for the segment
+ // prediction flag
+ if (tot_count) {
+ t_nopred_prob[i] = (temporal_predictor_count[i][0] * 255) /
+ tot_count;
+
+ // Clamp to minimum allowed value
+ if (t_nopred_prob[i] < 1)
+ t_nopred_prob[i] = 1;
+ } else
+ t_nopred_prob[i] = 1;
+
+ // Add in the predictor signaling cost
+ t_pred_cost += (temporal_predictor_count[i][0] *
+ vp8_cost_zero(t_nopred_prob[i])) +
+ (temporal_predictor_count[i][1] *
+ vp8_cost_one(t_nopred_prob[i]));
}
+ }
+
+ // Now choose which coding method to use.
+ if (t_pred_cost < no_pred_cost) {
+ cm->temporal_update = 1;
+ vpx_memcpy(xd->mb_segment_tree_probs,
+ t_pred_tree, sizeof(t_pred_tree));
+ vpx_memcpy(&cm->segment_pred_probs,
+ t_nopred_prob, sizeof(t_nopred_prob));
+ } else {
+ cm->temporal_update = 0;
+ vpx_memcpy(xd->mb_segment_tree_probs,
+ no_pred_tree, sizeof(no_pred_tree));
+ }
}
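In brief, the choice made above is (a sketch of the decision, not literal code):

  no_pred_cost = bits(tree-code every segment id)
  t_pred_cost  = bits(tree-code only the mispredicted ids)
               + bits(per-context "predicted?" flags)
  cm->temporal_update = (t_pred_cost < no_pred_cost);

Key frames never enter the temporal branch, so t_pred_cost stays at INT_MAX and explicit coding always wins there.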
diff --git a/vp8/encoder/segmentation.h b/vp8/encoder/segmentation.h
index a7e1f7cfe..80e09fabb 100644
--- a/vp8/encoder/segmentation.h
+++ b/vp8/encoder/segmentation.h
@@ -28,9 +28,9 @@ extern void vp8_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_ma
// The values given for each segment can be either deltas (from the default
// value chosen for the frame) or absolute values.
//
-// Valid range for abs values is (0-127 for MB_LVL_ALT_Q) , (0-63 for
+// Valid range for abs values is (0-127 for MB_LVL_ALT_Q), (0-63 for
// SEGMENT_ALT_LF)
-// Valid range for delta values are (+/-127 for MB_LVL_ALT_Q) , (+/-63 for
+// Valid range for delta values is (+/-127 for MB_LVL_ALT_Q), (+/-63 for
// SEGMENT_ALT_LF)
//
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
@@ -38,6 +38,6 @@ extern void vp8_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_ma
//
extern void vp8_set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta);
-extern void choose_segmap_coding_method( VP8_COMP *cpi );
+extern void choose_segmap_coding_method(VP8_COMP *cpi);
#endif /* __INC_SEGMENTATION_H__ */
diff --git a/vp8/encoder/ssim.c b/vp8/encoder/ssim.c
index d0f8e490a..d3d9711dc 100644
--- a/vp8/encoder/ssim.c
+++ b/vp8/encoder/ssim.c
@@ -13,55 +13,49 @@
void vp8_ssim_parms_16x16_c
(
- unsigned char *s,
- int sp,
- unsigned char *r,
- int rp,
- unsigned long *sum_s,
- unsigned long *sum_r,
- unsigned long *sum_sq_s,
- unsigned long *sum_sq_r,
- unsigned long *sum_sxr
-)
-{
- int i,j;
- for(i=0;i<16;i++,s+=sp,r+=rp)
- {
- for(j=0;j<16;j++)
- {
- *sum_s += s[j];
- *sum_r += r[j];
- *sum_sq_s += s[j] * s[j];
- *sum_sq_r += r[j] * r[j];
- *sum_sxr += s[j] * r[j];
- }
- }
+ unsigned char *s,
+ int sp,
+ unsigned char *r,
+ int rp,
+ unsigned long *sum_s,
+ unsigned long *sum_r,
+ unsigned long *sum_sq_s,
+ unsigned long *sum_sq_r,
+ unsigned long *sum_sxr
+) {
+ int i, j;
+ for (i = 0; i < 16; i++, s += sp, r += rp) {
+ for (j = 0; j < 16; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
}
void vp8_ssim_parms_8x8_c
(
- unsigned char *s,
- int sp,
- unsigned char *r,
- int rp,
- unsigned long *sum_s,
- unsigned long *sum_r,
- unsigned long *sum_sq_s,
- unsigned long *sum_sq_r,
- unsigned long *sum_sxr
-)
-{
- int i,j;
- for(i=0;i<8;i++,s+=sp,r+=rp)
- {
- for(j=0;j<8;j++)
- {
- *sum_s += s[j];
- *sum_r += r[j];
- *sum_sq_s += s[j] * s[j];
- *sum_sq_r += r[j] * r[j];
- *sum_sxr += s[j] * r[j];
- }
- }
+ unsigned char *s,
+ int sp,
+ unsigned char *r,
+ int rp,
+ unsigned long *sum_s,
+ unsigned long *sum_r,
+ unsigned long *sum_sq_s,
+ unsigned long *sum_sq_r,
+ unsigned long *sum_sxr
+) {
+ int i, j;
+ for (i = 0; i < 8; i++, s += sp, r += rp) {
+ for (j = 0; j < 8; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
}
const static int64_t cc1 = 26634; // (64^2*(.01*255)^2)
@@ -69,79 +63,75 @@ const static int64_t cc2 = 239708; // (64^2*(.03*255)^2
static double similarity
(
- unsigned long sum_s,
- unsigned long sum_r,
- unsigned long sum_sq_s,
- unsigned long sum_sq_r,
- unsigned long sum_sxr,
- int count
-)
-{
- int64_t ssim_n, ssim_d;
- int64_t c1, c2;
-
- //scale the constants by number of pixels
- c1 = (cc1*count*count)>>12;
- c2 = (cc2*count*count)>>12;
-
- ssim_n = (2*sum_s*sum_r+ c1)*((int64_t) 2*count*sum_sxr-
- (int64_t) 2*sum_s*sum_r+c2);
-
- ssim_d = (sum_s*sum_s +sum_r*sum_r+c1)*
- ((int64_t)count*sum_sq_s-(int64_t)sum_s*sum_s +
- (int64_t)count*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
-
- return ssim_n * 1.0 / ssim_d;
+ unsigned long sum_s,
+ unsigned long sum_r,
+ unsigned long sum_sq_s,
+ unsigned long sum_sq_r,
+ unsigned long sum_sxr,
+ int count
+) {
+ int64_t ssim_n, ssim_d;
+ int64_t c1, c2;
+
+ // scale the constants by number of pixels
+ c1 = (cc1 * count * count) >> 12;
+ c2 = (cc2 * count * count) >> 12;
+
+ ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr -
+ (int64_t) 2 * sum_s * sum_r + c2);
+
+ ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) *
+ ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
+ (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2);
+
+ return ssim_n * 1.0 / ssim_d;
}
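similarity() is the standard SSIM index with numerator and denominator multiplied through by count^2 so the arithmetic stays integral until the final division (c1 and c2 above already carry the count*count factor):

  \mathrm{SSIM}(s,r) = \frac{(2\mu_s\mu_r + C_1)(2\sigma_{sr} + C_2)}
                            {(\mu_s^2 + \mu_r^2 + C_1)(\sigma_s^2 + \sigma_r^2 + C_2)}

with \mu the sample means, \sigma_{sr} the covariance, and C_1 = (0.01 \cdot 255)^2, C_2 = (0.03 \cdot 255)^2 per the cc1/cc2 definitions.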
-static double ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
-{
- unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
- return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
+static double ssim_16x16(unsigned char *s, int sp, unsigned char *r, int rp,
+ const vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ SSIMPF_INVOKE(rtcd, 16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
}
-static double ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
-{
- unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- SSIMPF_INVOKE(rtcd,8x8)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
- return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
+static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp,
+ const vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ SSIMPF_INVOKE(rtcd, 8x8)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
// TODO: (jbb) tried to scale this function such that we may be able to
// use it as a distortion metric in mode selection code (provided we do
// a reconstruction)
-long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
-{
- unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- int64_t ssim3;
- int64_t ssim_n1,ssim_n2;
- int64_t ssim_d1,ssim_d2;
- int64_t ssim_t1,ssim_t2;
- int64_t c1, c2;
+long dssim(unsigned char *s, int sp, unsigned char *r, int rp,
+ const vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ int64_t ssim3;
+ int64_t ssim_n1, ssim_n2;
+ int64_t ssim_d1, ssim_d2;
+ int64_t ssim_t1, ssim_t2;
+ int64_t c1, c2;
- // normalize by 256/64
- c1 = cc1*16;
- c2 = cc2*16;
+ // normalize by 256/64
+ c1 = cc1 * 16;
+ c2 = cc2 * 16;
- SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
- ssim_n1 = (2*sum_s*sum_r+ c1);
+ SSIMPF_INVOKE(rtcd, 16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ ssim_n1 = (2 * sum_s * sum_r + c1);
- ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2);
+ ssim_n2 = ((int64_t) 2 * 256 * sum_sxr - (int64_t) 2 * sum_s * sum_r + c2);
- ssim_d1 =((int64_t)sum_s*sum_s +(int64_t)sum_r*sum_r+c1);
+ ssim_d1 = ((int64_t)sum_s * sum_s + (int64_t)sum_r * sum_r + c1);
- ssim_d2 = (256 * (int64_t) sum_sq_s-(int64_t) sum_s*sum_s +
- (int64_t) 256*sum_sq_r-(int64_t) sum_r*sum_r +c2) ;
+ ssim_d2 = (256 * (int64_t) sum_sq_s - (int64_t) sum_s * sum_s +
+ (int64_t) 256 * sum_sq_r - (int64_t) sum_r * sum_r + c2);
- ssim_t1 = 256 - 256 * ssim_n1 / ssim_d1;
- ssim_t2 = 256 - 256 * ssim_n2 / ssim_d2;
+ ssim_t1 = 256 - 256 * ssim_n1 / ssim_d1;
+ ssim_t2 = 256 - 256 * ssim_n2 / ssim_d2;
- ssim3 = 256 *ssim_t1 * ssim_t2;
- if(ssim3 <0 )
- ssim3=0;
- return (long)( ssim3 );
+ ssim3 = 256 * ssim_t1 * ssim_t2;
+ if (ssim3 < 0)
+ ssim3 = 0;
+ return (long)(ssim3);
}
// We are using an 8x8 moving window with starting location of each 8x8 window
@@ -149,91 +139,86 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
// block boundaries to penalize blocking artifacts.
double vp8_ssim2
(
- unsigned char *img1,
- unsigned char *img2,
- int stride_img1,
- int stride_img2,
- int width,
- int height,
- const vp8_variance_rtcd_vtable_t *rtcd
-)
-{
- int i,j;
- int samples =0;
- double ssim_total=0;
-
- // sample point start with each 4x4 location
- for(i=0; i < height-8; i+=4, img1 += stride_img1*4, img2 += stride_img2*4)
- {
- for(j=0; j < width-8; j+=4 )
- {
- double v = ssim_8x8(img1+j, stride_img1, img2+j, stride_img2, rtcd);
- ssim_total += v;
- samples++;
- }
+ unsigned char *img1,
+ unsigned char *img2,
+ int stride_img1,
+ int stride_img2,
+ int width,
+ int height,
+ const vp8_variance_rtcd_vtable_t *rtcd
+) {
+ int i, j;
+ int samples = 0;
+ double ssim_total = 0;
+
+  // sample points start at each 4x4 grid location
+ for (i = 0; i < height - 8; i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
+ for (j = 0; j < width - 8; j += 4) {
+ double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2, rtcd);
+ ssim_total += v;
+ samples++;
}
- ssim_total /= samples;
- return ssim_total;
+ }
+ ssim_total /= samples;
+ return ssim_total;
}
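A quick check on the sampling density (worked example, values not from the source): for a 176x144 luma plane the loops visit j in {0, 4, ..., 164} and i in {0, 4, ..., 132}, i.e. 42 * 34 = 1428 overlapping 8x8 windows, whose scores are then averaged into ssim_total.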
double vp8_calc_ssim
(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- int lumamask,
- double *weight,
- const vp8_variance_rtcd_vtable_t *rtcd
-)
-{
- double a, b, c;
- double ssimv;
+ YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ int lumamask,
+ double *weight,
+ const vp8_variance_rtcd_vtable_t *rtcd
+) {
+ double a, b, c;
+ double ssimv;
- a = vp8_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride, source->y_width,
- source->y_height, rtcd);
+ a = vp8_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride, source->y_width,
+ source->y_height, rtcd);
- b = vp8_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ b = vp8_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height, rtcd);
- c = vp8_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ c = vp8_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height, rtcd);
- ssimv = a * .8 + .1 * (b + c);
+ ssimv = a * .8 + .1 * (b + c);
- *weight = 1;
+ *weight = 1;
- return ssimv;
+ return ssimv;
}
double vp8_calc_ssimg
(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- const vp8_variance_rtcd_vtable_t *rtcd
-)
-{
- double ssim_all = 0;
- double a, b, c;
-
- a = vp8_ssim2(source->y_buffer, dest->y_buffer,
- source->y_stride, dest->y_stride, source->y_width,
- source->y_height, rtcd);
-
- b = vp8_ssim2(source->u_buffer, dest->u_buffer,
- source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
-
- c = vp8_ssim2(source->v_buffer, dest->v_buffer,
- source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
- *ssim_y = a;
- *ssim_u = b;
- *ssim_v = c;
- ssim_all = (a * 4 + b + c) /6;
-
- return ssim_all;
+ YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest,
+ double *ssim_y,
+ double *ssim_u,
+ double *ssim_v,
+ const vp8_variance_rtcd_vtable_t *rtcd
+) {
+ double ssim_all = 0;
+ double a, b, c;
+
+ a = vp8_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride, source->y_width,
+ source->y_height, rtcd);
+
+ b = vp8_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height, rtcd);
+
+ c = vp8_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height, rtcd);
+ *ssim_y = a;
+ *ssim_u = b;
+ *ssim_v = c;
+ ssim_all = (a * 4 + b + c) / 6;
+
+ return ssim_all;
}
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index 564be41f1..964f4c856 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -39,520 +39,491 @@
static void vp8_temporal_filter_predictors_mb_c
(
- MACROBLOCKD *x,
- unsigned char *y_mb_ptr,
- unsigned char *u_mb_ptr,
- unsigned char *v_mb_ptr,
- int stride,
- int mv_row,
- int mv_col,
- unsigned char *pred
-)
-{
- int offset;
- unsigned char *yptr, *uptr, *vptr;
- int omv_row, omv_col;
-
- // Y
- yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
-
- if ((mv_row | mv_col) & 7)
- {
+ MACROBLOCKD *x,
+ unsigned char *y_mb_ptr,
+ unsigned char *u_mb_ptr,
+ unsigned char *v_mb_ptr,
+ int stride,
+ int mv_row,
+ int mv_col,
+ unsigned char *pred
+) {
+ int offset;
+ unsigned char *yptr, *uptr, *vptr;
+ int omv_row, omv_col;
+
+ // Y
+ yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
+
+ if ((mv_row | mv_col) & 7) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- x->subpixel_predict16x16(yptr, stride,
- (mv_col & 7)<<1, (mv_row & 7)<<1, &pred[0], 16);
+ x->subpixel_predict16x16(yptr, stride,
+ (mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
#else
- x->subpixel_predict16x16(yptr, stride,
- mv_col & 7, mv_row & 7, &pred[0], 16);
+ x->subpixel_predict16x16(yptr, stride,
+ mv_col & 7, mv_row & 7, &pred[0], 16);
#endif
- }
- else
- {
- RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
- }
-
- // U & V
- omv_row = mv_row;
- omv_col = mv_col;
- mv_row >>= 1;
- mv_col >>= 1;
- stride = (stride + 1) >> 1;
- offset = (mv_row >> 3) * stride + (mv_col >> 3);
- uptr = u_mb_ptr + offset;
- vptr = v_mb_ptr + offset;
+ } else {
+ RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
+ }
+
+ // U & V
+ omv_row = mv_row;
+ omv_col = mv_col;
+ mv_row >>= 1;
+ mv_col >>= 1;
+ stride = (stride + 1) >> 1;
+ offset = (mv_row >> 3) * stride + (mv_col >> 3);
+ uptr = u_mb_ptr + offset;
+ vptr = v_mb_ptr + offset;
#if CONFIG_SIXTEENTH_SUBPEL_UV
- if ((omv_row | omv_col) & 15)
- {
- x->subpixel_predict8x8(uptr, stride,
- (omv_col & 15), (omv_row & 15), &pred[256], 8);
- x->subpixel_predict8x8(vptr, stride,
- (omv_col & 15), (omv_row & 15), &pred[320], 8);
- }
+ if ((omv_row | omv_col) & 15) {
+ x->subpixel_predict8x8(uptr, stride,
+ (omv_col & 15), (omv_row & 15), &pred[256], 8);
+ x->subpixel_predict8x8(vptr, stride,
+ (omv_col & 15), (omv_row & 15), &pred[320], 8);
+ }
#else
- if ((mv_row | mv_col) & 7)
- {
- x->subpixel_predict8x8(uptr, stride,
- mv_col & 7, mv_row & 7, &pred[256], 8);
- x->subpixel_predict8x8(vptr, stride,
- mv_col & 7, mv_row & 7, &pred[320], 8);
- }
+ if ((mv_row | mv_col) & 7) {
+ x->subpixel_predict8x8(uptr, stride,
+ mv_col & 7, mv_row & 7, &pred[256], 8);
+ x->subpixel_predict8x8(vptr, stride,
+ mv_col & 7, mv_row & 7, &pred[320], 8);
+ }
#endif
- else
- {
- RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
- RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
- }
+ else {
+ RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
+ RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
+ }
}
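The motion vectors here are in 1/8-luma-pel units, which explains the shifts and masks above. A worked example, assuming CONFIG_SIXTEENTH_SUBPEL_UV: mv_row = 13 is 1 full pel (13 >> 3) plus 5/8 pel, passed to the luma filter as (5 << 1) = 10 sixteenths; for chroma the same vector is exactly 13/16 of a chroma pel (one chroma pel spans two luma pels), which is why the fraction is taken from the unhalved omv_row & 15 while the integer offset uses the halved, then shifted, mv_row.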
void vp8_temporal_filter_apply_c
(
- unsigned char *frame1,
- unsigned int stride,
- unsigned char *frame2,
- unsigned int block_size,
- int strength,
- int filter_weight,
- unsigned int *accumulator,
- unsigned short *count
-)
-{
- unsigned int i, j, k;
- int modifier;
- int byte = 0;
-
- for (i = 0,k = 0; i < block_size; i++)
- {
- for (j = 0; j < block_size; j++, k++)
- {
-
- int src_byte = frame1[byte];
- int pixel_value = *frame2++;
-
- modifier = src_byte - pixel_value;
- // This is an integer approximation of:
- // float coeff = (3.0 * modifer * modifier) / pow(2, strength);
- // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
- modifier *= modifier;
- modifier *= 3;
- modifier += 1 << (strength - 1);
- modifier >>= strength;
-
- if (modifier > 16)
- modifier = 16;
-
- modifier = 16 - modifier;
- modifier *= filter_weight;
-
- count[k] += modifier;
- accumulator[k] += modifier * pixel_value;
-
- byte++;
- }
-
- byte += stride - block_size;
+ unsigned char *frame1,
+ unsigned int stride,
+ unsigned char *frame2,
+ unsigned int block_size,
+ int strength,
+ int filter_weight,
+ unsigned int *accumulator,
+ unsigned short *count
+) {
+ unsigned int i, j, k;
+ int modifier;
+ int byte = 0;
+
+ for (i = 0, k = 0; i < block_size; i++) {
+ for (j = 0; j < block_size; j++, k++) {
+
+ int src_byte = frame1[byte];
+ int pixel_value = *frame2++;
+
+ modifier = src_byte - pixel_value;
+ // This is an integer approximation of:
+      // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
+ // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += 1 << (strength - 1);
+ modifier >>= strength;
+
+ if (modifier > 16)
+ modifier = 16;
+
+ modifier = 16 - modifier;
+ modifier *= filter_weight;
+
+ count[k] += modifier;
+ accumulator[k] += modifier * pixel_value;
+
+ byte++;
}
+
+ byte += stride - block_size;
+ }
}
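Two worked points on the weight curve above, taking strength = 6 and filter_weight = 2:

  diff =  4:  (3*16  + 32) >> 6 = 1  ->  modifier = (16 - 1) * 2 = 30
  diff = 20:  (3*400 + 32) >> 6 = 19 ->  clamped to 16, modifier = 0

so near-matches are blended strongly while clear mismatches add nothing to accumulator[]/count[].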
#if ALT_REF_MC_ENABLED
static int vp8_temporal_filter_find_matching_mb_c
(
- VP8_COMP *cpi,
- YV12_BUFFER_CONFIG *arf_frame,
- YV12_BUFFER_CONFIG *frame_ptr,
- int mb_offset,
- int error_thresh
-)
-{
- MACROBLOCK *x = &cpi->mb;
- int step_param;
- int further_steps;
- int sadpb = x->sadperbit16;
- int bestsme = INT_MAX;
-
- BLOCK *b = &x->block[0];
- BLOCKD *d = &x->e_mbd.block[0];
- int_mv best_ref_mv1;
- int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
-
- // Save input state
- unsigned char **base_src = b->base_src;
- int src = b->src;
- int src_stride = b->src_stride;
- unsigned char **base_pre = d->base_pre;
- int pre = d->pre;
- int pre_stride = d->pre_stride;
-
- best_ref_mv1.as_int = 0;
- best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3;
- best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >>3;
-
- // Setup frame pointers
- b->base_src = &arf_frame->y_buffer;
- b->src_stride = arf_frame->y_stride;
- b->src = mb_offset;
-
- d->base_pre = &frame_ptr->y_buffer;
- d->pre_stride = frame_ptr->y_stride;
- d->pre = mb_offset;
-
- // Further step/diamond searches as necessary
- if (cpi->Speed < 8)
- {
- step_param = cpi->sf.first_step +
- ((cpi->Speed > 5) ? 1 : 0);
- further_steps =
- (cpi->sf.max_step_search_steps - 1)-step_param;
- }
- else
- {
- step_param = cpi->sf.first_step + 2;
- further_steps = 0;
- }
-
- /*cpi->sf.search_method == HEX*/
- // TODO Check that the 16x16 vf & sdf are selected here
- // Ignore mv costing by sending NULL pointer instead of cost arrays
- bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
- step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
- NULL, NULL, &best_ref_mv1);
+ VP8_COMP *cpi,
+ YV12_BUFFER_CONFIG *arf_frame,
+ YV12_BUFFER_CONFIG *frame_ptr,
+ int mb_offset,
+ int error_thresh
+) {
+ MACROBLOCK *x = &cpi->mb;
+ int step_param;
+ int further_steps;
+ int sadpb = x->sadperbit16;
+ int bestsme = INT_MAX;
+
+ BLOCK *b = &x->block[0];
+ BLOCKD *d = &x->e_mbd.block[0];
+ int_mv best_ref_mv1;
+ int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
+
+ // Save input state
+ unsigned char **base_src = b->base_src;
+ int src = b->src;
+ int src_stride = b->src_stride;
+ unsigned char **base_pre = d->base_pre;
+ int pre = d->pre;
+ int pre_stride = d->pre_stride;
+
+ best_ref_mv1.as_int = 0;
+ best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3;
+ best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;
+
+ // Setup frame pointers
+ b->base_src = &arf_frame->y_buffer;
+ b->src_stride = arf_frame->y_stride;
+ b->src = mb_offset;
+
+ d->base_pre = &frame_ptr->y_buffer;
+ d->pre_stride = frame_ptr->y_stride;
+ d->pre = mb_offset;
+
+ // Further step/diamond searches as necessary
+ if (cpi->Speed < 8) {
+ step_param = cpi->sf.first_step +
+ ((cpi->Speed > 5) ? 1 : 0);
+ further_steps =
+ (cpi->sf.max_step_search_steps - 1) - step_param;
+ } else {
+ step_param = cpi->sf.first_step + 2;
+ further_steps = 0;
+ }
+
+ /*cpi->sf.search_method == HEX*/
+ // TODO Check that the 16x16 vf & sdf are selected here
+ // Ignore mv costing by sending NULL pointer instead of cost arrays
+ bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
+ step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
+ NULL, NULL, &best_ref_mv1);
#if ALT_REF_SUBPEL_ENABLED
- // Try sub-pixel MC?
- //if (bestsme > error_thresh && bestsme < INT_MAX)
- {
- int distortion;
- unsigned int sse;
- // Ignore mv costing by sending NULL pointer instead of cost array
- bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first,
- &best_ref_mv1,
- x->errorperbit,
- &cpi->fn_ptr[BLOCK_16X16],
- NULL, &distortion, &sse);
- }
+ // Try sub-pixel MC?
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ // Ignore mv costing by sending NULL pointer instead of cost array
+ bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first,
+ &best_ref_mv1,
+ x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16],
+ NULL, &distortion, &sse);
+ }
#endif
- // Save input state
- b->base_src = base_src;
- b->src = src;
- b->src_stride = src_stride;
- d->base_pre = base_pre;
- d->pre = pre;
- d->pre_stride = pre_stride;
+  // Restore input state
+ b->base_src = base_src;
+ b->src = src;
+ b->src_stride = src_stride;
+ d->base_pre = base_pre;
+ d->pre = pre;
+ d->pre_stride = pre_stride;
- return bestsme;
+ return bestsme;
}
#endif
static void vp8_temporal_filter_iterate_c
(
- VP8_COMP *cpi,
- int frame_count,
- int alt_ref_index,
- int strength
-)
-{
- int byte;
- int frame;
- int mb_col, mb_row;
- unsigned int filter_weight;
- int mb_cols = cpi->common.mb_cols;
- int mb_rows = cpi->common.mb_rows;
- int mb_y_offset = 0;
- int mb_uv_offset = 0;
- DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16*16 + 8*8 + 8*8);
- DECLARE_ALIGNED_ARRAY(16, unsigned short, count, 16*16 + 8*8 + 8*8);
- MACROBLOCKD *mbd = &cpi->mb.e_mbd;
- YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
- unsigned char *dst1, *dst2;
- DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16*16 + 8*8 + 8*8);
-
- // Save input state
- unsigned char *y_buffer = mbd->pre.y_buffer;
- unsigned char *u_buffer = mbd->pre.u_buffer;
- unsigned char *v_buffer = mbd->pre.v_buffer;
-
- for (mb_row = 0; mb_row < mb_rows; mb_row++)
- {
+ VP8_COMP *cpi,
+ int frame_count,
+ int alt_ref_index,
+ int strength
+) {
+ int byte;
+ int frame;
+ int mb_col, mb_row;
+ unsigned int filter_weight;
+ int mb_cols = cpi->common.mb_cols;
+ int mb_rows = cpi->common.mb_rows;
+ int mb_y_offset = 0;
+ int mb_uv_offset = 0;
+ DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 + 8 * 8 + 8 * 8);
+ DECLARE_ALIGNED_ARRAY(16, unsigned short, count, 16 * 16 + 8 * 8 + 8 * 8);
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
+ unsigned char *dst1, *dst2;
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16 * 16 + 8 * 8 + 8 * 8);
+
+ // Save input state
+ unsigned char *y_buffer = mbd->pre.y_buffer;
+ unsigned char *u_buffer = mbd->pre.u_buffer;
+ unsigned char *v_buffer = mbd->pre.v_buffer;
+
+ for (mb_row = 0; mb_row < mb_rows; mb_row++) {
#if ALT_REF_MC_ENABLED
- // Source frames are extended to 16 pixels. This is different than
- // L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
- // A 6/8 tap filter is used for motion search. This requires 2 pixels
- // before and 3 pixels after. So the largest Y mv on a border would
- // then be 16 - INTERP_EXTEND. The UV blocks are half the size of the Y and
- // therefore only extended by 8. The largest mv that a UV block
- // can support is 8 - INTERP_EXTEND. A UV mv is half of a Y mv.
- // (16 - INTERP_EXTEND) >> 1 which is greater than 8 - INTERP_EXTEND.
- // To keep the mv in play for both Y and UV planes the max that it
- // can be on a border is therefore 16 - (2*INTERP_EXTEND+1).
- cpi->mb.mv_row_min = -((mb_row * 16) + (17 - 2*INTERP_EXTEND));
- cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
- + (17 - 2*INTERP_EXTEND);
+ // Source frames are extended to 16 pixels. This is different than
+ // L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
+ // A 6/8 tap filter is used for motion search. This requires 2 pixels
+ // before and 3 pixels after. So the largest Y mv on a border would
+ // then be 16 - INTERP_EXTEND. The UV blocks are half the size of the Y and
+ // therefore only extended by 8. The largest mv that a UV block
+ // can support is 8 - INTERP_EXTEND. A UV mv is half of a Y mv.
+ // (16 - INTERP_EXTEND) >> 1 which is greater than 8 - INTERP_EXTEND.
+ // To keep the mv in play for both Y and UV planes the max that it
+ // can be on a border is therefore 16 - (2*INTERP_EXTEND+1).
+ cpi->mb.mv_row_min = -((mb_row * 16) + (17 - 2 * INTERP_EXTEND));
+ cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
+ + (17 - 2 * INTERP_EXTEND);
#endif
- for (mb_col = 0; mb_col < mb_cols; mb_col++)
- {
- int i, j, k;
- int stride;
+ for (mb_col = 0; mb_col < mb_cols; mb_col++) {
+ int i, j, k;
+ int stride;
- vpx_memset(accumulator, 0, 384*sizeof(unsigned int));
- vpx_memset(count, 0, 384*sizeof(unsigned short));
+ vpx_memset(accumulator, 0, 384 * sizeof(unsigned int));
+ vpx_memset(count, 0, 384 * sizeof(unsigned short));
#if ALT_REF_MC_ENABLED
- cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2*INTERP_EXTEND));
- cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
- + (17 - 2*INTERP_EXTEND);
+ cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2 * INTERP_EXTEND));
+ cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
+ + (17 - 2 * INTERP_EXTEND);
#endif
- for (frame = 0; frame < frame_count; frame++)
- {
- if (cpi->frames[frame] == NULL)
- continue;
-
- mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
- mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
-
- if (frame == alt_ref_index)
- {
- filter_weight = 2;
- }
- else
- {
- int err = 0;
+ for (frame = 0; frame < frame_count; frame++) {
+ if (cpi->frames[frame] == NULL)
+ continue;
+
+ mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
+ mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
+
+ if (frame == alt_ref_index) {
+ filter_weight = 2;
+ } else {
+ int err = 0;
#if ALT_REF_MC_ENABLED
#define THRESH_LOW 10000
#define THRESH_HIGH 20000
- // Find best match in this frame by MC
- err = vp8_temporal_filter_find_matching_mb_c
- (cpi,
- cpi->frames[alt_ref_index],
- cpi->frames[frame],
- mb_y_offset,
- THRESH_LOW);
+ // Find best match in this frame by MC
+ err = vp8_temporal_filter_find_matching_mb_c
+ (cpi,
+ cpi->frames[alt_ref_index],
+ cpi->frames[frame],
+ mb_y_offset,
+ THRESH_LOW);
#endif
- // Assign higher weight to matching MB if it's error
- // score is lower. If not applying MC default behavior
- // is to weight all MBs equal.
- filter_weight = err<THRESH_LOW
- ? 2 : err<THRESH_HIGH ? 1 : 0;
- }
-
- if (filter_weight != 0)
- {
- // Construct the predictors
- vp8_temporal_filter_predictors_mb_c
- (mbd,
- cpi->frames[frame]->y_buffer + mb_y_offset,
- cpi->frames[frame]->u_buffer + mb_uv_offset,
- cpi->frames[frame]->v_buffer + mb_uv_offset,
- cpi->frames[frame]->y_stride,
- mbd->block[0].bmi.as_mv.first.as_mv.row,
- mbd->block[0].bmi.as_mv.first.as_mv.col,
- predictor);
-
- // Apply the filter (YUV)
- TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
- (f->y_buffer + mb_y_offset,
- f->y_stride,
- predictor,
- 16,
- strength,
- filter_weight,
- accumulator,
- count);
-
- TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
- (f->u_buffer + mb_uv_offset,
- f->uv_stride,
- predictor + 256,
- 8,
- strength,
- filter_weight,
- accumulator + 256,
- count + 256);
-
- TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
- (f->v_buffer + mb_uv_offset,
- f->uv_stride,
- predictor + 320,
- 8,
- strength,
- filter_weight,
- accumulator + 320,
- count + 320);
- }
- }
-
- // Normalize filter output to produce AltRef frame
- dst1 = cpi->alt_ref_buffer.y_buffer;
- stride = cpi->alt_ref_buffer.y_stride;
- byte = mb_y_offset;
- for (i = 0,k = 0; i < 16; i++)
- {
- for (j = 0; j < 16; j++, k++)
- {
- unsigned int pval = accumulator[k] + (count[k] >> 1);
- pval *= cpi->fixed_divide[count[k]];
- pval >>= 19;
-
- dst1[byte] = (unsigned char)pval;
-
- // move to next pixel
- byte++;
- }
-
- byte += stride - 16;
- }
-
- dst1 = cpi->alt_ref_buffer.u_buffer;
- dst2 = cpi->alt_ref_buffer.v_buffer;
- stride = cpi->alt_ref_buffer.uv_stride;
- byte = mb_uv_offset;
- for (i = 0,k = 256; i < 8; i++)
- {
- for (j = 0; j < 8; j++, k++)
- {
- int m=k+64;
-
- // U
- unsigned int pval = accumulator[k] + (count[k] >> 1);
- pval *= cpi->fixed_divide[count[k]];
- pval >>= 19;
- dst1[byte] = (unsigned char)pval;
-
- // V
- pval = accumulator[m] + (count[m] >> 1);
- pval *= cpi->fixed_divide[count[m]];
- pval >>= 19;
- dst2[byte] = (unsigned char)pval;
-
- // move to next pixel
- byte++;
- }
-
- byte += stride - 8;
- }
-
- mb_y_offset += 16;
- mb_uv_offset += 8;
+          // Assign a higher weight to the matching MB if its error
+          // score is lower. If MC is not applied, the default behavior
+          // is to weight all MBs equally.
+ filter_weight = err < THRESH_LOW
+ ? 2 : err < THRESH_HIGH ? 1 : 0;
+ }
+
+ if (filter_weight != 0) {
+ // Construct the predictors
+ vp8_temporal_filter_predictors_mb_c
+ (mbd,
+ cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->u_buffer + mb_uv_offset,
+ cpi->frames[frame]->v_buffer + mb_uv_offset,
+ cpi->frames[frame]->y_stride,
+ mbd->block[0].bmi.as_mv.first.as_mv.row,
+ mbd->block[0].bmi.as_mv.first.as_mv.col,
+ predictor);
+
+ // Apply the filter (YUV)
+ TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
+ (f->y_buffer + mb_y_offset,
+ f->y_stride,
+ predictor,
+ 16,
+ strength,
+ filter_weight,
+ accumulator,
+ count);
+
+ TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
+ (f->u_buffer + mb_uv_offset,
+ f->uv_stride,
+ predictor + 256,
+ 8,
+ strength,
+ filter_weight,
+ accumulator + 256,
+ count + 256);
+
+ TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
+ (f->v_buffer + mb_uv_offset,
+ f->uv_stride,
+ predictor + 320,
+ 8,
+ strength,
+ filter_weight,
+ accumulator + 320,
+ count + 320);
+ }
+ }
+
+ // Normalize filter output to produce AltRef frame
+ dst1 = cpi->alt_ref_buffer.y_buffer;
+ stride = cpi->alt_ref_buffer.y_stride;
+ byte = mb_y_offset;
+ for (i = 0, k = 0; i < 16; i++) {
+ for (j = 0; j < 16; j++, k++) {
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+
+ dst1[byte] = (unsigned char)pval;
+
+ // move to next pixel
+ byte++;
+ }
+
+ byte += stride - 16;
+ }
+
+ dst1 = cpi->alt_ref_buffer.u_buffer;
+ dst2 = cpi->alt_ref_buffer.v_buffer;
+ stride = cpi->alt_ref_buffer.uv_stride;
+ byte = mb_uv_offset;
+ for (i = 0, k = 256; i < 8; i++) {
+ for (j = 0; j < 8; j++, k++) {
+ int m = k + 64;
+
+ // U
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+ dst1[byte] = (unsigned char)pval;
+
+ // V
+ pval = accumulator[m] + (count[m] >> 1);
+ pval *= cpi->fixed_divide[count[m]];
+ pval >>= 19;
+ dst2[byte] = (unsigned char)pval;
+
+ // move to next pixel
+ byte++;
}
- mb_y_offset += 16*(f->y_stride-mb_cols);
- mb_uv_offset += 8*(f->uv_stride-mb_cols);
+ byte += stride - 8;
+ }
+
+ mb_y_offset += 16;
+ mb_uv_offset += 8;
}
- // Restore input state
- mbd->pre.y_buffer = y_buffer;
- mbd->pre.u_buffer = u_buffer;
- mbd->pre.v_buffer = v_buffer;
+ mb_y_offset += 16 * (f->y_stride - mb_cols);
+ mb_uv_offset += 8 * (f->uv_stride - mb_cols);
+ }
+
+ // Restore input state
+ mbd->pre.y_buffer = y_buffer;
+ mbd->pre.u_buffer = u_buffer;
+ mbd->pre.v_buffer = v_buffer;
}
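The normalization above avoids a per-pixel divide: fixed_divide[k] is a reciprocal table, apparently (1 << 19) / k, so ((acc + count/2) * fixed_divide[count]) >> 19 is a rounding integer division acc/count. Worked example (hedged on the exact table contents): acc = 300, count = 7 gives (303 * 74898) >> 19 = 43 = round(300 / 7).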
void vp8_temporal_filter_prepare_c
(
- VP8_COMP *cpi,
- int distance
-)
-{
- int frame = 0;
+ VP8_COMP *cpi,
+ int distance
+) {
+ int frame = 0;
- int num_frames_backward = 0;
- int num_frames_forward = 0;
- int frames_to_blur_backward = 0;
- int frames_to_blur_forward = 0;
- int frames_to_blur = 0;
- int start_frame = 0;
+ int num_frames_backward = 0;
+ int num_frames_forward = 0;
+ int frames_to_blur_backward = 0;
+ int frames_to_blur_forward = 0;
+ int frames_to_blur = 0;
+ int start_frame = 0;
- int strength = cpi->oxcf.arnr_strength;
+ int strength = cpi->oxcf.arnr_strength;
- int blur_type = cpi->oxcf.arnr_type;
+ int blur_type = cpi->oxcf.arnr_type;
- int max_frames = cpi->active_arnr_frames;
+ int max_frames = cpi->active_arnr_frames;
- num_frames_backward = distance;
- num_frames_forward = vp8_lookahead_depth(cpi->lookahead)
- - (num_frames_backward + 1);
+ num_frames_backward = distance;
+ num_frames_forward = vp8_lookahead_depth(cpi->lookahead)
+ - (num_frames_backward + 1);
- switch (blur_type)
- {
+ switch (blur_type) {
case 1:
- /////////////////////////////////////////
- // Backward Blur
+ /////////////////////////////////////////
+ // Backward Blur
- frames_to_blur_backward = num_frames_backward;
+ frames_to_blur_backward = num_frames_backward;
- if (frames_to_blur_backward >= max_frames)
- frames_to_blur_backward = max_frames - 1;
+ if (frames_to_blur_backward >= max_frames)
+ frames_to_blur_backward = max_frames - 1;
- frames_to_blur = frames_to_blur_backward + 1;
- break;
+ frames_to_blur = frames_to_blur_backward + 1;
+ break;
case 2:
- /////////////////////////////////////////
- // Forward Blur
+ /////////////////////////////////////////
+ // Forward Blur
- frames_to_blur_forward = num_frames_forward;
+ frames_to_blur_forward = num_frames_forward;
- if (frames_to_blur_forward >= max_frames)
- frames_to_blur_forward = max_frames - 1;
+ if (frames_to_blur_forward >= max_frames)
+ frames_to_blur_forward = max_frames - 1;
- frames_to_blur = frames_to_blur_forward + 1;
- break;
+ frames_to_blur = frames_to_blur_forward + 1;
+ break;
case 3:
default:
- /////////////////////////////////////////
- // Center Blur
- frames_to_blur_forward = num_frames_forward;
- frames_to_blur_backward = num_frames_backward;
+ /////////////////////////////////////////
+ // Center Blur
+ frames_to_blur_forward = num_frames_forward;
+ frames_to_blur_backward = num_frames_backward;
- if (frames_to_blur_forward > frames_to_blur_backward)
- frames_to_blur_forward = frames_to_blur_backward;
+ if (frames_to_blur_forward > frames_to_blur_backward)
+ frames_to_blur_forward = frames_to_blur_backward;
- if (frames_to_blur_backward > frames_to_blur_forward)
- frames_to_blur_backward = frames_to_blur_forward;
+ if (frames_to_blur_backward > frames_to_blur_forward)
+ frames_to_blur_backward = frames_to_blur_forward;
- // When max_frames is even we have 1 more frame backward than forward
- if (frames_to_blur_forward > (max_frames - 1) / 2)
- frames_to_blur_forward = ((max_frames - 1) / 2);
+ // When max_frames is even we have 1 more frame backward than forward
+ if (frames_to_blur_forward > (max_frames - 1) / 2)
+ frames_to_blur_forward = ((max_frames - 1) / 2);
- if (frames_to_blur_backward > (max_frames / 2))
- frames_to_blur_backward = (max_frames / 2);
+ if (frames_to_blur_backward > (max_frames / 2))
+ frames_to_blur_backward = (max_frames / 2);
- frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
- break;
- }
+ frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
+ break;
+ }
- start_frame = distance + frames_to_blur_forward;
+ start_frame = distance + frames_to_blur_forward;
#ifdef DEBUGFWG
- // DEBUG FWG
- printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
- , max_frames
- , num_frames_backward
- , num_frames_forward
- , frames_to_blur
- , frames_to_blur_backward
- , frames_to_blur_forward
- , cpi->source_encode_index
- , cpi->last_alt_ref_sei
- , start_frame);
+ // DEBUG FWG
+ printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
+         , max_frames
+         , num_frames_backward
+         , num_frames_forward
+         , frames_to_blur
+         , frames_to_blur_backward
+         , frames_to_blur_forward
+         , cpi->source_encode_index
+         , cpi->last_alt_ref_sei
+         , start_frame);
#endif
- // Setup frame pointers, NULL indicates frame not included in filter
- vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
- for (frame = 0; frame < frames_to_blur; frame++)
- {
- int which_buffer = start_frame - frame;
- struct lookahead_entry* buf = vp8_lookahead_peek(cpi->lookahead,
- which_buffer);
- cpi->frames[frames_to_blur-1-frame] = &buf->img;
- }
-
- vp8_temporal_filter_iterate_c (
- cpi,
- frames_to_blur,
- frames_to_blur_backward,
- strength );
+  // Set up frame pointers; a NULL entry indicates a frame not included in the filter
+ vpx_memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
+ for (frame = 0; frame < frames_to_blur; frame++) {
+ int which_buffer = start_frame - frame;
+ struct lookahead_entry *buf = vp8_lookahead_peek(cpi->lookahead,
+ which_buffer);
+ cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
+ }
+
+ vp8_temporal_filter_iterate_c(
+ cpi,
+ frames_to_blur,
+ frames_to_blur_backward,
+ strength);
}
#endif
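A center-blur example for the clamping above (made-up numbers): with max_frames = 10, distance = 7 and a lookahead depth of 13, num_frames_backward = 7 and num_frames_forward = 13 - (7 + 1) = 5. Mutual clamping leaves both at 5; the even-max_frames clamps then give frames_to_blur_forward = 4 and frames_to_blur_backward = 5, so frames_to_blur = 10 and start_frame = 7 + 4 = 11.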
diff --git a/vp8/encoder/temporal_filter.h b/vp8/encoder/temporal_filter.h
index 740037a85..e051c5c76 100644
--- a/vp8/encoder/temporal_filter.h
+++ b/vp8/encoder/temporal_filter.h
@@ -13,17 +13,17 @@
#define __INC_VP8_TEMPORAL_FILTER_H
#define prototype_apply(sym)\
- void (sym) \
- ( \
- unsigned char *frame1, \
- unsigned int stride, \
- unsigned char *frame2, \
- unsigned int block_size, \
- int strength, \
- int filter_weight, \
- unsigned int *accumulator, \
- unsigned short *count \
- )
+ void (sym) \
+ ( \
+ unsigned char *frame1, \
+ unsigned int stride, \
+ unsigned char *frame2, \
+ unsigned int block_size, \
+ int strength, \
+ int filter_weight, \
+ unsigned int *accumulator, \
+ unsigned short *count \
+ )
#if ARCH_X86 || ARCH_X86_64
#include "x86/temporal_filter_x86.h"
@@ -34,9 +34,8 @@
#endif
extern prototype_apply(vp8_temporal_filter_apply);
-typedef struct
-{
- prototype_apply(*apply);
+typedef struct {
+ prototype_apply(*apply);
} vp8_temporal_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index 4bbe99fd0..2d58669e0 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -27,21 +27,21 @@
INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
extern unsigned int tree_update_hist [BLOCK_TYPES]
- [COEF_BANDS]
- [PREV_COEF_CONTEXTS]
- [ENTROPY_NODES][2];
+                                    [COEF_BANDS]
+                                    [PREV_COEF_CONTEXTS]
+                                    [ENTROPY_NODES][2];
extern unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
- [COEF_BANDS]
- [PREV_COEF_CONTEXTS]
- [ENTROPY_NODES] [2];
+                                        [COEF_BANDS]
+                                        [PREV_COEF_CONTEXTS]
+                                        [ENTROPY_NODES] [2];
#endif
-void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
-void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
+void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
void vp8_fix_contexts(MACROBLOCKD *x);
-static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2];
+static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
const TOKENVALUE *vp8_dct_value_tokens_ptr;
-static int dct_value_cost[DCT_MAX_VALUE*2];
+static int dct_value_cost[DCT_MAX_VALUE * 2];
const int *vp8_dct_value_cost_ptr;
#ifdef ENC_DEBUG
@@ -50,877 +50,782 @@ extern int mb_col_debug;
extern int enc_debug;
#endif
-static void fill_value_tokens()
-{
+static void fill_value_tokens() {
- TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
- vp8_extra_bit_struct *const e = vp8_extra_bits;
+ TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
+ vp8_extra_bit_struct *const e = vp8_extra_bits;
- int i = -DCT_MAX_VALUE;
- int sign = 1;
+ int i = -DCT_MAX_VALUE;
+ int sign = 1;
- do
- {
- if (!i)
- sign = 0;
-
- {
- const int a = sign ? -i : i;
- int eb = sign;
+ do {
+ if (!i)
+ sign = 0;
- if (a > 4)
- {
- int j = 4;
+ {
+ const int a = sign ? -i : i;
+ int eb = sign;
- while (++j < 11 && e[j].base_val <= a) {}
+ if (a > 4) {
+ int j = 4;
- t[i].Token = --j;
- eb |= (a - e[j].base_val) << 1;
- }
- else
- t[i].Token = a;
+ while (++j < 11 && e[j].base_val <= a) {}
- t[i].Extra = eb;
- }
+ t[i].Token = --j;
+ eb |= (a - e[j].base_val) << 1;
+ } else
+ t[i].Token = a;
- // initialize the cost for extra bits for all possible coefficient value.
- {
- int cost = 0;
- vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token;
+ t[i].Extra = eb;
+ }
- if (p->base_val)
- {
- const int extra = t[i].Extra;
- const int Length = p->Len;
+ // initialize the cost for extra bits for all possible coefficient value.
+ {
+ int cost = 0;
+ vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token;
- if (Length)
- cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, Length);
+ if (p->base_val) {
+ const int extra = t[i].Extra;
+ const int Length = p->Len;
- cost += vp8_cost_bit(vp8_prob_half, extra & 1); /* sign */
- dct_value_cost[i + DCT_MAX_VALUE] = cost;
- }
+ if (Length)
+ cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, Length);
- }
+ cost += vp8_cost_bit(vp8_prob_half, extra & 1); /* sign */
+ dct_value_cost[i + DCT_MAX_VALUE] = cost;
+ }
}
- while (++i < DCT_MAX_VALUE);
- vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
- vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+ } while (++i < DCT_MAX_VALUE);
+
+ vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
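A token-mapping example (hedged: it assumes the usual VP8 extra-bit base values 5, 7, 11, 19, 35, 67 in vp8_extra_bits): for v = -9 the magnitude a = 9 lands in the category whose base_val is 7, so the while loop stops at the next base (11) and

  t[-9].Token = the category-2 token (index 6)
  t[-9].Extra = sign | ((9 - 7) << 1) = 1 | 4 = 5

i.e. bit 0 of Extra carries the sign and the remaining bits the offset above the category base, matching the eb construction above.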
static void tokenize2nd_order_b_8x8
(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- int c = 0; /* start at DC */
- const int eob = b->eob; /* one beyond last nonzero coeff */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
- const short *qcoeff_ptr = b->qcoeff;
-
- int seg_eob = 4;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- assert(eob<=4);
-
- do
- {
- const int band = vp8_coef_bands[c];
- int v = 0;
-
- if (c < eob)
- {
- int rc = vp8_default_zig_zag1d[c];
- v = qcoeff_ptr[rc];
-
- assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- x = vp8_dct_value_tokens_ptr[v].Token;
- }
- else
- x = DCT_EOB_TOKEN;
-
- t->Token = x;
- //printf("Token : %d\n", x);
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
-
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ const FRAME_TYPE frametype,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ int c = 0; /* start at DC */
+ const int eob = b->eob; /* one beyond last nonzero coeff */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ int x;
+ const short *qcoeff_ptr = b->qcoeff;
+
+ int seg_eob = 4;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
+ }
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ assert(eob <= 4);
+
+ do {
+ const int band = vp8_coef_bands[c];
+ int v = 0;
+
+ if (c < eob) {
+ int rc = vp8_default_zig_zag1d[c];
+ v = qcoeff_ptr[rc];
+
+ assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ x = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ x = DCT_EOB_TOKEN;
+
+ t->Token = x;
+ // printf("Token : %d\n", x);
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+
+ t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
#ifdef ENC_DEBUG
- if (t->skip_eob_node && vp8_coef_encodings[x].Len==1)
- printf("Trouble 2 x=%d Len=%d skip=%d eob=%d c=%d band=%d type=%d: [%d %d %d]\n",
- x, vp8_coef_encodings[x].Len, t->skip_eob_node, eob, c, band, type,
- cpi->count, mb_row_debug, mb_col_debug);
+ if (t->skip_eob_node && vp8_coef_encodings[x].Len == 1)
+ printf("Trouble 2 x=%d Len=%d skip=%d eob=%d c=%d band=%d type=%d: [%d %d %d]\n",
+ x, vp8_coef_encodings[x].Len, t->skip_eob_node, eob, c, band, type,
+ cpi->count, mb_row_debug, mb_col_debug);
#endif
- ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
- }
- while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c <seg_eob);
+ ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
+ } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ *tp = t;
+ pt = (c != !type); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
static void tokenize2nd_order_b
(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- int c; /* start at DC */
- TOKENEXTRA *t = *tp;/* store tokens starting here */
- const BLOCKD *b;
- const short *qcoeff_ptr;
- ENTROPY_CONTEXT * a;
- ENTROPY_CONTEXT * l;
- int band, rc, v, token;
-
- int seg_eob = 16;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
-
- b = xd->block + 24;
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + 8;
- l = (ENTROPY_CONTEXT *)xd->left_context + 8;
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- for (c = 0; c < b->eob; c++)
- {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
-
- ++cpi->coef_counts [1] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ int c; /* start at DC */
+ TOKENEXTRA *t = *tp;/* store tokens starting here */
+ const BLOCKD *b;
+ const short *qcoeff_ptr;
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int band, rc, v, token;
+
+ int seg_eob = 16;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
+ }
+
+ b = xd->block + 24;
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + 8;
+ l = (ENTROPY_CONTEXT *)xd->left_context + 8;
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ for (c = 0; c < b->eob; c++) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
+
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+
+ ++cpi->coef_counts [1] [band] [pt] [token];
+
+ pt = vp8_prev_token_class[token];
+ t++;
+ }

- if (c < seg_eob)
- {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
- t->skip_eob_node = ((pt == 0) && (band > 0));
- ++cpi->coef_counts [1] [band] [pt] [DCT_EOB_TOKEN];
- t++;
- }
-
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ if (c < seg_eob) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+ ++cpi->coef_counts [1] [band] [pt] [DCT_EOB_TOKEN];
+ t++;
+ }
+ *tp = t;
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
static void tokenize1st_order_b_8x8
(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- int c = type ? 0 : 1; /* start at DC unless type 0 */
- const int eob = b->eob; /* one beyond last nonzero coeff */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
- const short *qcoeff_ptr = b->qcoeff;
-
- int seg_eob = 64;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- do
- {
- const int band = vp8_coef_bands_8x8[c];
- int v;
-
- x = DCT_EOB_TOKEN;
-
- if (c < eob)
- {
- int rc = vp8_default_zig_zag1d_8x8[c];
- v = qcoeff_ptr[rc];
-
- assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- x = vp8_dct_value_tokens_ptr[v].Token;
- }
-
- t->Token = x;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ const FRAME_TYPE frametype,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ int c = type ? 0 : 1; /* start at DC unless type 0 */
+ const int eob = b->eob; /* one beyond last nonzero coeff */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ int x;
+ const short *qcoeff_ptr = b->qcoeff;
+
+ int seg_eob = 64;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
+ }
+
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ do {
+ const int band = vp8_coef_bands_8x8[c];
+ int v;
+
+ x = DCT_EOB_TOKEN;
+
+ if (c < eob) {
+ int rc = vp8_default_zig_zag1d_8x8[c];
+ v = qcoeff_ptr[rc];
+
+ assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ x = vp8_dct_value_tokens_ptr[v].Token;
+ }
+
+ t->Token = x;
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+ t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
#ifdef ENC_DEBUG
- if (t->skip_eob_node && vp8_coef_encodings[x].Len==1)
- printf("Trouble 1 x=%d Len=%d skip=%d eob=%d c=%d band=%d type=%d: [%d %d %d]\n", x, vp8_coef_encodings[x].Len, t->skip_eob_node, eob, c, band, type, cpi->count, mb_row_debug, mb_col_debug);
+ if (t->skip_eob_node && vp8_coef_encodings[x].Len == 1)
+ printf("Trouble 1 x=%d Len=%d skip=%d eob=%d c=%d band=%d type=%d: [%d %d %d]\n", x, vp8_coef_encodings[x].Len, t->skip_eob_node, eob, c, band, type, cpi->count, mb_row_debug, mb_col_debug);
#endif
- ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
- }
- while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
+ ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
+ } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);

- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ *tp = t;
+ pt = (c != !type); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
static void tokenize1st_order_b
(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- VP8_COMP *cpi
-)
-{
- unsigned int block;
- const BLOCKD *b;
- int pt; /* near block/prev token context index */
- int c;
- int token;
- TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
- ENTROPY_CONTEXT * a;
- ENTROPY_CONTEXT * l;
- int band, rc, v;
- int tmp1, tmp2;
-
- int seg_eob = 16;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
- {
- seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
-
- b = xd->block;
- /* Luma */
- for (block = 0; block < 16; block++, b++)
- {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- c = type ? 0 : 1;
- for (; c < b->eob; c++)
- {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- ++cpi->coef_counts [type] [band] [pt] [token];
- pt = vp8_prev_token_class[token];
- t++;
- }
- if (c < seg_eob)
- {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
- t++;
- }
- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
- /* Chroma */
- for (block = 16; block < 24; block++, b++)
- {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- for (c = 0; c < b->eob; c++)
- {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
- t->skip_eob_node = ((pt == 0) && (band > 0));
- ++cpi->coef_counts [2] [band] [pt] [token];
- pt = vp8_prev_token_class[token];
- t++;
- }
- if (c < seg_eob)
- {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
- t->skip_eob_node = ((pt == 0) && (band > 0));
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
- t++;
- }
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ VP8_COMP *cpi
+) {
+ unsigned int block;
+ const BLOCKD *b;
+ int pt; /* near block/prev token context index */
+ int c;
+ int token;
+ TOKENEXTRA *t = *tp;/* store tokens starting here */
+ const short *qcoeff_ptr;
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int band, rc, v;
+ int tmp1, tmp2;
+
+ int seg_eob = 16;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
+ }
+
+ b = xd->block;
+ /* Luma */
+ for (block = 0; block < 16; block++, b++) {
+ tmp1 = vp8_block2above[block];
+ tmp2 = vp8_block2left[block];
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ c = type ? 0 : 1;
+ for (; c < b->eob; c++) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->skip_eob_node = pt == 0 &&
+ ((band > 0 && type > 0) || (band > 1 && type == 0));
+ ++cpi->coef_counts [type] [band] [pt] [token];
+ pt = vp8_prev_token_class[token];
+ t++;
+ }
+ if (c < seg_eob) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->skip_eob_node = pt == 0 &&
+ ((band > 0 && type > 0) || (band > 1 && type == 0));
+ ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
+ t++;
+ }
+ *tp = t;
+ pt = (c != !type); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+ }
+ /* Chroma */
+ for (block = 16; block < 24; block++, b++) {
+ tmp1 = vp8_block2above[block];
+ tmp2 = vp8_block2left[block];
+ qcoeff_ptr = b->qcoeff;
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ for (c = 0; c < b->eob; c++) {
+ rc = vp8_default_zig_zag1d[c];
+ band = vp8_coef_bands[c];
+ v = qcoeff_ptr[rc];
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+ ++cpi->coef_counts [2] [band] [pt] [token];
+
+ pt = vp8_prev_token_class[token];
+ t++;
+ }
+ if (c < seg_eob) {
+ band = vp8_coef_bands[c];
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+ ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
+ t++;
+ }
+ *tp = t;
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+ }
}
-int mby_is_skippable(MACROBLOCKD *x, int has_y2_block)
-{
- int skip = 1;
- int i = 0;
+int mby_is_skippable(MACROBLOCKD *x, int has_y2_block) {
+ int skip = 1;
+ int i = 0;

- if (has_y2_block)
- {
- for (i = 0; i < 16; i++)
- skip &= (x->block[i].eob < 2);
- skip &= (!x->block[24].eob);
- }
- else
- {
- for (i = 0; i < 16; i++)
- skip &= (!x->block[i].eob);
- }
- return skip;
+ if (has_y2_block) {
+ for (i = 0; i < 16; i++)
+ skip &= (x->block[i].eob < 2);
+ skip &= (!x->block[24].eob);
+ } else {
+ for (i = 0; i < 16; i++)
+ skip &= (!x->block[i].eob);
+ }
+ return skip;
}
-int mbuv_is_skippable(MACROBLOCKD *x)
-{
- int skip = 1;
- int i;
+int mbuv_is_skippable(MACROBLOCKD *x) {
+ int skip = 1;
+ int i;

- for (i = 16; i < 24; i++)
- skip &= (!x->block[i].eob);
- return skip;
+ for (i = 16; i < 24; i++)
+ skip &= (!x->block[i].eob);
+ return skip;
}
-int mb_is_skippable(MACROBLOCKD *x, int has_y2_block)
-{
- return (mby_is_skippable(x, has_y2_block) &
- mbuv_is_skippable(x));
+int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) {
+ return (mby_is_skippable(x, has_y2_block) &
+ mbuv_is_skippable(x));
}
-int mby_is_skippable_8x8(MACROBLOCKD *x)
-{
- int skip = 1;
- int i = 0;
+int mby_is_skippable_8x8(MACROBLOCKD *x) {
+ int skip = 1;
+ int i = 0;

- for (i = 0; i < 16; i+=4)
- skip &= (x->block[i].eob < 2);
- skip &= (!x->block[24].eob);
- return skip;
+ for (i = 0; i < 16; i += 4)
+ skip &= (x->block[i].eob < 2);
+ skip &= (!x->block[24].eob);
+ return skip;
}
-int mbuv_is_skippable_8x8(MACROBLOCKD *x)
-{
- return (!x->block[16].eob) & (!x->block[20].eob);
+int mbuv_is_skippable_8x8(MACROBLOCKD *x) {
+ return (!x->block[16].eob) & (!x->block[20].eob);
}
-int mb_is_skippable_8x8(MACROBLOCKD *x)
-{
- return (mby_is_skippable_8x8(x) & mbuv_is_skippable_8x8(x));
+int mb_is_skippable_8x8(MACROBLOCKD *x) {
+ return (mby_is_skippable_8x8(x) & mbuv_is_skippable_8x8(x));
}
-void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
-{
- int plane_type;
- int has_y2_block;
- int b;
- int tx_type = x->mode_info_context->mbmi.txfm_size;
+void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) {
+ int plane_type;
+ int has_y2_block;
+ int b;
+ int tx_type = x->mode_info_context->mbmi.txfm_size;
#if CONFIG_NEWENTROPY
- int mb_skip_context = get_pred_context(&cpi->common, x, PRED_MBSKIP);
+ int mb_skip_context = get_pred_context(&cpi->common, x, PRED_MBSKIP);
#endif
- // If the MB is going to be skipped because of a segment level flag
- // exclude this from the skip count stats used to calculate the
- // transmitted skip probability;
- int skip_inc;
- int segment_id = x->mode_info_context->mbmi.segment_id;
+ // If the MB is going to be skipped because of a segment level flag
+ // exclude this from the skip count stats used to calculate the
+ // transmitted skip probability;
+ int skip_inc;
+ int segment_id = x->mode_info_context->mbmi.segment_id;

- if ( !segfeature_active( x, segment_id, SEG_LVL_EOB ) ||
- ( get_segdata( x, segment_id, SEG_LVL_EOB ) != 0) )
- {
- skip_inc = 1;
- }
- else
- skip_inc = 0;
+ if (!segfeature_active(x, segment_id, SEG_LVL_EOB) ||
+ (get_segdata(x, segment_id, SEG_LVL_EOB) != 0)) {
+ skip_inc = 1;
+ } else
+ skip_inc = 0;

- has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED
- && x->mode_info_context->mbmi.mode != I8X8_PRED
- && x->mode_info_context->mbmi.mode != SPLITMV);
+ has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED
+ && x->mode_info_context->mbmi.mode != I8X8_PRED
+ && x->mode_info_context->mbmi.mode != SPLITMV);

- x->mode_info_context->mbmi.mb_skip_coeff =
- (( tx_type == TX_8X8 ) ?
- mb_is_skippable_8x8(x) :
- mb_is_skippable(x, has_y2_block));
+ x->mode_info_context->mbmi.mb_skip_coeff =
+ ((tx_type == TX_8X8) ?
+ mb_is_skippable_8x8(x) :
+ mb_is_skippable(x, has_y2_block));

- if (x->mode_info_context->mbmi.mb_skip_coeff)
- {
+ if (x->mode_info_context->mbmi.mb_skip_coeff) {
#if CONFIG_NEWENTROPY
- cpi->skip_true_count[mb_skip_context] += skip_inc;
+ cpi->skip_true_count[mb_skip_context] += skip_inc;
#else
- cpi->skip_true_count += skip_inc;
+ cpi->skip_true_count += skip_inc;
#endif
- if (!cpi->common.mb_no_coeff_skip)
- {
- if ( tx_type == TX_8X8 )
- vp8_stuff_mb_8x8(cpi, x, t) ;
- else
- vp8_stuff_mb(cpi, x, t) ;
- }
- else
- {
- vp8_fix_contexts(x);
- }
-
- return;
- }
+ if (!cpi->common.mb_no_coeff_skip) {
+ if (tx_type == TX_8X8)
+ vp8_stuff_mb_8x8(cpi, x, t);
+ else
+ vp8_stuff_mb(cpi, x, t);
+ } else {
+ vp8_fix_contexts(x);
+ }
+ return;
+ }
+
#if CONFIG_NEWENTROPY
- cpi->skip_false_count[mb_skip_context] += skip_inc;
+ cpi->skip_false_count[mb_skip_context] += skip_inc;
#else
- cpi->skip_false_count += skip_inc;
+ cpi->skip_false_count += skip_inc;
#endif
- plane_type = 3;
- if(has_y2_block)
- {
- if ( tx_type == TX_8X8 )
- {
- ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
- ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
- tokenize2nd_order_b_8x8(x,
- x->block + 24, t, 1, x->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi);
- }
- else
- tokenize2nd_order_b(x, t, cpi);
-
- plane_type = 0;
- }
-
- if ( tx_type == TX_8X8 )
- {
- ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
- ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
- for (b = 0; b < 16; b+=4)
- {
- tokenize1st_order_b_8x8(x,
- x->block + b, t, plane_type, x->frame_type,
- A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b] );
- }
- for (b = 16; b < 24; b+=4)
- {
- tokenize1st_order_b_8x8(x,
- x->block + b, t, 2, x->frame_type,
- A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
- }
- }
- else
- tokenize1st_order_b(x, t, plane_type, cpi);
+ plane_type = 3;
+ if (has_y2_block) {
+ if (tx_type == TX_8X8) {
+ ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
+ ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
+ tokenize2nd_order_b_8x8(x,
+ x->block + 24, t, 1, x->frame_type,
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi);
+ } else
+ tokenize2nd_order_b(x, t, cpi);
+
+ plane_type = 0;
+ }
+
+ if (tx_type == TX_8X8) {
+ ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
+ ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
+ for (b = 0; b < 16; b += 4) {
+ tokenize1st_order_b_8x8(x,
+ x->block + b, t, plane_type, x->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ }
+ for (b = 16; b < 24; b += 4) {
+ tokenize1st_order_b_8x8(x,
+ x->block + b, t, 2, x->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ }
+ } else
+ tokenize1st_order_b(x, t, plane_type, cpi);
}
#ifdef ENTROPY_STATS
-void init_context_counters(void)
-{
- FILE *f = fopen("context.bin", "rb");
- if(!f)
- {
- vpx_memset(context_counters, 0, sizeof(context_counters));
- vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
- }
- else
- {
- fread(context_counters, sizeof(context_counters), 1, f);
- fread(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
- fclose(f);
- }
-
- f = fopen("treeupdate.bin", "rb");
- if(!f)
- {
- vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
- vpx_memset(tree_update_hist_8x8, 0, sizeof(tree_update_hist_8x8));
- }
- else
- {
- fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
- fread(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
- fclose(f);
- }
+void init_context_counters(void) {
+ FILE *f = fopen("context.bin", "rb");
+ if (!f) {
+ vpx_memset(context_counters, 0, sizeof(context_counters));
+ vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
+ } else {
+ fread(context_counters, sizeof(context_counters), 1, f);
+ fread(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
+ fclose(f);
+ }
+
+ f = fopen("treeupdate.bin", "rb");
+ if (!f) {
+ vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
+ vpx_memset(tree_update_hist_8x8, 0, sizeof(tree_update_hist_8x8));
+ } else {
+ fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fread(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
+ fclose(f);
+ }
}
-void print_context_counters()
-{
+void print_context_counters() {

- int type, band, pt, t;
- FILE *f = fopen("context.c", "w");
+ int type, band, pt, t;
+ FILE *f = fopen("context.c", "w");

- fprintf(f, "#include \"entropy.h\"\n");
- fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n");
- fprintf(f, "static const unsigned int\n"
- "vp8_default_coef_counts[BLOCK_TYPES]\n"
- " [COEF_BANDS]\n"
- " [PREV_COEF_CONTEXTS]\n"
- " [MAX_ENTROPY_TOKENS]={\n");
+ fprintf(f, "#include \"entropy.h\"\n");
+ fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n");
+ fprintf(f, "static const unsigned int\n"
+ "vp8_default_coef_counts[BLOCK_TYPES]\n"
+ " [COEF_BANDS]\n"
+ " [PREV_COEF_CONTEXTS]\n"
+ " [MAX_ENTROPY_TOKENS]={\n");
# define Comma( X) (X? ",":"")
- type = 0;
- do
- {
- fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
- band = 0;
- do
- {
- fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
- pt = 0;
- do
- {
- fprintf(f, "%s\n {", Comma(pt));
-
- t = 0;
- do
- {
- const INT64 x = context_counters [type] [band] [pt] [t];
- const int y = (int) x;
- assert(x == (INT64) y); /* no overflow handling yet */
- fprintf(f, "%s %d", Comma(t), y);
- }
- while (++t < MAX_ENTROPY_TOKENS);
- fprintf(f, "}");
- }
- while (++pt < PREV_COEF_CONTEXTS);
- fprintf(f, "\n }");
- }
- while (++band < COEF_BANDS);
- fprintf(f, "\n }");
- }
- while (++type < BLOCK_TYPES);
- fprintf(f, "\n};\n");
+ type = 0;
+ do {
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ fprintf(f, "%s\n {", Comma(pt));
+
+ t = 0;
+ do {
+ const INT64 x = context_counters [type] [band] [pt] [t];
+ const int y = (int) x;
+ assert(x == (INT64) y); /* no overflow handling yet */
+ fprintf(f, "%s %d", Comma(t), y);
+ } while (++t < MAX_ENTROPY_TOKENS);
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++type < BLOCK_TYPES);
+ fprintf(f, "\n};\n");

- fprintf(f, "static const unsigned int\nvp8_default_coef_counts_8x8"
- "[BLOCK_TYPES_8X8] [COEF_BANDS]"
- "[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
+ fprintf(f, "static const unsigned int\nvp8_default_coef_counts_8x8"
+ "[BLOCK_TYPES_8X8] [COEF_BANDS]"
+ "[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");

- type = 0;
- do
- {
- fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
- band = 0;
- do
- {
- fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
- pt = 0;
- do
- {
- fprintf(f, "%s\n {", Comma(pt));
- t = 0;
- do
- {
- const INT64 x = context_counters_8x8 [type] [band] [pt] [t];
- const int y = (int) x;
-
- assert(x == (INT64) y); /* no overflow handling yet */
- fprintf(f, "%s %d", Comma(t), y);
-
- }
- while (++t < MAX_ENTROPY_TOKENS);
-
- fprintf(f, "}");
- }
- while (++pt < PREV_COEF_CONTEXTS);
-
- fprintf(f, "\n }");
-
- }
- while (++band < COEF_BANDS);
-
- fprintf(f, "\n }");
- }
- while (++type < BLOCK_TYPES_8X8);
- fprintf(f, "\n};\n");
- fprintf(f, "static const vp8_prob\n"
- "vp8_default_coef_probs[BLOCK_TYPES] [COEF_BANDS] \n"
- "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
- type = 0;
- do
- {
- fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
- band = 0;
- do
- {
- fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
- pt = 0;
- do
- {
- unsigned int branch_ct [ENTROPY_NODES] [2];
- unsigned int coef_counts[MAX_ENTROPY_TOKENS];
- vp8_prob coef_probs[ENTROPY_NODES];
- for (t=0; t<MAX_ENTROPY_TOKENS; ++t)
- coef_counts[t]=context_counters [type] [band] [pt] [t];
- vp8_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- coef_probs, branch_ct, coef_counts, 256, 1);
- fprintf(f, "%s\n {", Comma(pt));
- t = 0;
- do
- {
- fprintf(f, "%s %d", Comma(t), coef_probs[t]);
- }
- while (++t < ENTROPY_NODES);
- fprintf(f, "}");
- }
- while (++pt < PREV_COEF_CONTEXTS);
- fprintf(f, "\n }");
- }
- while (++band < COEF_BANDS);
- fprintf(f, "\n }");
- }
- while (++type < BLOCK_TYPES);
- fprintf(f, "\n};\n");
- fprintf(f, "static const vp8_prob\n"
- "vp8_default_coef_probs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS]\n"
- "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
- type = 0;
- do
- {
- fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
- band = 0;
- do
- {
- fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
- pt = 0;
- do
- {
- unsigned int branch_ct [ENTROPY_NODES] [2];
- unsigned int coef_counts[MAX_ENTROPY_TOKENS];
- vp8_prob coef_probs[ENTROPY_NODES];
- for (t=0; t<MAX_ENTROPY_TOKENS; ++t)
- coef_counts[t]=context_counters_8x8[type] [band] [pt] [t];
- vp8_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
- coef_probs, branch_ct, coef_counts, 256, 1);
- fprintf(f, "%s\n {", Comma(pt));
- t = 0;
- do
- {
- fprintf(f, "%s %d", Comma(t), coef_probs[t]);
- }
- while (++t < ENTROPY_NODES);
- fprintf(f, "}");
- }
- while (++pt < PREV_COEF_CONTEXTS);
- fprintf(f, "\n }");
- }
- while (++band < COEF_BANDS);
- fprintf(f, "\n }");
- }
- while (++type < BLOCK_TYPES_8X8);
- fprintf(f, "\n};\n");
-
- fclose(f);
-
- f = fopen("context.bin", "wb");
- fwrite(context_counters, sizeof(context_counters), 1, f);
- fwrite(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
- fclose(f);
+ type = 0;
+ do {
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ fprintf(f, "%s\n {", Comma(pt));
+ t = 0;
+ do {
+ const INT64 x = context_counters_8x8 [type] [band] [pt] [t];
+ const int y = (int) x;
+
+ assert(x == (INT64) y); /* no overflow handling yet */
+ fprintf(f, "%s %d", Comma(t), y);
+
+ } while (++t < MAX_ENTROPY_TOKENS);
+
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+
+ fprintf(f, "\n }");
+
+ } while (++band < COEF_BANDS);
+
+ fprintf(f, "\n }");
+ } while (++type < BLOCK_TYPES_8X8);
+ fprintf(f, "\n};\n");
+ fprintf(f, "static const vp8_prob\n"
+ "vp8_default_coef_probs[BLOCK_TYPES] [COEF_BANDS] \n"
+ "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
+ type = 0;
+ do {
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ unsigned int branch_ct [ENTROPY_NODES] [2];
+ unsigned int coef_counts[MAX_ENTROPY_TOKENS];
+ vp8_prob coef_probs[ENTROPY_NODES];
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ coef_counts[t] = context_counters [type] [band] [pt] [t];
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ coef_probs, branch_ct, coef_counts, 256, 1);
+ fprintf(f, "%s\n {", Comma(pt));
+ t = 0;
+ do {
+ fprintf(f, "%s %d", Comma(t), coef_probs[t]);
+ } while (++t < ENTROPY_NODES);
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++type < BLOCK_TYPES);
+ fprintf(f, "\n};\n");
+ fprintf(f, "static const vp8_prob\n"
+ "vp8_default_coef_probs_8x8[BLOCK_TYPES_8X8] [COEF_BANDS]\n"
+ "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
+ type = 0;
+ do {
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ unsigned int branch_ct [ENTROPY_NODES] [2];
+ unsigned int coef_counts[MAX_ENTROPY_TOKENS];
+ vp8_prob coef_probs[ENTROPY_NODES];
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ coef_counts[t] = context_counters_8x8[type] [band] [pt] [t];
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ coef_probs, branch_ct, coef_counts, 256, 1);
+ fprintf(f, "%s\n {", Comma(pt));
+ t = 0;
+ do {
+ fprintf(f, "%s %d", Comma(t), coef_probs[t]);
+ } while (++t < ENTROPY_NODES);
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+
+ fprintf(f, "\n }");
+
+ } while (++band < COEF_BANDS);
+
+ fprintf(f, "\n }");
+ } while (++type < BLOCK_TYPES_8X8);
+ fprintf(f, "\n};\n");
+
+ fclose(f);
+
+ f = fopen("context.bin", "wb");
+ fwrite(context_counters, sizeof(context_counters), 1, f);
+ fwrite(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
+ fclose(f);
}
#endif
-void vp8_tokenize_initialize()
-{
- fill_value_tokens();
+void vp8_tokenize_initialize() {
+ fill_value_tokens();
}
static __inline void stuff2nd_order_b_8x8
(
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
- (void) b;
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [1] [0] [pt];
- //t->section = 11;
- t->skip_eob_node = 0;
- ++t;
-
- *tp = t;
- ++cpi->coef_counts_8x8 [1] [0] [pt] [DCT_EOB_TOKEN];
- pt = 0;
- *a = *l = pt;
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ const FRAME_TYPE frametype,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ (void) frametype;
+ (void) type;
+ (void) b;
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [1] [0] [pt];
+ // t->section = 11;
+ t->skip_eob_node = 0;
+ ++t;
+
+ *tp = t;
+ ++cpi->coef_counts_8x8 [1] [0] [pt] [DCT_EOB_TOKEN];
+ pt = 0;
+ *a = *l = pt;
}
static __inline void stuff1st_order_b_8x8
(
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
- (void) b;
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
- //t->section = 8;
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- ++cpi->coef_counts_8x8 [0] [1] [pt] [DCT_EOB_TOKEN];
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ const FRAME_TYPE frametype,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ (void) frametype;
+ (void) type;
+ (void) b;
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
+ // t->section = 8;
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ ++cpi->coef_counts_8x8 [0] [1] [pt] [DCT_EOB_TOKEN];
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
@@ -928,173 +833,161 @@ static __inline void stuff1st_order_b_8x8
static __inline
void stuff1st_order_buv_8x8
(
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
- (void) b;
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [2] [0] [pt];
- //t->section = 13;
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- ++cpi->coef_counts_8x8[2] [0] [pt] [DCT_EOB_TOKEN];
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
+ const FRAME_TYPE frametype,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ (void) frametype;
+ (void) type;
+ (void) b;
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [2] [0] [pt];
+ // t->section = 13;
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ ++cpi->coef_counts_8x8[2] [0] [pt] [DCT_EOB_TOKEN];
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
-void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
-{
- ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
- ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
- int plane_type;
- int b;
-
- stuff2nd_order_b_8x8(x->block + 24, t, 1, x->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi);
- plane_type = 0;
-
- for (b = 0; b < 16; b+=4)
- {
- stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type,
- A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b] );
- }
-
- for (b = 16; b < 24; b+=4)
- {
- stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
- }
+void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) {
+ ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
+ ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
+ int plane_type;
+ int b;
+
+ stuff2nd_order_b_8x8(x->block + 24, t, 1, x->frame_type,
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi);
+ plane_type = 0;
+
+ for (b = 0; b < 16; b += 4) {
+ stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ }
+
+ for (b = 16; b < 24; b += 4) {
+ stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ }
}
static __inline void stuff2nd_order_b
(
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- ++cpi->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN];
-
- pt = 0;
- *a = *l = pt;
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt];
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ ++cpi->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN];
+
+ pt = 0;
+ *a = *l = pt;
}
static __inline void stuff1st_order_b
(
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- ++cpi->coef_counts [0] [1] [pt] [DCT_EOB_TOKEN];
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ ++cpi->coef_counts [0] [1] [pt] [DCT_EOB_TOKEN];
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
static __inline
void stuff1st_order_buv
(
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi
-)
-{
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- ++cpi->coef_counts[2] [0] [pt] [DCT_EOB_TOKEN];
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi
+) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+
+ t->Token = DCT_EOB_TOKEN;
+ t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt];
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ ++cpi->coef_counts[2] [0] [pt] [DCT_EOB_TOKEN];
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
-void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
-{
- ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
- ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
- int plane_type;
- int b;
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) {
+ ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
+ ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
+ int plane_type;
+ int b;

- stuff2nd_order_b(t,
- A + vp8_block2above[24], L + vp8_block2left[24], cpi);
- plane_type = 0;
+ stuff2nd_order_b(t,
+ A + vp8_block2above[24], L + vp8_block2left[24], cpi);
+ plane_type = 0;

- for (b = 0; b < 16; b++)
- stuff1st_order_b(t,
- A + vp8_block2above[b],
- L + vp8_block2left[b], cpi);
+ for (b = 0; b < 16; b++)
+ stuff1st_order_b(t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b], cpi);

- for (b = 16; b < 24; b++)
- stuff1st_order_buv(t,
- A + vp8_block2above[b],
- L + vp8_block2left[b], cpi);
+ for (b = 16; b < 24; b++)
+ stuff1st_order_buv(t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b], cpi);
}
-void vp8_fix_contexts(MACROBLOCKD *x)
-{
- /* Clear entropy contexts for Y2 blocks */
- if (x->mode_info_context->mbmi.mode != B_PRED
- && x->mode_info_context->mbmi.mode != I8X8_PRED
- && x->mode_info_context->mbmi.mode != SPLITMV)
- {
- vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
- }
- else
- {
- vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
- vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
- }
+void vp8_fix_contexts(MACROBLOCKD *x) {
+ /* Clear entropy contexts for Y2 blocks */
+ if (x->mode_info_context->mbmi.mode != B_PRED
+ && x->mode_info_context->mbmi.mode != I8X8_PRED
+ && x->mode_info_context->mbmi.mode != SPLITMV) {
+ vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+ } else {
+ vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+ vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
+ }
}
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index bc3a05bd5..4ee676e7f 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -17,25 +17,23 @@
void vp8_tokenize_initialize();
-typedef struct
-{
- short Token;
- short Extra;
+typedef struct {
+ short Token;
+ short Extra;
} TOKENVALUE;
-typedef struct
-{
- const vp8_prob *context_tree;
- short Extra;
- unsigned char Token;
- unsigned char skip_eob_node;
+typedef struct {
+ const vp8_prob *context_tree;
+ short Extra;
+ unsigned char Token;
+ unsigned char skip_eob_node;
} TOKENEXTRA;
int rd_cost_mby(MACROBLOCKD *);
-extern int mby_is_skippable(MACROBLOCKD *x,int has_y2_block);
+extern int mby_is_skippable(MACROBLOCKD *x, int has_y2_block);
extern int mbuv_is_skippable(MACROBLOCKD *x);
-extern int mb_is_skippable(MACROBLOCKD *x,int has_y2_block);
+extern int mb_is_skippable(MACROBLOCKD *x, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *x);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *x);
extern int mb_is_skippable_8x8(MACROBLOCKD *x);
diff --git a/vp8/encoder/treewriter.c b/vp8/encoder/treewriter.c
index 6d46d369f..2dc7d8e62 100644
--- a/vp8/encoder/treewriter.c
+++ b/vp8/encoder/treewriter.c
@@ -12,33 +12,28 @@
#include "treewriter.h"
static void cost(
- int *const C,
- vp8_tree T,
- const vp8_prob *const P,
- int i,
- int c
-)
-{
- const vp8_prob p = P [i>>1];
+ int *const C,
+ vp8_tree T,
+ const vp8_prob *const P,
+ int i,
+ int c
+) {
+ const vp8_prob p = P [i >> 1];

- do
- {
- const vp8_tree_index j = T[i];
- const int d = c + vp8_cost_bit(p, i & 1);
+ do {
+ const vp8_tree_index j = T[i];
+ const int d = c + vp8_cost_bit(p, i & 1);

- if (j <= 0)
- C[-j] = d;
- else
- cost(C, T, P, j, d);
- }
- while (++i & 1);
+ if (j <= 0)
+ C[-j] = d;
+ else
+ cost(C, T, P, j, d);
+ } while (++i & 1);
}
-void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t)
-{
- cost(c, t, p, 0, 0);
+void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) {
+ cost(c, t, p, 0, 0);
}
-void vp8_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t)
-{
- cost(c, t, p, 2, 0);
+void vp8_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t) {
+ cost(c, t, p, 2, 0);
}
diff --git a/vp8/encoder/treewriter.h b/vp8/encoder/treewriter.h
index 5fd6138d3..7292072fe 100644
--- a/vp8/encoder/treewriter.h
+++ b/vp8/encoder/treewriter.h
@@ -42,20 +42,18 @@ typedef BOOL_CODER vp8_writer;
/* Both of these return bits, not scaled bits. */
-static __inline unsigned int vp8_cost_branch(const unsigned int ct[2], vp8_prob p)
-{
- /* Imitate existing calculation */
+static __inline unsigned int vp8_cost_branch(const unsigned int ct[2], vp8_prob p) {
+ /* Imitate existing calculation */

- return ((ct[0] * vp8_cost_zero(p))
- + (ct[1] * vp8_cost_one(p))) >> 8;
+ return ((ct[0] * vp8_cost_zero(p))
+ + (ct[1] * vp8_cost_one(p))) >> 8;
}
-static __inline unsigned int vp8_cost_branch256(const unsigned int ct[2], vp8_prob p)
-{
- /* Imitate existing calculation */
+static __inline unsigned int vp8_cost_branch256(const unsigned int ct[2], vp8_prob p) {
+ /* Imitate existing calculation */

- return ((ct[0] * vp8_cost_zero(p))
- + (ct[1] * vp8_cost_one(p)));
+ return ((ct[0] * vp8_cost_zero(p))
+ + (ct[1] * vp8_cost_one(p)));
}
/* Small functions to write explicit values and tokens, as well as
@@ -63,68 +61,60 @@ static __inline unsigned int vp8_cost_branch256(const unsigned int ct[2], vp8_pr
static __inline void vp8_treed_write
(
- vp8_writer *const w,
- vp8_tree t,
- const vp8_prob *const p,
- int v,
- int n /* number of bits in v, assumed nonzero */
-)
-{
- vp8_tree_index i = 0;
-
- do
- {
- const int b = (v >> --n) & 1;
- vp8_write(w, b, p[i>>1]);
- i = t[i+b];
- }
- while (n);
+ vp8_writer *const w,
+ vp8_tree t,
+ const vp8_prob *const p,
+ int v,
+ int n /* number of bits in v, assumed nonzero */
+) {
+ vp8_tree_index i = 0;
+
+ do {
+ const int b = (v >> --n) & 1;
+ vp8_write(w, b, p[i >> 1]);
+ i = t[i + b];
+ } while (n);
}
static __inline void vp8_write_token
(
- vp8_writer *const w,
- vp8_tree t,
- const vp8_prob *const p,
- vp8_token *const x
-)
-{
- vp8_treed_write(w, t, p, x->value, x->Len);
+ vp8_writer *const w,
+ vp8_tree t,
+ const vp8_prob *const p,
+ vp8_token *const x
+) {
+ vp8_treed_write(w, t, p, x->value, x->Len);
}
static __inline int vp8_treed_cost(
- vp8_tree t,
- const vp8_prob *const p,
- int v,
- int n /* number of bits in v, assumed nonzero */
-)
-{
- int c = 0;
- vp8_tree_index i = 0;
-
- do
- {
- const int b = (v >> --n) & 1;
- c += vp8_cost_bit(p[i>>1], b);
- i = t[i+b];
- }
- while (n);
-
- return c;
+ vp8_tree t,
+ const vp8_prob *const p,
+ int v,
+ int n /* number of bits in v, assumed nonzero */
+) {
+ int c = 0;
+ vp8_tree_index i = 0;
+
+ do {
+ const int b = (v >> --n) & 1;
+ c += vp8_cost_bit(p[i >> 1], b);
+ i = t[i + b];
+ } while (n);
+
+ return c;
}
static __inline int vp8_cost_token
(
- vp8_tree t,
- const vp8_prob *const p,
- vp8_token *const x
-)
-{
- return vp8_treed_cost(t, p, x->value, x->Len);
+ vp8_tree t,
+ const vp8_prob *const p,
+ vp8_token *const x
+) {
+ return vp8_treed_cost(t, p, x->value, x->Len);
}
/* Fill array of costs for all possible token values. */
void vp8_cost_tokens(
- int *Costs, const vp8_prob *, vp8_tree
+ int *Costs, const vp8_prob *, vp8_tree
);
void vp8_cost_tokens_skip(int *c, const vp8_prob *p, vp8_tree t);
diff --git a/vp8/encoder/variance.h b/vp8/encoder/variance.h
index fde8ade09..0bfd93c2c 100644
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -13,102 +13,102 @@
#define VARIANCE_H
#define prototype_sad(sym)\
- unsigned int (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- int max_sad\
- )
+ unsigned int (sym)\
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride, \
+ int max_sad\
+ )
#define prototype_sad_multi_same_address(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array\
- )
+ void (sym)\
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride, \
+ unsigned int *sad_array\
+ )
#define prototype_sad_multi_same_address_1(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned short *sad_array\
- )
+ void (sym)\
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride, \
+ unsigned short *sad_array\
+ )
#define prototype_sad_multi_dif_address(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- unsigned char *ref_ptr[4], \
- int ref_stride, \
- unsigned int *sad_array\
- )
+ void (sym)\
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ unsigned char *ref_ptr[4], \
+ int ref_stride, \
+ unsigned int *sad_array\
+ )
#define prototype_variance(sym) \
- unsigned int (sym) \
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sse\
- )
+ unsigned int (sym) \
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride, \
+ unsigned int *sse\
+ )
#define prototype_variance2(sym) \
- unsigned int (sym) \
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sse,\
- int *sum\
- )
+ unsigned int (sym) \
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride, \
+ unsigned int *sse,\
+ int *sum\
+ )
#define prototype_subpixvariance(sym) \
- unsigned int (sym) \
- ( \
- const unsigned char *src_ptr, \
- int source_stride, \
- int xoffset, \
- int yoffset, \
- const unsigned char *ref_ptr, \
- int Refstride, \
- unsigned int *sse \
- );
+ unsigned int (sym) \
+ ( \
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ int xoffset, \
+ int yoffset, \
+ const unsigned char *ref_ptr, \
+ int Refstride, \
+ unsigned int *sse \
+ );
#define prototype_ssimpf(sym) \
- void (sym) \
- ( \
- unsigned char *s, \
- int sp, \
- unsigned char *r, \
- int rp, \
- unsigned long *sum_s, \
- unsigned long *sum_r, \
- unsigned long *sum_sq_s, \
- unsigned long *sum_sq_r, \
- unsigned long *sum_sxr \
- );
+ void (sym) \
+ ( \
+ unsigned char *s, \
+ int sp, \
+ unsigned char *r, \
+ int rp, \
+ unsigned long *sum_s, \
+ unsigned long *sum_r, \
+ unsigned long *sum_sq_s, \
+ unsigned long *sum_sq_r, \
+ unsigned long *sum_sxr \
+ );
#define prototype_getmbss(sym) unsigned int (sym)(const short *)
#define prototype_get16x16prederror(sym)\
- unsigned int (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride \
- )
+ unsigned int (sym)\
+ (\
+ const unsigned char *src_ptr, \
+ int source_stride, \
+ const unsigned char *ref_ptr, \
+ int ref_stride \
+ )
#if ARCH_X86 || ARCH_X86_64
#include "x86/variance_x86.h"
@@ -143,7 +143,7 @@ extern prototype_sad(vp8_variance_sad16x8);
#endif
extern prototype_sad(vp8_variance_sad16x16);
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#ifndef vp8_variance_sad16x16x3
#define vp8_variance_sad16x16x3 vp8_sad16x16x3_c
@@ -195,7 +195,7 @@ extern prototype_sad_multi_same_address_1(vp8_variance_sad8x16x8);
#endif
extern prototype_sad_multi_same_address_1(vp8_variance_sad4x4x8);
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#ifndef vp8_variance_sad16x16x4d
#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_c
@@ -229,7 +229,7 @@ extern prototype_sad_multi_dif_address(vp8_variance_sad4x4x4d);
extern prototype_sad(vp8_variance_copy32xn);
#endif
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#ifndef vp8_variance_var4x4
#define vp8_variance_var4x4 vp8_variance4x4_c
@@ -256,7 +256,7 @@ extern prototype_variance(vp8_variance_var16x8);
#endif
extern prototype_variance(vp8_variance_var16x16);
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#ifndef vp8_variance_subpixvar4x4
#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
@@ -303,7 +303,7 @@ extern prototype_variance(vp8_variance_halfpixvar16x16_hv);
#endif
extern prototype_subpixvariance(vp8_variance_subpixmse16x16);
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#ifndef vp8_variance_getmbss
#define vp8_variance_getmbss vp8_get_mb_ss_c
@@ -341,76 +341,74 @@ typedef prototype_getmbss(*vp8_getmbss_fn_t);
typedef prototype_ssimpf(*vp8_ssimpf_fn_t);
typedef prototype_get16x16prederror(*vp8_get16x16prederror_fn_t);
-typedef struct
-{
- vp8_sad_fn_t sad4x4;
- vp8_sad_fn_t sad8x8;
- vp8_sad_fn_t sad8x16;
- vp8_sad_fn_t sad16x8;
- vp8_sad_fn_t sad16x16;
-
- vp8_variance_fn_t var4x4;
- vp8_variance_fn_t var8x8;
- vp8_variance_fn_t var8x16;
- vp8_variance_fn_t var16x8;
- vp8_variance_fn_t var16x16;
-
- vp8_subpixvariance_fn_t subpixvar4x4;
- vp8_subpixvariance_fn_t subpixvar8x8;
- vp8_subpixvariance_fn_t subpixvar8x16;
- vp8_subpixvariance_fn_t subpixvar16x8;
- vp8_subpixvariance_fn_t subpixvar16x16;
- vp8_variance_fn_t halfpixvar16x16_h;
- vp8_variance_fn_t halfpixvar16x16_v;
- vp8_variance_fn_t halfpixvar16x16_hv;
- vp8_subpixvariance_fn_t subpixmse16x16;
-
- vp8_getmbss_fn_t getmbss;
- vp8_variance_fn_t mse16x16;
-
- vp8_sad_multi_fn_t sad16x16x3;
- vp8_sad_multi_fn_t sad16x8x3;
- vp8_sad_multi_fn_t sad8x16x3;
- vp8_sad_multi_fn_t sad8x8x3;
- vp8_sad_multi_fn_t sad4x4x3;
-
- vp8_sad_multi1_fn_t sad16x16x8;
- vp8_sad_multi1_fn_t sad16x8x8;
- vp8_sad_multi1_fn_t sad8x16x8;
- vp8_sad_multi1_fn_t sad8x8x8;
- vp8_sad_multi1_fn_t sad4x4x8;
-
- vp8_sad_multi_d_fn_t sad16x16x4d;
- vp8_sad_multi_d_fn_t sad16x8x4d;
- vp8_sad_multi_d_fn_t sad8x16x4d;
- vp8_sad_multi_d_fn_t sad8x8x4d;
- vp8_sad_multi_d_fn_t sad4x4x4d;
+typedef struct {
+ vp8_sad_fn_t sad4x4;
+ vp8_sad_fn_t sad8x8;
+ vp8_sad_fn_t sad8x16;
+ vp8_sad_fn_t sad16x8;
+ vp8_sad_fn_t sad16x16;
+
+ vp8_variance_fn_t var4x4;
+ vp8_variance_fn_t var8x8;
+ vp8_variance_fn_t var8x16;
+ vp8_variance_fn_t var16x8;
+ vp8_variance_fn_t var16x16;
+
+ vp8_subpixvariance_fn_t subpixvar4x4;
+ vp8_subpixvariance_fn_t subpixvar8x8;
+ vp8_subpixvariance_fn_t subpixvar8x16;
+ vp8_subpixvariance_fn_t subpixvar16x8;
+ vp8_subpixvariance_fn_t subpixvar16x16;
+ vp8_variance_fn_t halfpixvar16x16_h;
+ vp8_variance_fn_t halfpixvar16x16_v;
+ vp8_variance_fn_t halfpixvar16x16_hv;
+ vp8_subpixvariance_fn_t subpixmse16x16;
+
+ vp8_getmbss_fn_t getmbss;
+ vp8_variance_fn_t mse16x16;
+
+ vp8_sad_multi_fn_t sad16x16x3;
+ vp8_sad_multi_fn_t sad16x8x3;
+ vp8_sad_multi_fn_t sad8x16x3;
+ vp8_sad_multi_fn_t sad8x8x3;
+ vp8_sad_multi_fn_t sad4x4x3;
+
+ vp8_sad_multi1_fn_t sad16x16x8;
+ vp8_sad_multi1_fn_t sad16x8x8;
+ vp8_sad_multi1_fn_t sad8x16x8;
+ vp8_sad_multi1_fn_t sad8x8x8;
+ vp8_sad_multi1_fn_t sad4x4x8;
+
+ vp8_sad_multi_d_fn_t sad16x16x4d;
+ vp8_sad_multi_d_fn_t sad16x8x4d;
+ vp8_sad_multi_d_fn_t sad8x16x4d;
+ vp8_sad_multi_d_fn_t sad8x8x4d;
+ vp8_sad_multi_d_fn_t sad4x4x4d;
#if ARCH_X86 || ARCH_X86_64
- vp8_sad_fn_t copy32xn;
+ vp8_sad_fn_t copy32xn;
#endif
#if CONFIG_INTERNAL_STATS
- vp8_ssimpf_fn_t ssimpf_8x8;
- vp8_ssimpf_fn_t ssimpf_16x16;
+ vp8_ssimpf_fn_t ssimpf_8x8;
+ vp8_ssimpf_fn_t ssimpf_16x16;
#endif
- vp8_variance_fn_t satd16x16;
+ vp8_variance_fn_t satd16x16;
} vp8_variance_rtcd_vtable_t;
-typedef struct
-{
- vp8_sad_fn_t sdf;
- vp8_variance_fn_t vf;
- vp8_subpixvariance_fn_t svf;
- vp8_variance_fn_t svf_halfpix_h;
- vp8_variance_fn_t svf_halfpix_v;
- vp8_variance_fn_t svf_halfpix_hv;
- vp8_sad_multi_fn_t sdx3f;
- vp8_sad_multi1_fn_t sdx8f;
- vp8_sad_multi_d_fn_t sdx4df;
+typedef struct {
+ vp8_sad_fn_t sdf;
+ vp8_variance_fn_t vf;
+ vp8_subpixvariance_fn_t svf;
+ vp8_variance_fn_t svf_halfpix_h;
+ vp8_variance_fn_t svf_halfpix_v;
+ vp8_variance_fn_t svf_halfpix_hv;
+ vp8_sad_multi_fn_t sdx3f;
+ vp8_sad_multi1_fn_t sdx8f;
+ vp8_sad_multi_d_fn_t sdx4df;
#if ARCH_X86 || ARCH_X86_64
- vp8_sad_fn_t copymem;
+ vp8_sad_fn_t copymem;
#endif
} vp8_variance_fn_ptr_t;
diff --git a/vp8/encoder/variance_c.c b/vp8/encoder/variance_c.c
index 402ff0450..9444ed87d 100644
--- a/vp8/encoder/variance_c.c
+++ b/vp8/encoder/variance_c.c
@@ -15,148 +15,136 @@
unsigned int vp8_get_mb_ss_c
(
- const short *src_ptr
-)
-{
- unsigned int i = 0, sum = 0;
-
- do
- {
- sum += (src_ptr[i] * src_ptr[i]);
- i++;
- }
- while (i < 256);
+ const short *src_ptr
+) {
+ unsigned int i = 0, sum = 0;
+
+ do {
+ sum += (src_ptr[i] * src_ptr[i]);
+ i++;
+ } while (i < 256);

- return sum;
+ return sum;
}
static void variance(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- int w,
- int h,
- unsigned int *sse,
- int *sum)
-{
- int i, j;
- int diff;
-
- *sum = 0;
- *sse = 0;
-
- for (i = 0; i < h; i++)
- {
- for (j = 0; j < w; j++)
- {
- diff = src_ptr[j] - ref_ptr[j];
- *sum += diff;
- *sse += diff * diff;
- }
-
- src_ptr += source_stride;
- ref_ptr += recon_stride;
- }
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ int w,
+ int h,
+ unsigned int *sse,
+ int *sum) {
+ int i, j;
+ int diff;
+
+ *sum = 0;
+ *sse = 0;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ diff = src_ptr[j] - ref_ptr[j];
+ *sum += diff;
+ *sse += diff * diff;
+ }
+
+ src_ptr += source_stride;
+ ref_ptr += recon_stride;
+ }
}
unsigned int vp8_variance16x16_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
- *sse = var;
- return (var - ((avg * avg) >> 8));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 8));
}
unsigned int vp8_variance8x16_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
- *sse = var;
- return (var - ((avg * avg) >> 7));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance16x8_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
- *sse = var;
- return (var - ((avg * avg) >> 7));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance8x8_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
- *sse = var;
- return (var - ((avg * avg) >> 6));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 6));
}
unsigned int vp8_variance4x4_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
- *sse = var;
- return (var - ((avg * avg) >> 4));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 4));
}
unsigned int vp8_mse16x16_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
- variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
- *sse = var;
- return var;
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return var;
}
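
All of the return statements above compute one identity: for an N-pixel block, variance = SSE - sum^2/N, with the shift standing in for the divide (8 = log2(256) for 16x16, 7 for 16x8 and 8x16, 6 for 8x8, 4 for 4x4). A worked sanity check, assuming nothing beyond the arithmetic shown:

#include <assert.h>

/* If every per-pixel diff is the same constant d, the block has zero
 * variance; the sse - ((sum * sum) >> log2(N)) form must agree.
 * N = 256, i.e. the 16x16 case above. */
static void variance_identity_check(void) {
  const int d = 3;
  const int sum = 256 * d;               /* sum of diffs         */
  const unsigned int sse = 256 * d * d;  /* sum of squared diffs */
  assert(sse - ((sum * sum) >> 8) == 0);
}
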
@@ -190,32 +178,29 @@ unsigned int vp8_mse16x16_c(
****************************************************************************/
static void var_filter_block2d_bil_first_pass
(
- const unsigned char *src_ptr,
- unsigned short *output_ptr,
- unsigned int src_pixels_per_line,
- int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const short *vp8_filter
-)
-{
- unsigned int i, j;
-
- for (i = 0; i < output_height; i++)
- {
- for (j = 0; j < output_width; j++)
- {
- // Apply bilinear filter
- output_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) +
- ((int)src_ptr[pixel_step] * vp8_filter[1]) +
- (VP8_FILTER_WEIGHT / 2)) >> VP8_FILTER_SHIFT;
- src_ptr++;
- }
-
- // Next row...
- src_ptr += src_pixels_per_line - output_width;
- output_ptr += output_width;
+ const unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter
+) {
+ unsigned int i, j;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ // Apply bilinear filter
+ output_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) +
+ ((int)src_ptr[pixel_step] * vp8_filter[1]) +
+ (VP8_FILTER_WEIGHT / 2)) >> VP8_FILTER_SHIFT;
+ src_ptr++;
}
+
+ // Next row...
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
}
/****************************************************************************
@@ -248,226 +233,214 @@ static void var_filter_block2d_bil_first_pass
****************************************************************************/
static void var_filter_block2d_bil_second_pass
(
- const unsigned short *src_ptr,
- unsigned char *output_ptr,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const short *vp8_filter
-)
-{
- unsigned int i, j;
- int Temp;
-
- for (i = 0; i < output_height; i++)
- {
- for (j = 0; j < output_width; j++)
- {
- // Apply filter
- Temp = ((int)src_ptr[0] * vp8_filter[0]) +
- ((int)src_ptr[pixel_step] * vp8_filter[1]) +
- (VP8_FILTER_WEIGHT / 2);
- output_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT);
- src_ptr++;
- }
-
- // Next row...
- src_ptr += src_pixels_per_line - output_width;
- output_ptr += output_width;
+ const unsigned short *src_ptr,
+ unsigned char *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const short *vp8_filter
+) {
+ unsigned int i, j;
+ int Temp;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ // Apply filter
+ Temp = ((int)src_ptr[0] * vp8_filter[0]) +
+ ((int)src_ptr[pixel_step] * vp8_filter[1]) +
+ (VP8_FILTER_WEIGHT / 2);
+ output_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT);
+ src_ptr++;
}
+
+ // Next row...
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
}
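
Taken together, the two passes implement a separable bilinear interpolation: pass one filters rows into an intermediate unsigned-short buffer one row taller than the output, pass two filters that buffer down columns back to bytes. A scalar sketch of the per-tap step, assuming the usual VP8_FILTER_WEIGHT of 128 and VP8_FILTER_SHIFT of 7:

/* One tap-pair of the bilinear filter, matching the arithmetic in both
 * passes above: a convex blend of two neighbouring samples with
 * round-to-nearest.  filter[0] + filter[1] == 128. */
static unsigned char bilinear_blend(int a, int b, const short *filter) {
  int t = a * filter[0] + b * filter[1] + (128 / 2); /* + weight/2 rounds */
  return (unsigned char)(t >> 7);                    /* >> log2(weight)   */
}
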
unsigned int vp8_sub_pixel_variance4x4_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned char temp2[20*16];
- const short *HFilter, *VFilter;
- unsigned short FData3[5*4]; // Temp data bufffer used in filtering
-
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
-
- // First filter 1d Horizontal
- var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
-
- // Now filter Verticaly
- var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
-
- return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned char temp2[20 * 16];
+ const short *HFilter, *VFilter;
+ unsigned short FData3[5 * 4]; // Temp data buffer used in filtering
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ // First filter 1-D horizontally
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
+
+ // Now filter vertically
+ var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
+
+ return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_sub_pixel_variance8x8_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short FData3[9*8]; // Temp data bufffer used in filtering
- unsigned char temp2[20*16];
- const short *HFilter, *VFilter;
-
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
-
- var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
- var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
-
- return vp8_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short FData3[9 * 8]; // Temp data buffer used in filtering
+ unsigned char temp2[20 * 16];
+ const short *HFilter, *VFilter;
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
+
+ return vp8_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_sub_pixel_variance16x16_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short FData3[17*16]; // Temp data bufffer used in filtering
- unsigned char temp2[20*16];
- const short *HFilter, *VFilter;
-
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
-
- var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
- var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
-
- return vp8_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short FData3[17 * 16]; // Temp data buffer used in filtering
+ unsigned char temp2[20 * 16];
+ const short *HFilter, *VFilter;
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
+
+ return vp8_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_variance_halfpixvar16x16_h_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 0,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 0,
+ ref_ptr, recon_stride, sse);
#endif
}
unsigned int vp8_variance_halfpixvar16x16_v_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 4,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 4,
+ ref_ptr, recon_stride, sse);
#endif
}
unsigned int vp8_variance_halfpixvar16x16_hv_c(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 4,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 4,
+ ref_ptr, recon_stride, sse);
#endif
}
unsigned int vp8_sub_pixel_mse16x16_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- vp8_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
- return *sse;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ vp8_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+ return *sse;
}
unsigned int vp8_sub_pixel_variance16x8_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short FData3[16*9]; // Temp data bufffer used in filtering
- unsigned char temp2[20*16];
- const short *HFilter, *VFilter;
-
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
-
- var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
- var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
-
- return vp8_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short FData3[16 * 9]; // Temp data buffer used in filtering
+ unsigned char temp2[20 * 16];
+ const short *HFilter, *VFilter;
+
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
+
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
+
+ return vp8_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp8_sub_pixel_variance8x16_c
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- unsigned short FData3[9*16]; // Temp data bufffer used in filtering
- unsigned char temp2[20*16];
- const short *HFilter, *VFilter;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ unsigned short FData3[9 * 16]; // Temp data buffer used in filtering
+ unsigned char temp2[20 * 16];
+ const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp8_bilinear_filters[xoffset];
+ VFilter = vp8_bilinear_filters[yoffset];
- var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
- var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
+ var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
+ var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
- return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+ return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
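
Note how the halfpixvar wrappers above merely pin the sub-pel offsets at the half-sample index: 8 of 16 steps when CONFIG_SIXTEENTH_SUBPEL_UV is set, 4 of 8 steps otherwise. A hedged usage sketch (src/ref and the stride of 32 are placeholder assumptions):

#include <assert.h>

/* In a CONFIG_SIXTEENTH_SUBPEL_UV build these two calls must agree,
 * since offset 8/16 is exactly half a pel in each direction.  src and
 * ref are assumed to address valid 16x16 regions (plus the extra row
 * and column the filter reads) with stride 32. */
static void halfpix_equivalence(const unsigned char *src,
                                const unsigned char *ref) {
  unsigned int sse_a, sse_b;
  unsigned int va = vp8_variance_halfpixvar16x16_hv_c(src, 32, ref, 32, &sse_a);
  unsigned int vb = vp8_sub_pixel_variance16x16_c(src, 32, 8, 8, ref, 32, &sse_b);
  assert(va == vb && sse_a == sse_b);
}
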
diff --git a/vp8/encoder/x86/variance_mmx.c b/vp8/encoder/x86/variance_mmx.c
index b84d00034..19264489d 100644
--- a/vp8/encoder/x86/variance_mmx.c
+++ b/vp8/encoder/x86/variance_mmx.c
@@ -15,185 +15,179 @@
extern void filter_block1d_h6_mmx
(
- const unsigned char *src_ptr,
- unsigned short *output_ptr,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- short *vp7_filter
+ const unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ short *vp7_filter
);
extern void filter_block1d_v6_mmx
(
- const short *src_ptr,
- unsigned char *output_ptr,
- unsigned int pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- short *vp7_filter
+ const short *src_ptr,
+ unsigned char *output_ptr,
+ unsigned int pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ short *vp7_filter
);
extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
extern unsigned int vp8_get8x8var_mmx
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
extern unsigned int vp8_get4x4var_mmx
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
extern void vp8_filter_block2d_bil4x4_var_mmx
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- const short *HFilter,
- const short *VFilter,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- const short *HFilter,
- const short *VFilter,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
);
unsigned int vp8_variance4x4_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
- vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
- *sse = var;
- return (var - ((avg * avg) >> 4));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 4));
}
unsigned int vp8_variance8x8_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
- vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
- *sse = var;
+ vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
- return (var - ((avg * avg) >> 6));
+ return (var - ((avg * avg) >> 6));
}
unsigned int vp8_mse16x16_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, sse2, sse3, var;
- int sum0, sum1, sum2, sum3;
-
-
- vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
- vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2) ;
- vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
-
- var = sse0 + sse1 + sse2 + sse3;
- *sse = var;
- return var;
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3;
+
+
+ vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ *sse = var;
+ return var;
}
unsigned int vp8_variance16x16_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, sse2, sse3, var;
- int sum0, sum1, sum2, sum3, avg;
-
-
- vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
- vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2) ;
- vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
-
- var = sse0 + sse1 + sse2 + sse3;
- avg = sum0 + sum1 + sum2 + sum3;
- *sse = var;
- return (var - ((avg * avg) >> 8));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3, avg;
+
+
+ vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ avg = sum0 + sum1 + sum2 + sum3;
+ *sse = var;
+ return (var - ((avg * avg) >> 8));
}
unsigned int vp8_variance16x8_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, var;
- int sum0, sum1, avg;
-
- vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-
- var = sse0 + sse1;
- avg = sum0 + sum1;
- *sse = var;
- return (var - ((avg * avg) >> 7));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+ return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance8x16_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, var;
- int sum0, sum1, avg;
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
- vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1) ;
+ vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
- var = sse0 + sse1;
- avg = sum0 + sum1;
- *sse = var;
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
- return (var - ((avg * avg) >> 7));
+ return (var - ((avg * avg) >> 7));
}
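
vp8_variance16x16_mmx above illustrates a pattern worth calling out: SSE and the signed sum are both additive over a tiling, so a 16x16 block is scored as four 8x8 calls plus one normalization at N = 256. A plain-C restatement of that composition (get8x8_fn is a stand-in for the vp8_get8x8var_mmx signature; the helper name is illustrative):

typedef unsigned int (*get8x8_fn)(const unsigned char *s, int ss,
                                  const unsigned char *r, int rs,
                                  unsigned int *sse, int *sum);

/* Compose 16x16 variance from four 8x8 tiles, as the MMX code does:
 * partial SSEs and sums simply add; only the final step needs N. */
static unsigned int var16x16_from_tiles(get8x8_fn get8x8,
                                        const unsigned char *s, int ss,
                                        const unsigned char *r, int rs,
                                        unsigned int *sse) {
  unsigned int e0, e1, e2, e3;
  int m0, m1, m2, m3, sum;
  get8x8(s,              ss, r,              rs, &e0, &m0);
  get8x8(s + 8,          ss, r + 8,          rs, &e1, &m1);
  get8x8(s + 8 * ss,     ss, r + 8 * rs,     rs, &e2, &m2);
  get8x8(s + 8 * ss + 8, ss, r + 8 * rs + 8, rs, &e3, &m3);
  *sse = e0 + e1 + e2 + e3;
  sum = m0 + m1 + m2 + m3;
  return *sse - ((sum * sum) >> 8);  /* N = 256 */
}
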
@@ -205,246 +199,236 @@ unsigned int vp8_variance8x16_mmx(
// int one pass //
///////////////////////////////////////////////////////////////////////////
#if CONFIG_SIXTEENTH_SUBPEL_UV
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) =
-{
- { 128, 128, 128, 128, 0, 0, 0, 0 },
- { 120, 120, 120, 120, 8, 8, 8, 8 },
- { 112, 112, 112, 112, 16, 16, 16, 16 },
- { 104, 104, 104, 104, 24, 24, 24, 24 },
- { 96, 96, 96, 96, 32, 32, 32, 32 },
- { 88, 88, 88, 88, 40, 40, 40, 40 },
- { 80, 80, 80, 80, 48, 48, 48, 48 },
- { 72, 72, 72, 72, 56, 56, 56, 56 },
- { 64, 64, 64, 64, 64, 64, 64, 64 },
- { 56, 56, 56, 56, 72, 72, 72, 72 },
- { 48, 48, 48, 48, 80, 80, 80, 80 },
- { 40, 40, 40, 40, 88, 88, 88, 88 },
- { 32, 32, 32, 32, 96, 96, 96, 96 },
- { 24, 24, 24, 24, 104, 104, 104, 104 },
- { 16, 16, 16, 16, 112, 112, 112, 112 },
- { 8, 8, 8, 8, 120, 120, 120, 120 }
+DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
+ { 128, 128, 128, 128, 0, 0, 0, 0 },
+ { 120, 120, 120, 120, 8, 8, 8, 8 },
+ { 112, 112, 112, 112, 16, 16, 16, 16 },
+ { 104, 104, 104, 104, 24, 24, 24, 24 },
+ { 96, 96, 96, 96, 32, 32, 32, 32 },
+ { 88, 88, 88, 88, 40, 40, 40, 40 },
+ { 80, 80, 80, 80, 48, 48, 48, 48 },
+ { 72, 72, 72, 72, 56, 56, 56, 56 },
+ { 64, 64, 64, 64, 64, 64, 64, 64 },
+ { 56, 56, 56, 56, 72, 72, 72, 72 },
+ { 48, 48, 48, 48, 80, 80, 80, 80 },
+ { 40, 40, 40, 40, 88, 88, 88, 88 },
+ { 32, 32, 32, 32, 96, 96, 96, 96 },
+ { 24, 24, 24, 24, 104, 104, 104, 104 },
+ { 16, 16, 16, 16, 112, 112, 112, 112 },
+ { 8, 8, 8, 8, 120, 120, 120, 120 }
};
#else
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) =
-{
- { 128, 128, 128, 128, 0, 0, 0, 0 },
- { 112, 112, 112, 112, 16, 16, 16, 16 },
- { 96, 96, 96, 96, 32, 32, 32, 32 },
- { 80, 80, 80, 80, 48, 48, 48, 48 },
- { 64, 64, 64, 64, 64, 64, 64, 64 },
- { 48, 48, 48, 48, 80, 80, 80, 80 },
- { 32, 32, 32, 32, 96, 96, 96, 96 },
- { 16, 16, 16, 16, 112, 112, 112, 112 }
+DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) = {
+ { 128, 128, 128, 128, 0, 0, 0, 0 },
+ { 112, 112, 112, 112, 16, 16, 16, 16 },
+ { 96, 96, 96, 96, 32, 32, 32, 32 },
+ { 80, 80, 80, 80, 48, 48, 48, 48 },
+ { 64, 64, 64, 64, 64, 64, 64, 64 },
+ { 48, 48, 48, 48, 80, 80, 80, 80 },
+ { 32, 32, 32, 32, 96, 96, 96, 96 },
+ { 16, 16, 16, 16, 112, 112, 112, 112 }
};
#endif
unsigned int vp8_sub_pixel_variance4x4_mmx
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse)
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse)
{
- int xsum;
- unsigned int xxsum;
- vp8_filter_block2d_bil4x4_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum, &xxsum
- );
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 4));
+ int xsum;
+ unsigned int xxsum;
+ vp8_filter_block2d_bil4x4_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 4));
}
unsigned int vp8_sub_pixel_variance8x8_mmx
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
-
- int xsum;
- unsigned int xxsum;
- vp8_filter_block2d_bil_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum, &xxsum
- );
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 6));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+
+ int xsum;
+ unsigned int xxsum;
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_mmx
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
-
- int xsum0, xsum1;
- unsigned int xxsum0, xxsum1;
-
- vp8_filter_block2d_bil_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum0, &xxsum0
- );
-
- vp8_filter_block2d_bil_var_mmx(
- src_ptr + 8, src_pixels_per_line,
- dst_ptr + 8, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum1, &xxsum1
- );
-
- xsum0 += xsum1;
- xxsum0 += xxsum1;
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum0, &xxsum0
+ );
+
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 16,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum1, &xxsum1
+ );
+
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_mse16x16_mmx(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
- return *sse;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+ return *sse;
}
unsigned int vp8_sub_pixel_variance16x8_mmx
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum0, xsum1;
- unsigned int xxsum0, xxsum1;
-
-
- vp8_filter_block2d_bil_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum0, &xxsum0
- );
-
-
- vp8_filter_block2d_bil_var_mmx(
- src_ptr + 8, src_pixels_per_line,
- dst_ptr + 8, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum1, &xxsum1
- );
-
- xsum0 += xsum1;
- xxsum0 += xxsum1;
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 7));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum0, &xxsum0
+ );
+
+
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 8,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum1, &xxsum1
+ );
+
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_mmx
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum;
- unsigned int xxsum;
- vp8_filter_block2d_bil_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum, &xxsum
- );
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 7));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+ vp8_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 7));
}
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
+ ref_ptr, recon_stride, sse);
#endif
}
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
+ ref_ptr, recon_stride, sse);
#endif
}
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
#if CONFIG_SIXTEENTH_SUBPEL_UV
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
#else
- return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
- ref_ptr, recon_stride, sse);
+ return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
+ ref_ptr, recon_stride, sse);
#endif
}
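
One property of the two filter tables earlier in this file is worth noting: each row holds tap0 replicated four times then tap1 replicated four times (to suit the 4x16-bit MMX multiplies), and the two taps always sum to the filter weight of 128, so the blend is convex and the +64 rounding term stays exact. A small self-check under those assumptions:

#include <assert.h>

/* Verify tap0 + tap1 == 128 for every sub-pel position; indices 0 and
 * 4 pick one copy of each tap.  The bound is 16 rows with
 * CONFIG_SIXTEENTH_SUBPEL_UV, 8 rows otherwise. */
static void check_bilinear_filters_mmx(void) {
  int i;
  for (i = 0; i < 16; i++)
    assert(vp8_vp7_bilinear_filters_mmx[i][0] +
           vp8_vp7_bilinear_filters_mmx[i][4] == 128);
}
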
diff --git a/vp8/encoder/x86/variance_sse2.c b/vp8/encoder/x86/variance_sse2.c
index e3c6268ea..0d6b2372e 100644
--- a/vp8/encoder/x86/variance_sse2.c
+++ b/vp8/encoder/x86/variance_sse2.c
@@ -26,119 +26,119 @@ extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_
extern void vp8_filter_block2d_bil4x4_var_mmx
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- const short *HFilter,
- const short *VFilter,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
);
extern unsigned int vp8_get4x4var_mmx
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
unsigned int vp8_get_mb_ss_sse2
(
- const short *src_ptr
+ const short *src_ptr
);
unsigned int vp8_get16x16var_sse2
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
unsigned int vp8_get8x8var_sse2
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
void vp8_filter_block2d_bil_var_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int xoffset,
- int yoffset,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int xoffset,
+ int yoffset,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_horiz_vert_variance8x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_horiz_vert_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_horiz_variance8x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_horiz_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_vert_variance8x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
void vp8_half_vert_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
#if CONFIG_SIXTEENTH_SUBPEL_UV
@@ -148,421 +148,378 @@ DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[8][8]);
#endif
unsigned int vp8_variance4x4_wmt(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
- vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
- *sse = var;
- return (var - ((avg * avg) >> 4));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 4));
}
unsigned int vp8_variance8x8_wmt
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int var;
- int avg;
-
- vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
- *sse = var;
- return (var - ((avg * avg) >> 6));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+ return (var - ((avg * avg) >> 6));
}
unsigned int vp8_variance16x16_wmt
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0;
- int sum0;
-
-
- vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- *sse = sse0;
- return (sse0 - ((sum0 * sum0) >> 8));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0;
+ int sum0;
+
+
+ vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ *sse = sse0;
+ return (sse0 - ((sum0 * sum0) >> 8));
}
unsigned int vp8_mse16x16_wmt(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
-
- unsigned int sse0;
- int sum0;
- vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- *sse = sse0;
- return sse0;
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+
+ unsigned int sse0;
+ int sum0;
+ vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ *sse = sse0;
+ return sse0;
}
unsigned int vp8_variance16x8_wmt
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, var;
- int sum0, sum1, avg;
-
- vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-
- var = sse0 + sse1;
- avg = sum0 + sum1;
- *sse = var;
- return (var - ((avg * avg) >> 7));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+ return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance8x16_wmt
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *sse)
-{
- unsigned int sse0, sse1, var;
- int sum0, sum1, avg;
-
- vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
- vp8_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1) ;
-
- var = sse0 + sse1;
- avg = sum0 + sum1;
- *sse = var;
- return (var - ((avg * avg) >> 7));
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp8_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+ return (var - ((avg * avg) >> 7));
}
unsigned int vp8_sub_pixel_variance4x4_wmt
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum;
- unsigned int xxsum;
- vp8_filter_block2d_bil4x4_var_mmx(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
- &xsum, &xxsum
- );
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 4));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+ vp8_filter_block2d_bil4x4_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line,
+ vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 4));
}
unsigned int vp8_sub_pixel_variance8x8_wmt
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum;
- unsigned int xxsum;
-
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum, &xxsum);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum, &xxsum);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum, &xxsum);
- }
- else
- {
- vp8_filter_block2d_bil_var_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- xoffset, yoffset,
- &xsum, &xxsum);
- }
-
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 6));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else {
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum, &xxsum);
+ }
+
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_wmt
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum0, xsum1;
- unsigned int xxsum0, xxsum1;
-
-
- // note we could avoid these if statements if the calling function
- // just called the appropriate functions inside.
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else
- {
- vp8_filter_block2d_bil_var_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- xoffset, yoffset,
- &xsum0, &xxsum0
- );
-
- vp8_filter_block2d_bil_var_sse2(
- src_ptr + 8, src_pixels_per_line,
- dst_ptr + 8, dst_pixels_per_line, 16,
- xoffset, yoffset,
- &xsum1, &xxsum1
- );
- xsum0 += xsum1;
- xxsum0 += xxsum1;
- }
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+
+ // Note: these if statements could be avoided if the calling function
+ // invoked the appropriate specialized kernel directly.
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else {
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum0, &xxsum0
+ );
+
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum1, &xxsum1
+ );
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+ }
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_mse16x16_wmt(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- vp8_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
- return *sse;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ vp8_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+ return *sse;
}
unsigned int vp8_sub_pixel_variance16x8_wmt
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-
-)
-{
- int xsum0, xsum1;
- unsigned int xxsum0, xxsum1;
-
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else
- {
- vp8_filter_block2d_bil_var_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- xoffset, yoffset,
- &xsum0, &xxsum0);
-
- vp8_filter_block2d_bil_var_sse2(
- src_ptr + 8, src_pixels_per_line,
- dst_ptr + 8, dst_pixels_per_line, 8,
- xoffset, yoffset,
- &xsum1, &xxsum1);
- xsum0 += xsum1;
- xxsum0 += xxsum1;
- }
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 7));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+
+) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else {
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum1, &xxsum1);
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+ }
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_wmt
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum;
- unsigned int xxsum;
-
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum, &xxsum);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum, &xxsum);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance8x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum, &xxsum);
- }
- else
- {
- vp8_filter_block2d_bil_var_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- xoffset, yoffset,
- &xsum, &xxsum);
- }
-
- *sse = xxsum;
- return (xxsum - ((xsum * xsum) >> 7));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else {
+ vp8_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum, &xxsum);
+ }
+
+ *sse = xxsum;
+ return (xxsum - ((xsum * xsum) >> 7));
}
unsigned int vp8_variance_halfpixvar16x16_h_wmt(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse)
-{
- int xsum0;
- unsigned int xxsum0;
-
- vp8_half_horiz_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp8_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_variance_halfpixvar16x16_v_wmt(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse)
-{
- int xsum0;
- unsigned int xxsum0;
- vp8_half_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+ vp8_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse)
-{
- int xsum0;
- unsigned int xxsum0;
-
- vp8_half_horiz_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
-
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp8_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
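Each of the SIMD wrappers above reduces to the textbook identity variance = SSE - Sum^2 / N, where N is the block's pixel count: 256 pixels for 16x16 (hence the >> 8) and 128 for 8x16 or 16x8 (hence the >> 7). A minimal scalar sketch of that computation, assuming only the strides and block sizes used above; the helper name is hypothetical and not part of the diff:

/* Scalar reference for the SIMD variance wrappers above (hypothetical
 * helper): accumulate the sum and sum of squares of the pixel
 * differences, then apply variance = SSE - Sum^2 / N. */
unsigned int variance_ref(const unsigned char *src, int src_stride,
                          const unsigned char *dst, int dst_stride,
                          int w, int h, unsigned int *sse) {
  int sum = 0;
  unsigned int sse_acc = 0;
  int r, c;
  for (r = 0; r < h; r++) {
    for (c = 0; c < w; c++) {
      const int d = src[c] - dst[c];
      sum += d;
      sse_acc += (unsigned int)(d * d);
    }
    src += src_stride;
    dst += dst_stride;
  }
  *sse = sse_acc;
  /* w * h is 256 for 16x16 (so >> 8 above) and 128 for 8x16 (so >> 7). */
  return sse_acc - (unsigned int)(((long long)sum * sum) / (w * h));
}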
diff --git a/vp8/encoder/x86/variance_ssse3.c b/vp8/encoder/x86/variance_ssse3.c
index fc2a3c3f2..fc3b313f6 100644
--- a/vp8/encoder/x86/variance_ssse3.c
+++ b/vp8/encoder/x86/variance_ssse3.c
@@ -21,151 +21,135 @@
extern unsigned int vp8_get16x16var_sse2
(
- const unsigned char *src_ptr,
- int source_stride,
- const unsigned char *ref_ptr,
- int recon_stride,
- unsigned int *SSE,
- int *Sum
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
);
extern void vp8_half_horiz_vert_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
extern void vp8_half_horiz_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
extern void vp8_half_vert_variance16x_h_sse2
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_ssse3
(
- const unsigned char *ref_ptr,
- int ref_pixels_per_line,
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- unsigned int Height,
- int xoffset,
- int yoffset,
- int *sum,
- unsigned int *sumsquared
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int xoffset,
+ int yoffset,
+ int *sum,
+ unsigned int *sumsquared
);
unsigned int vp8_sub_pixel_variance16x16_ssse3
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
-)
-{
- int xsum0;
- unsigned int xxsum0;
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0;
+ unsigned int xxsum0;
- // note we could avoid these if statements if the calling function
- // just called the appropriate functions inside.
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- &xsum0, &xxsum0);
- }
- else
- {
- vp8_filter_block2d_bil_var_ssse3(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 16,
- xoffset, yoffset,
- &xsum0, &xxsum0);
- }
+  // Note: these if statements could be avoided if the calling function
+  // invoked the appropriate half-pel variant directly.
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else {
+ vp8_filter_block2d_bil_var_ssse3(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+ }
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 8));
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_variance16x8_ssse3
(
- const unsigned char *src_ptr,
- int src_pixels_per_line,
- int xoffset,
- int yoffset,
- const unsigned char *dst_ptr,
- int dst_pixels_per_line,
- unsigned int *sse
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
-)
-{
- int xsum0;
- unsigned int xxsum0;
+) {
+ int xsum0;
+ unsigned int xxsum0;
- if (xoffset == HALFNDX && yoffset == 0)
- {
- vp8_half_horiz_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else if (xoffset == 0 && yoffset == HALFNDX)
- {
- vp8_half_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else if (xoffset == HALFNDX && yoffset == HALFNDX)
- {
- vp8_half_horiz_vert_variance16x_h_sse2(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- &xsum0, &xxsum0);
- }
- else
- {
- vp8_filter_block2d_bil_var_ssse3(
- src_ptr, src_pixels_per_line,
- dst_ptr, dst_pixels_per_line, 8,
- xoffset, yoffset,
- &xsum0, &xxsum0);
- }
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp8_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp8_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp8_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else {
+ vp8_filter_block2d_bil_var_ssse3(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+ }
- *sse = xxsum0;
- return (xxsum0 - ((xsum0 * xsum0) >> 7));
+ *sse = xxsum0;
+ return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
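As the comment in vp8_sub_pixel_variance16x16_ssse3 notes, these wrappers special-case the three half-pel offsets so they hit the cheap SSE2 averaging kernels, and only arbitrary offsets pay for the full bilinear filter. A usage sketch, assuming HALFNDX is the half-pel index in the codebase's eighth-pel filter table (4); the caller name is hypothetical:

/* Hypothetical caller: with xoffset == HALFNDX and yoffset == 0 the
 * wrapper routes to vp8_half_horiz_variance16x_h_sse2 rather than the
 * general vp8_filter_block2d_bil_var_ssse3 path. */
unsigned int measure_halfpel_h(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride) {
  unsigned int sse;
  return vp8_sub_pixel_variance16x16_ssse3(src, src_stride,
                                           4 /* xoffset == HALFNDX */,
                                           0 /* yoffset */,
                                           ref, ref_stride, &sse);
}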
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index ae803f860..146888a1f 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -16,227 +16,214 @@
#if HAVE_MMX
-void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
-{
- vp8_short_fdct4x4_mmx(input, output, pitch);
- vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
+void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch) {
+ vp8_short_fdct4x4_mmx(input, output, pitch);
+ vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
}
int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
-{
- short *coeff_ptr = mb->block[0].coeff;
- short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
- return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
+int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc) {
+ short *coeff_ptr = mb->block[0].coeff;
+ short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
+ return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
}
int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
-int vp8_mbuverror_mmx(MACROBLOCK *mb)
-{
- short *s_ptr = &mb->coeff[256];
- short *d_ptr = &mb->e_mbd.dqcoeff[256];
- return vp8_mbuverror_mmx_impl(s_ptr, d_ptr);
+int vp8_mbuverror_mmx(MACROBLOCK *mb) {
+ short *s_ptr = &mb->coeff[256];
+ short *d_ptr = &mb->e_mbd.dqcoeff[256];
+ return vp8_mbuverror_mmx_impl(s_ptr, d_ptr);
}
void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
short *diff, unsigned char *predictor,
int pitch);
-void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
-{
- unsigned char *z = *(be->base_src) + be->src;
- unsigned int src_stride = be->src_stride;
- short *diff = &be->src_diff[0];
- unsigned char *predictor = &bd->predictor[0];
- vp8_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
+void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *z = *(be->base_src) + be->src;
+ unsigned int src_stride = be->src_stride;
+ short *diff = &be->src_diff[0];
+ unsigned char *predictor = &bd->predictor[0];
+ vp8_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
}
#endif
#if HAVE_SSE2
int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
-{
- short *coeff_ptr = mb->block[0].coeff;
- short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
- return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
+int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc) {
+ short *coeff_ptr = mb->block[0].coeff;
+ short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
+ return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
}
int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
-int vp8_mbuverror_xmm(MACROBLOCK *mb)
-{
- short *s_ptr = &mb->coeff[256];
- short *d_ptr = &mb->e_mbd.dqcoeff[256];
- return vp8_mbuverror_xmm_impl(s_ptr, d_ptr);
+int vp8_mbuverror_xmm(MACROBLOCK *mb) {
+ short *s_ptr = &mb->coeff[256];
+ short *d_ptr = &mb->e_mbd.dqcoeff[256];
+ return vp8_mbuverror_xmm_impl(s_ptr, d_ptr);
}
void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
- short *diff, unsigned char *predictor,
- int pitch);
-void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
-{
- unsigned char *z = *(be->base_src) + be->src;
- unsigned int src_stride = be->src_stride;
- short *diff = &be->src_diff[0];
- unsigned char *predictor = &bd->predictor[0];
- vp8_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
+ short *diff, unsigned char *predictor,
+ int pitch);
+void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *z = *(be->base_src) + be->src;
+ unsigned int src_stride = be->src_stride;
+ short *diff = &be->src_diff[0];
+ unsigned char *predictor = &bd->predictor[0];
+ vp8_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
}
#endif
-void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
-{
+void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
- int flags = x86_simd_caps();
+ int flags = x86_simd_caps();
- /* Note:
- *
- * This platform can be built without runtime CPU detection as well. If
- * you modify any of the function mappings present in this file, be sure
- * to also update them in static mapings (<arch>/filename_<arch>.h)
- */
+ /* Note:
+ *
+ * This platform can be built without runtime CPU detection as well. If
+ * you modify any of the function mappings present in this file, be sure
+   * to also update them in the static mappings (<arch>/filename_<arch>.h)
+ */
- /* Override default functions with fastest ones for this CPU. */
+ /* Override default functions with fastest ones for this CPU. */
#if HAVE_MMX
- if (flags & HAS_MMX)
- {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
-
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
-
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
-
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-
- cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
- }
+ if (flags & HAS_MMX) {
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
+ cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
+ cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
+ cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
+ cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
+
+ cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
+ cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
+ cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
+ cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
+ cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;
+
+ cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
+ cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
+ cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
+ cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
+ cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
+ cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
+ cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
+
+ cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
+ cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
+
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
+
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
+
+ cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
+ cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
+ cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
+ cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
+ cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
+ cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
+ }
#endif
#if HAVE_SSE2
- if (flags & HAS_SSE2)
- {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse2;
-
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
-
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;
-
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2 ;
-
- cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
- cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
+ if (flags & HAS_SSE2) {
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
+ cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
+ cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
+ cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
+ cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;
+ cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse2;
+
+ cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
+ cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
+ cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
+ cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
+ cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;
+
+ cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
+ cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
+ cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
+ cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
+ cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
+ cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
+ cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
+
+ cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
+ cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
+
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;
+
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2;
+
+ cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
+ cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
+ cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
+ cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
+ cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
+ cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
+ cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
#if CONFIG_INTERNAL_STATS
#if ARCH_X86_64
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
+ cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
+ cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
#endif
#endif
- }
+ }
#endif
#if HAVE_SSE3
- if (flags & HAS_SSE3)
- {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
- cpi->rtcd.search.full_search = vp8_full_search_sadx3;
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse3;
- cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
- cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
- }
+ if (flags & HAS_SSE3) {
+ cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
+ cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
+ cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
+ cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
+ cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
+ cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
+ cpi->rtcd.search.full_search = vp8_full_search_sadx3;
+ cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
+ cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
+ cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
+ cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
+ cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
+ cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse3;
+ cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
+ cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
+ }
#endif
#if HAVE_SSSE3
- if (flags & HAS_SSSE3)
- {
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
-
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
- }
+ if (flags & HAS_SSSE3) {
+ cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
+ cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
+
+ cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
+ cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
+ }
#endif
#if HAVE_SSE4_1
- if (flags & HAS_SSE4_1)
- {
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
- cpi->rtcd.search.full_search = vp8_full_search_sadx8;
- }
+ if (flags & HAS_SSE4_1) {
+ cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
+ cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
+ cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
+ cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
+ cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
+ cpi->rtcd.search.full_search = vp8_full_search_sadx8;
+ }
#endif
#endif
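The init function above follows libvpx's runtime CPU detection (RTCD) pattern: the dispatch table starts out pointing at the portable C implementations, and each feature block (MMX, then SSE2, SSE3, SSSE3, SSE4.1) overwrites the entries it can accelerate, so the fastest supported variant wins. A minimal sketch of the same shape, with hypothetical names and a made-up capability bit:

/* Minimal sketch of the RTCD override pattern used above.  Later
 * (faster) feature checks overwrite earlier entries, so the best
 * supported implementation ends up in the table. */
typedef unsigned int (*sad_fn)(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride);

/* Portable default and a faster variant (bodies omitted; hypothetical). */
extern unsigned int sad16x16_c(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride);
extern unsigned int sad16x16_sse2(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride);

#define HAS_SSE2_BIT 0x08  /* hypothetical capability flag */

struct rtcd_table { sad_fn sad16x16; };

void rtcd_init(struct rtcd_table *rtcd, int flags) {
  /* Start from the portable C implementation... */
  rtcd->sad16x16 = sad16x16_c;
  /* ...then let each supported extension overwrite it, exactly as the
   * HAS_MMX / HAS_SSE2 / HAS_SSE3 blocks do in the init above. */
  if (flags & HAS_SSE2_BIT)
    rtcd->sad16x16 = sad16x16_sse2;
}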