| author | Johann <johannkoenig@google.com> | 2015-06-05 09:54:19 -0700 |
|---|---|---|
| committer | Johann <johannkoenig@google.com> | 2015-07-07 15:51:04 -0700 |
| commit | 6a82f0d7fb9ee908c389e8d55444bbaed3d54e9c (patch) | |
| tree | 5b346f932d7256defc451958f474a33cd8b51205 /test | |
| parent | 155b9416b36d9708b18f22ef2bc396fba264f513 (diff) | |
| download | libvpx-6a82f0d7fb9ee908c389e8d55444bbaed3d54e9c.tar.gz | |
Move sub pixel variance to vpx_dsp
Change-Id: I66bf6720c396c89aa2d1fd26d5d52bf5d5e3dff1
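For readers skimming the diff below: the test no longer goes through the removed vp9/encoder/vp9_variance.h types. It now calls the relocated vpx_dsp functions through two plain function-pointer typedefs, reproduced here from the hunk that adds them to test/variance_test.cc. Only the include and the comments are added in this excerpt, and the comments describe the usual libvpx semantics of these functions rather than anything introduced by this change.

```cpp
#include <cstdint>  // fixed-width types (the real test uses vpx/vpx_integer.h)

// Sub-pixel variance: form a bilinear prediction of the MxN block in `a` at
// the given eighth-pel (xoffset, yoffset), compare it against `b`, return the
// variance and write the sum of squared errors to `sse`.
typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
                                         int xoffset, int yoffset,
                                         const uint8_t *b, int b_stride,
                                         unsigned int *sse);

// Same operation, but the sub-pixel prediction is first averaged with
// `second_pred` (compound prediction) before the variance is computed.
typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
                                            int xoffset, int yoffset,
                                            const uint8_t *b, int b_stride,
                                            uint32_t *sse,
                                            const uint8_t *second_pred);
```

These typedefs replace the old vp9_subp_avg_variance_fn_t usage, which is why the SubpelVarianceTest specializations in the diff change their template argument.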
Diffstat (limited to 'test')
-rw-r--r-- | test/variance_test.cc | 1614 |
1 file changed, 756 insertions, 858 deletions
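Throughout the new INSTANTIATE_TEST_CASE_P lists in the diff, each tuple is (log2 width, log2 height, function, bit depth): (6, 6, ..., 0) is a 64x64 block on the 8-bit path, while a trailing 8, 10 or 12 selects the matching CONFIG_VP9_HIGHBITDEPTH variant. The Ref tests compare the function under test against a C model; the sketch below shows roughly what that model computes for the plain integer-pel case. It is illustrative only, with made-up names (variance_ref, sse_out) that are not taken from variance_test.cc, and it omits the bilinear sub-pixel interpolation that subpel_variance_ref performs on top of this.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative sketch, not code from variance_test.cc:
//   variance = SSE - (sum * sum) / (W * H), with W = 1 << l2w, H = 1 << l2h.
// The ">> (l2w + l2h)" matches the "(l2w + l2h)" fragment visible in the
// subpel_variance_ref hunk of the diff below.
static uint32_t variance_ref(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int l2w, int l2h, uint32_t *sse_out) {
  const int w = 1 << l2w;  // e.g. make_tuple(6, 5, fn, 0) -> 64x32, 8-bit path
  const int h = 1 << l2h;
  int64_t sum = 0;
  uint64_t sse = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int diff = src[y * src_stride + x] - ref[y * ref_stride + x];
      sum += diff;
      sse += diff * diff;
    }
  }
  *sse_out = static_cast<uint32_t>(sse);
  return static_cast<uint32_t>(sse - ((sum * sum) >> (l2w + l2h)));
}

int main() {
  uint8_t src[16 * 16], ref[16 * 16];
  for (int i = 0; i < 16 * 16; ++i) {
    src[i] = static_cast<uint8_t>(i & 255);  // arbitrary test pattern
    ref[i] = 128;                            // flat reference block
  }
  uint32_t sse = 0;
  const uint32_t var = variance_ref(src, 16, ref, 16, 4, 4, &sse);  // 16x16
  std::printf("sse=%u variance=%u\n", sse, var);
  return 0;
}
```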
diff --git a/test/variance_test.cc b/test/variance_test.cc index c9dbcd469..64095bc03 100644 --- a/test/variance_test.cc +++ b/test/variance_test.cc @@ -21,13 +21,6 @@ #include "vpx/vpx_integer.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" -#if CONFIG_VP8_ENCODER -# include "./vp8_rtcd.h" -#endif // CONFIG_VP8_ENCODER -#if CONFIG_VP9_ENCODER -# include "./vp9_rtcd.h" -# include "vp9/encoder/vp9_variance.h" -#endif // CONFIG_VP9_ENCODER #include "./vpx_dsp_rtcd.h" namespace { @@ -39,8 +32,15 @@ typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, unsigned int *sse); +typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride, + int xoffset, int yoffset, + const uint8_t *b, int b_stride, + uint32_t *sse, + const uint8_t *second_pred); typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride); +typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src); + using ::std::tr1::get; using ::std::tr1::make_tuple; @@ -166,8 +166,6 @@ static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src, (l2w + l2h))); } -typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src); - class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> { public: SumOfSquaresTest() : func_(GetParam()) {} @@ -687,9 +685,8 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() { } } -#if CONFIG_VP9_ENCODER template<> -void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() { +void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() { for (int x = 0; x < 8; ++x) { for (int y = 0; y < 8; ++y) { if (!use_high_bit_depth_) { @@ -726,11 +723,12 @@ void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() { } } } -#endif // CONFIG_VP9_ENCODER typedef MseTest<Get4x4SseFunc> VpxSseTest; typedef MseTest<VarianceMxNFunc> VpxMseTest; typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest; +typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest; +typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest; TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); } TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); } @@ -742,6 +740,9 @@ TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); } TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); } TEST_P(SumOfSquaresTest, Const) { ConstTest(); } TEST_P(SumOfSquaresTest, Ref) { RefTest(); } +TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); } +TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); } +TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); } INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest, ::testing::Values(vpx_get_mb_ss_c)); @@ -773,7 +774,6 @@ const VarianceMxNFunc variance8x8_c = vpx_variance8x8_c; const VarianceMxNFunc variance8x4_c = vpx_variance8x4_c; const VarianceMxNFunc variance4x8_c = vpx_variance4x8_c; const VarianceMxNFunc variance4x4_c = vpx_variance4x4_c; - INSTANTIATE_TEST_CASE_P( C, VpxVarianceTest, ::testing::Values(make_tuple(6, 6, variance64x64_c, 0), @@ -790,9 +790,79 @@ INSTANTIATE_TEST_CASE_P( make_tuple(2, 3, variance4x8_c, 0), make_tuple(2, 2, variance4x4_c, 0))); +const SubpixVarMxNFunc subpel_var64x64_c = vpx_sub_pixel_variance64x64_c; +const SubpixVarMxNFunc subpel_var64x32_c = vpx_sub_pixel_variance64x32_c; +const SubpixVarMxNFunc subpel_var32x64_c = vpx_sub_pixel_variance32x64_c; +const SubpixVarMxNFunc subpel_var32x32_c = vpx_sub_pixel_variance32x32_c; +const SubpixVarMxNFunc subpel_var32x16_c = 
vpx_sub_pixel_variance32x16_c; +const SubpixVarMxNFunc subpel_var16x32_c = vpx_sub_pixel_variance16x32_c; +const SubpixVarMxNFunc subpel_var16x16_c = vpx_sub_pixel_variance16x16_c; +const SubpixVarMxNFunc subpel_var16x8_c = vpx_sub_pixel_variance16x8_c; +const SubpixVarMxNFunc subpel_var8x16_c = vpx_sub_pixel_variance8x16_c; +const SubpixVarMxNFunc subpel_var8x8_c = vpx_sub_pixel_variance8x8_c; +const SubpixVarMxNFunc subpel_var8x4_c = vpx_sub_pixel_variance8x4_c; +const SubpixVarMxNFunc subpel_var4x8_c = vpx_sub_pixel_variance4x8_c; +const SubpixVarMxNFunc subpel_var4x4_c = vpx_sub_pixel_variance4x4_c; +INSTANTIATE_TEST_CASE_P( + C, VpxSubpelVarianceTest, + ::testing::Values(make_tuple(6, 6, subpel_var64x64_c, 0), + make_tuple(6, 5, subpel_var64x32_c, 0), + make_tuple(5, 6, subpel_var32x64_c, 0), + make_tuple(5, 5, subpel_var32x32_c, 0), + make_tuple(5, 4, subpel_var32x16_c, 0), + make_tuple(4, 5, subpel_var16x32_c, 0), + make_tuple(4, 4, subpel_var16x16_c, 0), + make_tuple(4, 3, subpel_var16x8_c, 0), + make_tuple(3, 4, subpel_var8x16_c, 0), + make_tuple(3, 3, subpel_var8x8_c, 0), + make_tuple(3, 2, subpel_var8x4_c, 0), + make_tuple(2, 3, subpel_var4x8_c, 0), + make_tuple(2, 2, subpel_var4x4_c, 0))); + +const SubpixAvgVarMxNFunc subpel_avg_var64x64_c = + vpx_sub_pixel_avg_variance64x64_c; +const SubpixAvgVarMxNFunc subpel_avg_var64x32_c = + vpx_sub_pixel_avg_variance64x32_c; +const SubpixAvgVarMxNFunc subpel_avg_var32x64_c = + vpx_sub_pixel_avg_variance32x64_c; +const SubpixAvgVarMxNFunc subpel_avg_var32x32_c = + vpx_sub_pixel_avg_variance32x32_c; +const SubpixAvgVarMxNFunc subpel_avg_var32x16_c = + vpx_sub_pixel_avg_variance32x16_c; +const SubpixAvgVarMxNFunc subpel_avg_var16x32_c = + vpx_sub_pixel_avg_variance16x32_c; +const SubpixAvgVarMxNFunc subpel_avg_var16x16_c = + vpx_sub_pixel_avg_variance16x16_c; +const SubpixAvgVarMxNFunc subpel_avg_var16x8_c = + vpx_sub_pixel_avg_variance16x8_c; +const SubpixAvgVarMxNFunc subpel_avg_var8x16_c = + vpx_sub_pixel_avg_variance8x16_c; +const SubpixAvgVarMxNFunc subpel_avg_var8x8_c = vpx_sub_pixel_avg_variance8x8_c; +const SubpixAvgVarMxNFunc subpel_avg_var8x4_c = vpx_sub_pixel_avg_variance8x4_c; +const SubpixAvgVarMxNFunc subpel_avg_var4x8_c = vpx_sub_pixel_avg_variance4x8_c; +const SubpixAvgVarMxNFunc subpel_avg_var4x4_c = vpx_sub_pixel_avg_variance4x4_c; +INSTANTIATE_TEST_CASE_P( + C, VpxSubpelAvgVarianceTest, + ::testing::Values(make_tuple(6, 6, subpel_avg_var64x64_c, 0), + make_tuple(6, 5, subpel_avg_var64x32_c, 0), + make_tuple(5, 6, subpel_avg_var32x64_c, 0), + make_tuple(5, 5, subpel_avg_var32x32_c, 0), + make_tuple(5, 4, subpel_avg_var32x16_c, 0), + make_tuple(4, 5, subpel_avg_var16x32_c, 0), + make_tuple(4, 4, subpel_avg_var16x16_c, 0), + make_tuple(4, 3, subpel_avg_var16x8_c, 0), + make_tuple(3, 4, subpel_avg_var8x16_c, 0), + make_tuple(3, 3, subpel_avg_var8x8_c, 0), + make_tuple(3, 2, subpel_avg_var8x4_c, 0), + make_tuple(2, 3, subpel_avg_var4x8_c, 0), + make_tuple(2, 2, subpel_avg_var4x4_c, 0))); + #if CONFIG_VP9_HIGHBITDEPTH typedef MseTest<VarianceMxNFunc> VpxHBDMseTest; typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest; +typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest; +typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> + VpxHBDSubpelAvgVarianceTest; TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); } TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); } @@ -800,6 +870,9 @@ TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); } TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); } TEST_P(VpxHBDVarianceTest, RefStride) { 
RefStrideTest(); } TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); } +TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); } +TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); } +TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); } /* TODO(debargha): This test does not support the highbd version const VarianceMxNFunc highbd_12_mse16x16_c = vpx_highbd_12_mse16x16_c; @@ -844,7 +917,6 @@ const VarianceMxNFunc highbd_12_variance8x8_c = vpx_highbd_12_variance8x8_c; const VarianceMxNFunc highbd_12_variance8x4_c = vpx_highbd_12_variance8x4_c; const VarianceMxNFunc highbd_12_variance4x8_c = vpx_highbd_12_variance4x8_c; const VarianceMxNFunc highbd_12_variance4x4_c = vpx_highbd_12_variance4x4_c; - const VarianceMxNFunc highbd_10_variance64x64_c = vpx_highbd_10_variance64x64_c; const VarianceMxNFunc highbd_10_variance64x32_c = vpx_highbd_10_variance64x32_c; const VarianceMxNFunc highbd_10_variance32x64_c = vpx_highbd_10_variance32x64_c; @@ -858,7 +930,6 @@ const VarianceMxNFunc highbd_10_variance8x8_c = vpx_highbd_10_variance8x8_c; const VarianceMxNFunc highbd_10_variance8x4_c = vpx_highbd_10_variance8x4_c; const VarianceMxNFunc highbd_10_variance4x8_c = vpx_highbd_10_variance4x8_c; const VarianceMxNFunc highbd_10_variance4x4_c = vpx_highbd_10_variance4x4_c; - const VarianceMxNFunc highbd_8_variance64x64_c = vpx_highbd_8_variance64x64_c; const VarianceMxNFunc highbd_8_variance64x32_c = vpx_highbd_8_variance64x32_c; const VarianceMxNFunc highbd_8_variance32x64_c = vpx_highbd_8_variance32x64_c; @@ -913,6 +984,247 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 2, highbd_8_variance8x4_c, 8), make_tuple(2, 3, highbd_8_variance4x8_c, 8), make_tuple(2, 2, highbd_8_variance4x4_c, 8))); + +const SubpixVarMxNFunc highbd_8_subpel_var64x64_c = + vpx_highbd_8_sub_pixel_variance64x64_c; +const SubpixVarMxNFunc highbd_8_subpel_var64x32_c = + vpx_highbd_8_sub_pixel_variance64x32_c; +const SubpixVarMxNFunc highbd_8_subpel_var32x64_c = + vpx_highbd_8_sub_pixel_variance32x64_c; +const SubpixVarMxNFunc highbd_8_subpel_var32x32_c = + vpx_highbd_8_sub_pixel_variance32x32_c; +const SubpixVarMxNFunc highbd_8_subpel_var32x16_c = + vpx_highbd_8_sub_pixel_variance32x16_c; +const SubpixVarMxNFunc highbd_8_subpel_var16x32_c = + vpx_highbd_8_sub_pixel_variance16x32_c; +const SubpixVarMxNFunc highbd_8_subpel_var16x16_c = + vpx_highbd_8_sub_pixel_variance16x16_c; +const SubpixVarMxNFunc highbd_8_subpel_var16x8_c = + vpx_highbd_8_sub_pixel_variance16x8_c; +const SubpixVarMxNFunc highbd_8_subpel_var8x16_c = + vpx_highbd_8_sub_pixel_variance8x16_c; +const SubpixVarMxNFunc highbd_8_subpel_var8x8_c = + vpx_highbd_8_sub_pixel_variance8x8_c; +const SubpixVarMxNFunc highbd_8_subpel_var8x4_c = + vpx_highbd_8_sub_pixel_variance8x4_c; +const SubpixVarMxNFunc highbd_8_subpel_var4x8_c = + vpx_highbd_8_sub_pixel_variance4x8_c; +const SubpixVarMxNFunc highbd_8_subpel_var4x4_c = + vpx_highbd_8_sub_pixel_variance4x4_c; +const SubpixVarMxNFunc highbd_10_subpel_var64x64_c = + vpx_highbd_10_sub_pixel_variance64x64_c; +const SubpixVarMxNFunc highbd_10_subpel_var64x32_c = + vpx_highbd_10_sub_pixel_variance64x32_c; +const SubpixVarMxNFunc highbd_10_subpel_var32x64_c = + vpx_highbd_10_sub_pixel_variance32x64_c; +const SubpixVarMxNFunc highbd_10_subpel_var32x32_c = + vpx_highbd_10_sub_pixel_variance32x32_c; +const SubpixVarMxNFunc highbd_10_subpel_var32x16_c = + vpx_highbd_10_sub_pixel_variance32x16_c; +const SubpixVarMxNFunc highbd_10_subpel_var16x32_c = + vpx_highbd_10_sub_pixel_variance16x32_c; +const SubpixVarMxNFunc 
highbd_10_subpel_var16x16_c = + vpx_highbd_10_sub_pixel_variance16x16_c; +const SubpixVarMxNFunc highbd_10_subpel_var16x8_c = + vpx_highbd_10_sub_pixel_variance16x8_c; +const SubpixVarMxNFunc highbd_10_subpel_var8x16_c = + vpx_highbd_10_sub_pixel_variance8x16_c; +const SubpixVarMxNFunc highbd_10_subpel_var8x8_c = + vpx_highbd_10_sub_pixel_variance8x8_c; +const SubpixVarMxNFunc highbd_10_subpel_var8x4_c = + vpx_highbd_10_sub_pixel_variance8x4_c; +const SubpixVarMxNFunc highbd_10_subpel_var4x8_c = + vpx_highbd_10_sub_pixel_variance4x8_c; +const SubpixVarMxNFunc highbd_10_subpel_var4x4_c = + vpx_highbd_10_sub_pixel_variance4x4_c; +const SubpixVarMxNFunc highbd_12_subpel_var64x64_c = + vpx_highbd_12_sub_pixel_variance64x64_c; +const SubpixVarMxNFunc highbd_12_subpel_var64x32_c = + vpx_highbd_12_sub_pixel_variance64x32_c; +const SubpixVarMxNFunc highbd_12_subpel_var32x64_c = + vpx_highbd_12_sub_pixel_variance32x64_c; +const SubpixVarMxNFunc highbd_12_subpel_var32x32_c = + vpx_highbd_12_sub_pixel_variance32x32_c; +const SubpixVarMxNFunc highbd_12_subpel_var32x16_c = + vpx_highbd_12_sub_pixel_variance32x16_c; +const SubpixVarMxNFunc highbd_12_subpel_var16x32_c = + vpx_highbd_12_sub_pixel_variance16x32_c; +const SubpixVarMxNFunc highbd_12_subpel_var16x16_c = + vpx_highbd_12_sub_pixel_variance16x16_c; +const SubpixVarMxNFunc highbd_12_subpel_var16x8_c = + vpx_highbd_12_sub_pixel_variance16x8_c; +const SubpixVarMxNFunc highbd_12_subpel_var8x16_c = + vpx_highbd_12_sub_pixel_variance8x16_c; +const SubpixVarMxNFunc highbd_12_subpel_var8x8_c = + vpx_highbd_12_sub_pixel_variance8x8_c; +const SubpixVarMxNFunc highbd_12_subpel_var8x4_c = + vpx_highbd_12_sub_pixel_variance8x4_c; +const SubpixVarMxNFunc highbd_12_subpel_var4x8_c = + vpx_highbd_12_sub_pixel_variance4x8_c; +const SubpixVarMxNFunc highbd_12_subpel_var4x4_c = + vpx_highbd_12_sub_pixel_variance4x4_c; +INSTANTIATE_TEST_CASE_P( + C, VpxHBDSubpelVarianceTest, + ::testing::Values(make_tuple(6, 6, highbd_8_subpel_var64x64_c, 8), + make_tuple(6, 5, highbd_8_subpel_var64x32_c, 8), + make_tuple(5, 6, highbd_8_subpel_var32x64_c, 8), + make_tuple(5, 5, highbd_8_subpel_var32x32_c, 8), + make_tuple(5, 4, highbd_8_subpel_var32x16_c, 8), + make_tuple(4, 5, highbd_8_subpel_var16x32_c, 8), + make_tuple(4, 4, highbd_8_subpel_var16x16_c, 8), + make_tuple(4, 3, highbd_8_subpel_var16x8_c, 8), + make_tuple(3, 4, highbd_8_subpel_var8x16_c, 8), + make_tuple(3, 3, highbd_8_subpel_var8x8_c, 8), + make_tuple(3, 2, highbd_8_subpel_var8x4_c, 8), + make_tuple(2, 3, highbd_8_subpel_var4x8_c, 8), + make_tuple(2, 2, highbd_8_subpel_var4x4_c, 8), + make_tuple(6, 6, highbd_10_subpel_var64x64_c, 10), + make_tuple(6, 5, highbd_10_subpel_var64x32_c, 10), + make_tuple(5, 6, highbd_10_subpel_var32x64_c, 10), + make_tuple(5, 5, highbd_10_subpel_var32x32_c, 10), + make_tuple(5, 4, highbd_10_subpel_var32x16_c, 10), + make_tuple(4, 5, highbd_10_subpel_var16x32_c, 10), + make_tuple(4, 4, highbd_10_subpel_var16x16_c, 10), + make_tuple(4, 3, highbd_10_subpel_var16x8_c, 10), + make_tuple(3, 4, highbd_10_subpel_var8x16_c, 10), + make_tuple(3, 3, highbd_10_subpel_var8x8_c, 10), + make_tuple(3, 2, highbd_10_subpel_var8x4_c, 10), + make_tuple(2, 3, highbd_10_subpel_var4x8_c, 10), + make_tuple(2, 2, highbd_10_subpel_var4x4_c, 10), + make_tuple(6, 6, highbd_12_subpel_var64x64_c, 12), + make_tuple(6, 5, highbd_12_subpel_var64x32_c, 12), + make_tuple(5, 6, highbd_12_subpel_var32x64_c, 12), + make_tuple(5, 5, highbd_12_subpel_var32x32_c, 12), + make_tuple(5, 4, highbd_12_subpel_var32x16_c, 12), + 
make_tuple(4, 5, highbd_12_subpel_var16x32_c, 12), + make_tuple(4, 4, highbd_12_subpel_var16x16_c, 12), + make_tuple(4, 3, highbd_12_subpel_var16x8_c, 12), + make_tuple(3, 4, highbd_12_subpel_var8x16_c, 12), + make_tuple(3, 3, highbd_12_subpel_var8x8_c, 12), + make_tuple(3, 2, highbd_12_subpel_var8x4_c, 12), + make_tuple(2, 3, highbd_12_subpel_var4x8_c, 12), + make_tuple(2, 2, highbd_12_subpel_var4x4_c, 12))); + +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x64_c = + vpx_highbd_8_sub_pixel_avg_variance64x64_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var64x32_c = + vpx_highbd_8_sub_pixel_avg_variance64x32_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x64_c = + vpx_highbd_8_sub_pixel_avg_variance32x64_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x32_c = + vpx_highbd_8_sub_pixel_avg_variance32x32_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var32x16_c = + vpx_highbd_8_sub_pixel_avg_variance32x16_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x32_c = + vpx_highbd_8_sub_pixel_avg_variance16x32_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x16_c = + vpx_highbd_8_sub_pixel_avg_variance16x16_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var16x8_c = + vpx_highbd_8_sub_pixel_avg_variance16x8_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x16_c = + vpx_highbd_8_sub_pixel_avg_variance8x16_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x8_c = + vpx_highbd_8_sub_pixel_avg_variance8x8_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var8x4_c = + vpx_highbd_8_sub_pixel_avg_variance8x4_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x8_c = + vpx_highbd_8_sub_pixel_avg_variance4x8_c; +const SubpixAvgVarMxNFunc highbd_8_subpel_avg_var4x4_c = + vpx_highbd_8_sub_pixel_avg_variance4x4_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x64_c = + vpx_highbd_10_sub_pixel_avg_variance64x64_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var64x32_c = + vpx_highbd_10_sub_pixel_avg_variance64x32_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x64_c = + vpx_highbd_10_sub_pixel_avg_variance32x64_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x32_c = + vpx_highbd_10_sub_pixel_avg_variance32x32_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var32x16_c = + vpx_highbd_10_sub_pixel_avg_variance32x16_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x32_c = + vpx_highbd_10_sub_pixel_avg_variance16x32_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x16_c = + vpx_highbd_10_sub_pixel_avg_variance16x16_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var16x8_c = + vpx_highbd_10_sub_pixel_avg_variance16x8_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x16_c = + vpx_highbd_10_sub_pixel_avg_variance8x16_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x8_c = + vpx_highbd_10_sub_pixel_avg_variance8x8_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var8x4_c = + vpx_highbd_10_sub_pixel_avg_variance8x4_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x8_c = + vpx_highbd_10_sub_pixel_avg_variance4x8_c; +const SubpixAvgVarMxNFunc highbd_10_subpel_avg_var4x4_c = + vpx_highbd_10_sub_pixel_avg_variance4x4_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x64_c = + vpx_highbd_12_sub_pixel_avg_variance64x64_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var64x32_c = + vpx_highbd_12_sub_pixel_avg_variance64x32_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x64_c = + vpx_highbd_12_sub_pixel_avg_variance32x64_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x32_c = + 
vpx_highbd_12_sub_pixel_avg_variance32x32_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var32x16_c = + vpx_highbd_12_sub_pixel_avg_variance32x16_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x32_c = + vpx_highbd_12_sub_pixel_avg_variance16x32_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x16_c = + vpx_highbd_12_sub_pixel_avg_variance16x16_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var16x8_c = + vpx_highbd_12_sub_pixel_avg_variance16x8_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x16_c = + vpx_highbd_12_sub_pixel_avg_variance8x16_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x8_c = + vpx_highbd_12_sub_pixel_avg_variance8x8_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var8x4_c = + vpx_highbd_12_sub_pixel_avg_variance8x4_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x8_c = + vpx_highbd_12_sub_pixel_avg_variance4x8_c; +const SubpixAvgVarMxNFunc highbd_12_subpel_avg_var4x4_c = + vpx_highbd_12_sub_pixel_avg_variance4x4_c; +INSTANTIATE_TEST_CASE_P( + C, VpxHBDSubpelAvgVarianceTest, + ::testing::Values( + make_tuple(6, 6, highbd_8_subpel_avg_var64x64_c, 8), + make_tuple(6, 5, highbd_8_subpel_avg_var64x32_c, 8), + make_tuple(5, 6, highbd_8_subpel_avg_var32x64_c, 8), + make_tuple(5, 5, highbd_8_subpel_avg_var32x32_c, 8), + make_tuple(5, 4, highbd_8_subpel_avg_var32x16_c, 8), + make_tuple(4, 5, highbd_8_subpel_avg_var16x32_c, 8), + make_tuple(4, 4, highbd_8_subpel_avg_var16x16_c, 8), + make_tuple(4, 3, highbd_8_subpel_avg_var16x8_c, 8), + make_tuple(3, 4, highbd_8_subpel_avg_var8x16_c, 8), + make_tuple(3, 3, highbd_8_subpel_avg_var8x8_c, 8), + make_tuple(3, 2, highbd_8_subpel_avg_var8x4_c, 8), + make_tuple(2, 3, highbd_8_subpel_avg_var4x8_c, 8), + make_tuple(2, 2, highbd_8_subpel_avg_var4x4_c, 8), + make_tuple(6, 6, highbd_10_subpel_avg_var64x64_c, 10), + make_tuple(6, 5, highbd_10_subpel_avg_var64x32_c, 10), + make_tuple(5, 6, highbd_10_subpel_avg_var32x64_c, 10), + make_tuple(5, 5, highbd_10_subpel_avg_var32x32_c, 10), + make_tuple(5, 4, highbd_10_subpel_avg_var32x16_c, 10), + make_tuple(4, 5, highbd_10_subpel_avg_var16x32_c, 10), + make_tuple(4, 4, highbd_10_subpel_avg_var16x16_c, 10), + make_tuple(4, 3, highbd_10_subpel_avg_var16x8_c, 10), + make_tuple(3, 4, highbd_10_subpel_avg_var8x16_c, 10), + make_tuple(3, 3, highbd_10_subpel_avg_var8x8_c, 10), + make_tuple(3, 2, highbd_10_subpel_avg_var8x4_c, 10), + make_tuple(2, 3, highbd_10_subpel_avg_var4x8_c, 10), + make_tuple(2, 2, highbd_10_subpel_avg_var4x4_c, 10), + make_tuple(6, 6, highbd_12_subpel_avg_var64x64_c, 12), + make_tuple(6, 5, highbd_12_subpel_avg_var64x32_c, 12), + make_tuple(5, 6, highbd_12_subpel_avg_var32x64_c, 12), + make_tuple(5, 5, highbd_12_subpel_avg_var32x32_c, 12), + make_tuple(5, 4, highbd_12_subpel_avg_var32x16_c, 12), + make_tuple(4, 5, highbd_12_subpel_avg_var16x32_c, 12), + make_tuple(4, 4, highbd_12_subpel_avg_var16x16_c, 12), + make_tuple(4, 3, highbd_12_subpel_avg_var16x8_c, 12), + make_tuple(3, 4, highbd_12_subpel_avg_var8x16_c, 12), + make_tuple(3, 3, highbd_12_subpel_avg_var8x8_c, 12), + make_tuple(3, 2, highbd_12_subpel_avg_var8x4_c, 12), + make_tuple(2, 3, highbd_12_subpel_avg_var4x8_c, 12), + make_tuple(2, 2, highbd_12_subpel_avg_var4x4_c, 12))); #endif // CONFIG_VP9_HIGHBITDEPTH #if HAVE_MMX @@ -935,6 +1247,19 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 4, variance8x16_mmx, 0), make_tuple(3, 3, variance8x8_mmx, 0), make_tuple(2, 2, variance4x4_mmx, 0))); + +const SubpixVarMxNFunc subpel_var16x16_mmx = vpx_sub_pixel_variance16x16_mmx; +const 
SubpixVarMxNFunc subpel_var16x8_mmx = vpx_sub_pixel_variance16x8_mmx; +const SubpixVarMxNFunc subpel_var8x16_mmx = vpx_sub_pixel_variance8x16_mmx; +const SubpixVarMxNFunc subpel_var8x8_mmx = vpx_sub_pixel_variance8x8_mmx; +const SubpixVarMxNFunc subpel_var4x4_mmx = vpx_sub_pixel_variance4x4_mmx; +INSTANTIATE_TEST_CASE_P( + MMX, VpxSubpelVarianceTest, + ::testing::Values(make_tuple(4, 4, subpel_var16x16_mmx, 0), + make_tuple(4, 3, subpel_var16x8_mmx, 0), + make_tuple(3, 4, subpel_var8x16_mmx, 0), + make_tuple(3, 3, subpel_var8x8_mmx, 0), + make_tuple(2, 2, subpel_var4x4_mmx, 0))); #endif // HAVE_MMX #if HAVE_SSE2 @@ -979,6 +1304,90 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 2, variance8x4_sse2, 0), make_tuple(2, 3, variance4x8_sse2, 0), make_tuple(2, 2, variance4x4_sse2, 0))); + +#if CONFIG_USE_X86INC +const SubpixVarMxNFunc subpel_variance64x64_sse2 = + vpx_sub_pixel_variance64x64_sse2; +const SubpixVarMxNFunc subpel_variance64x32_sse2 = + vpx_sub_pixel_variance64x32_sse2; +const SubpixVarMxNFunc subpel_variance32x64_sse2 = + vpx_sub_pixel_variance32x64_sse2; +const SubpixVarMxNFunc subpel_variance32x32_sse2 = + vpx_sub_pixel_variance32x32_sse2; +const SubpixVarMxNFunc subpel_variance32x16_sse2 = + vpx_sub_pixel_variance32x16_sse2; +const SubpixVarMxNFunc subpel_variance16x32_sse2 = + vpx_sub_pixel_variance16x32_sse2; +const SubpixVarMxNFunc subpel_variance16x16_sse2 = + vpx_sub_pixel_variance16x16_sse2; +const SubpixVarMxNFunc subpel_variance16x8_sse2 = + vpx_sub_pixel_variance16x8_sse2; +const SubpixVarMxNFunc subpel_variance8x16_sse2 = + vpx_sub_pixel_variance8x16_sse2; +const SubpixVarMxNFunc subpel_variance8x8_sse2 = vpx_sub_pixel_variance8x8_sse2; +const SubpixVarMxNFunc subpel_variance8x4_sse2 = vpx_sub_pixel_variance8x4_sse2; +const SubpixVarMxNFunc subpel_variance4x8_sse = vpx_sub_pixel_variance4x8_sse; +const SubpixVarMxNFunc subpel_variance4x4_sse = vpx_sub_pixel_variance4x4_sse; +INSTANTIATE_TEST_CASE_P( + SSE2, VpxSubpelVarianceTest, + ::testing::Values(make_tuple(6, 6, subpel_variance64x64_sse2, 0), + make_tuple(6, 5, subpel_variance64x32_sse2, 0), + make_tuple(5, 6, subpel_variance32x64_sse2, 0), + make_tuple(5, 5, subpel_variance32x32_sse2, 0), + make_tuple(5, 4, subpel_variance32x16_sse2, 0), + make_tuple(4, 5, subpel_variance16x32_sse2, 0), + make_tuple(4, 4, subpel_variance16x16_sse2, 0), + make_tuple(4, 3, subpel_variance16x8_sse2, 0), + make_tuple(3, 4, subpel_variance8x16_sse2, 0), + make_tuple(3, 3, subpel_variance8x8_sse2, 0), + make_tuple(3, 2, subpel_variance8x4_sse2, 0), + make_tuple(2, 3, subpel_variance4x8_sse, 0), + make_tuple(2, 2, subpel_variance4x4_sse, 0))); + +const SubpixAvgVarMxNFunc subpel_avg_variance64x64_sse2 = + vpx_sub_pixel_avg_variance64x64_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance64x32_sse2 = + vpx_sub_pixel_avg_variance64x32_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance32x64_sse2 = + vpx_sub_pixel_avg_variance32x64_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance32x32_sse2 = + vpx_sub_pixel_avg_variance32x32_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance32x16_sse2 = + vpx_sub_pixel_avg_variance32x16_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance16x32_sse2 = + vpx_sub_pixel_avg_variance16x32_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance16x16_sse2 = + vpx_sub_pixel_avg_variance16x16_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance16x8_sse2 = + vpx_sub_pixel_avg_variance16x8_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance8x16_sse2 = + vpx_sub_pixel_avg_variance8x16_sse2; +const SubpixAvgVarMxNFunc 
subpel_avg_variance8x8_sse2 = + vpx_sub_pixel_avg_variance8x8_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance8x4_sse2 = + vpx_sub_pixel_avg_variance8x4_sse2; +const SubpixAvgVarMxNFunc subpel_avg_variance4x8_sse = + vpx_sub_pixel_avg_variance4x8_sse; +const SubpixAvgVarMxNFunc subpel_avg_variance4x4_sse = + vpx_sub_pixel_avg_variance4x4_sse; +INSTANTIATE_TEST_CASE_P( + SSE2, VpxSubpelAvgVarianceTest, + ::testing::Values( + make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0), + make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0), + make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0), + make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0), + make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0), + make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0), + make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0), + make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0), + make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0), + make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0), + make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0), + make_tuple(2, 3, subpel_avg_variance4x8_sse, 0), + make_tuple(2, 2, subpel_avg_variance4x4_sse, 0))); +#endif // CONFIG_USE_X86INC + #if CONFIG_VP9_HIGHBITDEPTH /* TODO(debargha): This test does not support the highbd version const VarianceMxNFunc highbd_12_mse16x16_sse2 = vpx_highbd_12_mse16x16_sse2; @@ -1103,794 +1512,303 @@ INSTANTIATE_TEST_CASE_P( make_tuple(4, 3, highbd_8_variance16x8_sse2, 8), make_tuple(3, 4, highbd_8_variance8x16_sse2, 8), make_tuple(3, 3, highbd_8_variance8x8_sse2, 8))); -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif // HAVE_SSE2 - -#if CONFIG_VP8_ENCODER -typedef SubpelVarianceTest<SubpixVarMxNFunc> VP8SubpelVarianceTest; - -TEST_P(VP8SubpelVarianceTest, Ref) { RefTest(); } -TEST_P(VP8SubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); } -#endif // CONFIG_VP8_ENCODER - -#if CONFIG_VP9_ENCODER -typedef SubpelVarianceTest<SubpixVarMxNFunc> VP9SubpelVarianceTest; -typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest; - -TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); } -TEST_P(VP9SubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); } -TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); } - -#if CONFIG_VP9_HIGHBITDEPTH -typedef SubpelVarianceTest<SubpixVarMxNFunc> VP9SubpelVarianceHighTest; -typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> - VP9SubpelAvgVarianceHighTest; - -TEST_P(VP9SubpelVarianceHighTest, Ref) { RefTest(); } -TEST_P(VP9SubpelVarianceHighTest, ExtremeRef) { ExtremeRefTest(); } -TEST_P(VP9SubpelAvgVarianceHighTest, Ref) { RefTest(); } -#endif // CONFIG_VP9_HIGHBITDEPTH - -const SubpixVarMxNFunc subpel_variance4x4_c = vp9_sub_pixel_variance4x4_c; -const SubpixVarMxNFunc subpel_variance4x8_c = vp9_sub_pixel_variance4x8_c; -const SubpixVarMxNFunc subpel_variance8x4_c = vp9_sub_pixel_variance8x4_c; -const SubpixVarMxNFunc subpel_variance8x8_c = vp9_sub_pixel_variance8x8_c; -const SubpixVarMxNFunc subpel_variance8x16_c = vp9_sub_pixel_variance8x16_c; -const SubpixVarMxNFunc subpel_variance16x8_c = vp9_sub_pixel_variance16x8_c; -const SubpixVarMxNFunc subpel_variance16x16_c = vp9_sub_pixel_variance16x16_c; -const SubpixVarMxNFunc subpel_variance16x32_c = vp9_sub_pixel_variance16x32_c; -const SubpixVarMxNFunc subpel_variance32x16_c = vp9_sub_pixel_variance32x16_c; -const SubpixVarMxNFunc subpel_variance32x32_c = vp9_sub_pixel_variance32x32_c; -const SubpixVarMxNFunc subpel_variance32x64_c = vp9_sub_pixel_variance32x64_c; -const SubpixVarMxNFunc subpel_variance64x32_c = vp9_sub_pixel_variance64x32_c; -const SubpixVarMxNFunc 
subpel_variance64x64_c = vp9_sub_pixel_variance64x64_c; -INSTANTIATE_TEST_CASE_P( - C, VP9SubpelVarianceTest, - ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c, 0), - make_tuple(2, 3, subpel_variance4x8_c, 0), - make_tuple(3, 2, subpel_variance8x4_c, 0), - make_tuple(3, 3, subpel_variance8x8_c, 0), - make_tuple(3, 4, subpel_variance8x16_c, 0), - make_tuple(4, 3, subpel_variance16x8_c, 0), - make_tuple(4, 4, subpel_variance16x16_c, 0), - make_tuple(4, 5, subpel_variance16x32_c, 0), - make_tuple(5, 4, subpel_variance32x16_c, 0), - make_tuple(5, 5, subpel_variance32x32_c, 0), - make_tuple(5, 6, subpel_variance32x64_c, 0), - make_tuple(6, 5, subpel_variance64x32_c, 0), - make_tuple(6, 6, subpel_variance64x64_c, 0))); - -#if CONFIG_VP8_ENCODER -const SubpixVarMxNFunc vp8_subpel_variance16x16_c = - vp8_sub_pixel_variance16x16_c; -const SubpixVarMxNFunc vp8_subpel_variance16x8_c = vp8_sub_pixel_variance16x8_c; -const SubpixVarMxNFunc vp8_subpel_variance8x16_c = vp8_sub_pixel_variance8x16_c; -const SubpixVarMxNFunc vp8_subpel_variance8x8_c = vp8_sub_pixel_variance8x8_c; -const SubpixVarMxNFunc vp8_subpel_variance4x4_c = vp8_sub_pixel_variance4x4_c; -INSTANTIATE_TEST_CASE_P( - C, VP8SubpelVarianceTest, - ::testing::Values(make_tuple(2, 2, vp8_subpel_variance4x4_c, 0), - make_tuple(3, 3, vp8_subpel_variance8x8_c, 0), - make_tuple(3, 4, vp8_subpel_variance8x16_c, 0), - make_tuple(4, 3, vp8_subpel_variance16x8_c, 0), - make_tuple(4, 4, vp8_subpel_variance16x16_c, 0))); -#endif // CONFIG_VP8_ENCODER - -const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c = - vp9_sub_pixel_avg_variance4x4_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c = - vp9_sub_pixel_avg_variance4x8_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c = - vp9_sub_pixel_avg_variance8x4_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c = - vp9_sub_pixel_avg_variance8x8_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c = - vp9_sub_pixel_avg_variance8x16_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c = - vp9_sub_pixel_avg_variance16x8_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c = - vp9_sub_pixel_avg_variance16x16_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c = - vp9_sub_pixel_avg_variance16x32_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c = - vp9_sub_pixel_avg_variance32x16_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c = - vp9_sub_pixel_avg_variance32x32_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c = - vp9_sub_pixel_avg_variance32x64_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c = - vp9_sub_pixel_avg_variance64x32_c; -const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c = - vp9_sub_pixel_avg_variance64x64_c; -INSTANTIATE_TEST_CASE_P( - C, VP9SubpelAvgVarianceTest, - ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c, 0), - make_tuple(2, 3, subpel_avg_variance4x8_c, 0), - make_tuple(3, 2, subpel_avg_variance8x4_c, 0), - make_tuple(3, 3, subpel_avg_variance8x8_c, 0), - make_tuple(3, 4, subpel_avg_variance8x16_c, 0), - make_tuple(4, 3, subpel_avg_variance16x8_c, 0), - make_tuple(4, 4, subpel_avg_variance16x16_c, 0), - make_tuple(4, 5, subpel_avg_variance16x32_c, 0), - make_tuple(5, 4, subpel_avg_variance32x16_c, 0), - make_tuple(5, 5, subpel_avg_variance32x32_c, 0), - make_tuple(5, 6, subpel_avg_variance32x64_c, 0), - make_tuple(6, 5, subpel_avg_variance64x32_c, 0), - make_tuple(6, 6, subpel_avg_variance64x64_c, 0))); -#if 
CONFIG_VP9_HIGHBITDEPTH -const SubpixVarMxNFunc highbd_10_subpel_variance4x4_c = - vp9_highbd_10_sub_pixel_variance4x4_c; -const SubpixVarMxNFunc highbd_10_subpel_variance4x8_c = - vp9_highbd_10_sub_pixel_variance4x8_c; -const SubpixVarMxNFunc highbd_10_subpel_variance8x4_c = - vp9_highbd_10_sub_pixel_variance8x4_c; -const SubpixVarMxNFunc highbd_10_subpel_variance8x8_c = - vp9_highbd_10_sub_pixel_variance8x8_c; -const SubpixVarMxNFunc highbd_10_subpel_variance8x16_c = - vp9_highbd_10_sub_pixel_variance8x16_c; -const SubpixVarMxNFunc highbd_10_subpel_variance16x8_c = - vp9_highbd_10_sub_pixel_variance16x8_c; -const SubpixVarMxNFunc highbd_10_subpel_variance16x16_c = - vp9_highbd_10_sub_pixel_variance16x16_c; -const SubpixVarMxNFunc highbd_10_subpel_variance16x32_c = - vp9_highbd_10_sub_pixel_variance16x32_c; -const SubpixVarMxNFunc highbd_10_subpel_variance32x16_c = - vp9_highbd_10_sub_pixel_variance32x16_c; -const SubpixVarMxNFunc highbd_10_subpel_variance32x32_c = - vp9_highbd_10_sub_pixel_variance32x32_c; -const SubpixVarMxNFunc highbd_10_subpel_variance32x64_c = - vp9_highbd_10_sub_pixel_variance32x64_c; -const SubpixVarMxNFunc highbd_10_subpel_variance64x32_c = - vp9_highbd_10_sub_pixel_variance64x32_c; -const SubpixVarMxNFunc highbd_10_subpel_variance64x64_c = - vp9_highbd_10_sub_pixel_variance64x64_c; -const SubpixVarMxNFunc highbd_12_subpel_variance4x4_c = - vp9_highbd_12_sub_pixel_variance4x4_c; -const SubpixVarMxNFunc highbd_12_subpel_variance4x8_c = - vp9_highbd_12_sub_pixel_variance4x8_c; -const SubpixVarMxNFunc highbd_12_subpel_variance8x4_c = - vp9_highbd_12_sub_pixel_variance8x4_c; -const SubpixVarMxNFunc highbd_12_subpel_variance8x8_c = - vp9_highbd_12_sub_pixel_variance8x8_c; -const SubpixVarMxNFunc highbd_12_subpel_variance8x16_c = - vp9_highbd_12_sub_pixel_variance8x16_c; -const SubpixVarMxNFunc highbd_12_subpel_variance16x8_c = - vp9_highbd_12_sub_pixel_variance16x8_c; -const SubpixVarMxNFunc highbd_12_subpel_variance16x16_c = - vp9_highbd_12_sub_pixel_variance16x16_c; -const SubpixVarMxNFunc highbd_12_subpel_variance16x32_c = - vp9_highbd_12_sub_pixel_variance16x32_c; -const SubpixVarMxNFunc highbd_12_subpel_variance32x16_c = - vp9_highbd_12_sub_pixel_variance32x16_c; -const SubpixVarMxNFunc highbd_12_subpel_variance32x32_c = - vp9_highbd_12_sub_pixel_variance32x32_c; -const SubpixVarMxNFunc highbd_12_subpel_variance32x64_c = - vp9_highbd_12_sub_pixel_variance32x64_c; -const SubpixVarMxNFunc highbd_12_subpel_variance64x32_c = - vp9_highbd_12_sub_pixel_variance64x32_c; -const SubpixVarMxNFunc highbd_12_subpel_variance64x64_c = - vp9_highbd_12_sub_pixel_variance64x64_c; -const SubpixVarMxNFunc highbd_subpel_variance4x4_c = - vp9_highbd_sub_pixel_variance4x4_c; -const SubpixVarMxNFunc highbd_subpel_variance4x8_c = - vp9_highbd_sub_pixel_variance4x8_c; -const SubpixVarMxNFunc highbd_subpel_variance8x4_c = - vp9_highbd_sub_pixel_variance8x4_c; -const SubpixVarMxNFunc highbd_subpel_variance8x8_c = - vp9_highbd_sub_pixel_variance8x8_c; -const SubpixVarMxNFunc highbd_subpel_variance8x16_c = - vp9_highbd_sub_pixel_variance8x16_c; -const SubpixVarMxNFunc highbd_subpel_variance16x8_c = - vp9_highbd_sub_pixel_variance16x8_c; -const SubpixVarMxNFunc highbd_subpel_variance16x16_c = - vp9_highbd_sub_pixel_variance16x16_c; -const SubpixVarMxNFunc highbd_subpel_variance16x32_c = - vp9_highbd_sub_pixel_variance16x32_c; -const SubpixVarMxNFunc highbd_subpel_variance32x16_c = - vp9_highbd_sub_pixel_variance32x16_c; -const SubpixVarMxNFunc highbd_subpel_variance32x32_c = - 
vp9_highbd_sub_pixel_variance32x32_c; -const SubpixVarMxNFunc highbd_subpel_variance32x64_c = - vp9_highbd_sub_pixel_variance32x64_c; -const SubpixVarMxNFunc highbd_subpel_variance64x32_c = - vp9_highbd_sub_pixel_variance64x32_c; -const SubpixVarMxNFunc highbd_subpel_variance64x64_c = - vp9_highbd_sub_pixel_variance64x64_c; -INSTANTIATE_TEST_CASE_P( - C, VP9SubpelVarianceHighTest, - ::testing::Values(make_tuple(2, 2, highbd_10_subpel_variance4x4_c, 10), - make_tuple(2, 3, highbd_10_subpel_variance4x8_c, 10), - make_tuple(3, 2, highbd_10_subpel_variance8x4_c, 10), - make_tuple(3, 3, highbd_10_subpel_variance8x8_c, 10), - make_tuple(3, 4, highbd_10_subpel_variance8x16_c, 10), - make_tuple(4, 3, highbd_10_subpel_variance16x8_c, 10), - make_tuple(4, 4, highbd_10_subpel_variance16x16_c, 10), - make_tuple(4, 5, highbd_10_subpel_variance16x32_c, 10), - make_tuple(5, 4, highbd_10_subpel_variance32x16_c, 10), - make_tuple(5, 5, highbd_10_subpel_variance32x32_c, 10), - make_tuple(5, 6, highbd_10_subpel_variance32x64_c, 10), - make_tuple(6, 5, highbd_10_subpel_variance64x32_c, 10), - make_tuple(6, 6, highbd_10_subpel_variance64x64_c, 10), - make_tuple(2, 2, highbd_12_subpel_variance4x4_c, 12), - make_tuple(2, 3, highbd_12_subpel_variance4x8_c, 12), - make_tuple(3, 2, highbd_12_subpel_variance8x4_c, 12), - make_tuple(3, 3, highbd_12_subpel_variance8x8_c, 12), - make_tuple(3, 4, highbd_12_subpel_variance8x16_c, 12), - make_tuple(4, 3, highbd_12_subpel_variance16x8_c, 12), - make_tuple(4, 4, highbd_12_subpel_variance16x16_c, 12), - make_tuple(4, 5, highbd_12_subpel_variance16x32_c, 12), - make_tuple(5, 4, highbd_12_subpel_variance32x16_c, 12), - make_tuple(5, 5, highbd_12_subpel_variance32x32_c, 12), - make_tuple(5, 6, highbd_12_subpel_variance32x64_c, 12), - make_tuple(6, 5, highbd_12_subpel_variance64x32_c, 12), - make_tuple(6, 6, highbd_12_subpel_variance64x64_c, 12), - make_tuple(2, 2, highbd_subpel_variance4x4_c, 8), - make_tuple(2, 3, highbd_subpel_variance4x8_c, 8), - make_tuple(3, 2, highbd_subpel_variance8x4_c, 8), - make_tuple(3, 3, highbd_subpel_variance8x8_c, 8), - make_tuple(3, 4, highbd_subpel_variance8x16_c, 8), - make_tuple(4, 3, highbd_subpel_variance16x8_c, 8), - make_tuple(4, 4, highbd_subpel_variance16x16_c, 8), - make_tuple(4, 5, highbd_subpel_variance16x32_c, 8), - make_tuple(5, 4, highbd_subpel_variance32x16_c, 8), - make_tuple(5, 5, highbd_subpel_variance32x32_c, 8), - make_tuple(5, 6, highbd_subpel_variance32x64_c, 8), - make_tuple(6, 5, highbd_subpel_variance64x32_c, 8), - make_tuple(6, 6, highbd_subpel_variance64x64_c, 8))); -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance4x4_c = - vp9_highbd_10_sub_pixel_avg_variance4x4_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance4x8_c = - vp9_highbd_10_sub_pixel_avg_variance4x8_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x4_c = - vp9_highbd_10_sub_pixel_avg_variance8x4_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x8_c = - vp9_highbd_10_sub_pixel_avg_variance8x8_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x16_c = - vp9_highbd_10_sub_pixel_avg_variance8x16_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x8_c = - vp9_highbd_10_sub_pixel_avg_variance16x8_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x16_c = - vp9_highbd_10_sub_pixel_avg_variance16x16_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x32_c = - vp9_highbd_10_sub_pixel_avg_variance16x32_c; -const 
vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x16_c = - vp9_highbd_10_sub_pixel_avg_variance32x16_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x32_c = - vp9_highbd_10_sub_pixel_avg_variance32x32_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x64_c = - vp9_highbd_10_sub_pixel_avg_variance32x64_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x32_c = - vp9_highbd_10_sub_pixel_avg_variance64x32_c; -const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x64_c = - vp9_highbd_10_sub_pixel_avg_variance64x64_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance4x4_c = - vp9_highbd_12_sub_pixel_avg_variance4x4_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance4x8_c = - vp9_highbd_12_sub_pixel_avg_variance4x8_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x4_c = - vp9_highbd_12_sub_pixel_avg_variance8x4_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x8_c = - vp9_highbd_12_sub_pixel_avg_variance8x8_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x16_c = - vp9_highbd_12_sub_pixel_avg_variance8x16_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x8_c = - vp9_highbd_12_sub_pixel_avg_variance16x8_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x16_c = - vp9_highbd_12_sub_pixel_avg_variance16x16_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x32_c = - vp9_highbd_12_sub_pixel_avg_variance16x32_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x16_c = - vp9_highbd_12_sub_pixel_avg_variance32x16_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x32_c = - vp9_highbd_12_sub_pixel_avg_variance32x32_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x64_c = - vp9_highbd_12_sub_pixel_avg_variance32x64_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x32_c = - vp9_highbd_12_sub_pixel_avg_variance64x32_c; -const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x64_c = - vp9_highbd_12_sub_pixel_avg_variance64x64_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance4x4_c = - vp9_highbd_sub_pixel_avg_variance4x4_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance4x8_c = - vp9_highbd_sub_pixel_avg_variance4x8_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x4_c = - vp9_highbd_sub_pixel_avg_variance8x4_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x8_c = - vp9_highbd_sub_pixel_avg_variance8x8_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x16_c = - vp9_highbd_sub_pixel_avg_variance8x16_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x8_c = - vp9_highbd_sub_pixel_avg_variance16x8_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x16_c = - vp9_highbd_sub_pixel_avg_variance16x16_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x32_c = - vp9_highbd_sub_pixel_avg_variance16x32_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x16_c = - vp9_highbd_sub_pixel_avg_variance32x16_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x32_c = - vp9_highbd_sub_pixel_avg_variance32x32_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x64_c = - vp9_highbd_sub_pixel_avg_variance32x64_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x32_c = - vp9_highbd_sub_pixel_avg_variance64x32_c; -const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x64_c = - 
vp9_highbd_sub_pixel_avg_variance64x64_c; -INSTANTIATE_TEST_CASE_P( - C, VP9SubpelAvgVarianceHighTest, - ::testing::Values( - make_tuple(2, 2, highbd_10_subpel_avg_variance4x4_c, 10), - make_tuple(2, 3, highbd_10_subpel_avg_variance4x8_c, 10), - make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_c, 10), - make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_c, 10), - make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_c, 10), - make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_c, 10), - make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_c, 10), - make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_c, 10), - make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_c, 10), - make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_c, 10), - make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_c, 10), - make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_c, 10), - make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_c, 10), - make_tuple(2, 2, highbd_12_subpel_avg_variance4x4_c, 12), - make_tuple(2, 3, highbd_12_subpel_avg_variance4x8_c, 12), - make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_c, 12), - make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_c, 12), - make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_c, 12), - make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_c, 12), - make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_c, 12), - make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_c, 12), - make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_c, 12), - make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_c, 12), - make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_c, 12), - make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_c, 12), - make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_c, 12), - make_tuple(2, 2, highbd_subpel_avg_variance4x4_c, 8), - make_tuple(2, 3, highbd_subpel_avg_variance4x8_c, 8), - make_tuple(3, 2, highbd_subpel_avg_variance8x4_c, 8), - make_tuple(3, 3, highbd_subpel_avg_variance8x8_c, 8), - make_tuple(3, 4, highbd_subpel_avg_variance8x16_c, 8), - make_tuple(4, 3, highbd_subpel_avg_variance16x8_c, 8), - make_tuple(4, 4, highbd_subpel_avg_variance16x16_c, 8), - make_tuple(4, 5, highbd_subpel_avg_variance16x32_c, 8), - make_tuple(5, 4, highbd_subpel_avg_variance32x16_c, 8), - make_tuple(5, 5, highbd_subpel_avg_variance32x32_c, 8), - make_tuple(5, 6, highbd_subpel_avg_variance32x64_c, 8), - make_tuple(6, 5, highbd_subpel_avg_variance64x32_c, 8), - make_tuple(6, 6, highbd_subpel_avg_variance64x64_c, 8))); -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif // CONFIG_VP9_ENCODER - -#if CONFIG_VP8_ENCODER -#if HAVE_MMX -const SubpixVarMxNFunc subpel_variance16x16_mmx = - vp8_sub_pixel_variance16x16_mmx; -const SubpixVarMxNFunc subpel_variance16x8_mmx = vp8_sub_pixel_variance16x8_mmx; -const SubpixVarMxNFunc subpel_variance8x16_mmx = vp8_sub_pixel_variance8x16_mmx; -const SubpixVarMxNFunc subpel_variance8x8_mmx = vp8_sub_pixel_variance8x8_mmx; -const SubpixVarMxNFunc subpel_variance4x4_mmx = vp8_sub_pixel_variance4x4_mmx; -INSTANTIATE_TEST_CASE_P( - MMX, VP8SubpelVarianceTest, - ::testing::Values(make_tuple(4, 4, subpel_variance16x16_mmx, 0), - make_tuple(4, 3, subpel_variance16x8_mmx, 0), - make_tuple(3, 4, subpel_variance8x16_mmx, 0), - make_tuple(3, 3, subpel_variance8x8_mmx, 0), - make_tuple(2, 2, subpel_variance4x4_mmx, 0))); -#endif // HAVE_MMX -#endif // CONFIG_VP8_ENCODER -#if CONFIG_VP9_ENCODER -#if HAVE_SSE2 #if CONFIG_USE_X86INC -const SubpixVarMxNFunc subpel_variance4x4_sse = vp9_sub_pixel_variance4x4_sse; -const SubpixVarMxNFunc subpel_variance4x8_sse = 
vp9_sub_pixel_variance4x8_sse; -const SubpixVarMxNFunc subpel_variance8x4_sse2 = vp9_sub_pixel_variance8x4_sse2; -const SubpixVarMxNFunc subpel_variance8x8_sse2 = vp9_sub_pixel_variance8x8_sse2; -const SubpixVarMxNFunc subpel_variance8x16_sse2 = - vp9_sub_pixel_variance8x16_sse2; -const SubpixVarMxNFunc subpel_variance16x8_sse2 = - vp9_sub_pixel_variance16x8_sse2; -const SubpixVarMxNFunc subpel_variance16x16_sse2 = - vp9_sub_pixel_variance16x16_sse2; -const SubpixVarMxNFunc subpel_variance16x32_sse2 = - vp9_sub_pixel_variance16x32_sse2; -const SubpixVarMxNFunc subpel_variance32x16_sse2 = - vp9_sub_pixel_variance32x16_sse2; -const SubpixVarMxNFunc subpel_variance32x32_sse2 = - vp9_sub_pixel_variance32x32_sse2; -const SubpixVarMxNFunc subpel_variance32x64_sse2 = - vp9_sub_pixel_variance32x64_sse2; -const SubpixVarMxNFunc subpel_variance64x32_sse2 = - vp9_sub_pixel_variance64x32_sse2; -const SubpixVarMxNFunc subpel_variance64x64_sse2 = - vp9_sub_pixel_variance64x64_sse2; -INSTANTIATE_TEST_CASE_P( - SSE2, VP9SubpelVarianceTest, - ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse, 0), - make_tuple(2, 3, subpel_variance4x8_sse, 0), - make_tuple(3, 2, subpel_variance8x4_sse2, 0), - make_tuple(3, 3, subpel_variance8x8_sse2, 0), - make_tuple(3, 4, subpel_variance8x16_sse2, 0), - make_tuple(4, 3, subpel_variance16x8_sse2, 0), - make_tuple(4, 4, subpel_variance16x16_sse2, 0), - make_tuple(4, 5, subpel_variance16x32_sse2, 0), - make_tuple(5, 4, subpel_variance32x16_sse2, 0), - make_tuple(5, 5, subpel_variance32x32_sse2, 0), - make_tuple(5, 6, subpel_variance32x64_sse2, 0), - make_tuple(6, 5, subpel_variance64x32_sse2, 0), - make_tuple(6, 6, subpel_variance64x64_sse2, 0))); -const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse = - vp9_sub_pixel_avg_variance4x4_sse; -const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse = - vp9_sub_pixel_avg_variance4x8_sse; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 = - vp9_sub_pixel_avg_variance8x4_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 = - vp9_sub_pixel_avg_variance8x8_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 = - vp9_sub_pixel_avg_variance8x16_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 = - vp9_sub_pixel_avg_variance16x8_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 = - vp9_sub_pixel_avg_variance16x16_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 = - vp9_sub_pixel_avg_variance16x32_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 = - vp9_sub_pixel_avg_variance32x16_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 = - vp9_sub_pixel_avg_variance32x32_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 = - vp9_sub_pixel_avg_variance32x64_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 = - vp9_sub_pixel_avg_variance64x32_sse2; -const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 = - vp9_sub_pixel_avg_variance64x64_sse2; -INSTANTIATE_TEST_CASE_P( - SSE2, VP9SubpelAvgVarianceTest, - ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse, 0), - make_tuple(2, 3, subpel_avg_variance4x8_sse, 0), - make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0), - make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0), - make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0), - make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0), - make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0), - make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0), - 
make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
-                      make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
-                      make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0)));
-#if CONFIG_VP9_HIGHBITDEPTH
-const SubpixVarMxNFunc highbd_subpel_variance8x4_sse2 =
-    vp9_highbd_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance8x8_sse2 =
-    vp9_highbd_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance8x16_sse2 =
-    vp9_highbd_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance16x8_sse2 =
-    vp9_highbd_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance16x16_sse2 =
-    vp9_highbd_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance16x32_sse2 =
-    vp9_highbd_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance32x16_sse2 =
-    vp9_highbd_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance32x32_sse2 =
-    vp9_highbd_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance32x64_sse2 =
-    vp9_highbd_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance64x32_sse2 =
-    vp9_highbd_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc highbd_subpel_variance64x64_sse2 =
-    vp9_highbd_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
-    vp9_highbd_10_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
-    vp9_highbd_10_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
-    vp9_highbd_10_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
-    vp9_highbd_10_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
-    vp9_highbd_10_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
-    vp9_highbd_10_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
-    vp9_highbd_10_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
-    vp9_highbd_10_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
-    vp9_highbd_10_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
-    vp9_highbd_10_sub_pixel_variance64x32_sse2;
-const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
-    vp9_highbd_10_sub_pixel_variance64x64_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
-    vp9_highbd_12_sub_pixel_variance8x4_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
-    vp9_highbd_12_sub_pixel_variance8x8_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
-    vp9_highbd_12_sub_pixel_variance8x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
-    vp9_highbd_12_sub_pixel_variance16x8_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
-    vp9_highbd_12_sub_pixel_variance16x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
-    vp9_highbd_12_sub_pixel_variance16x32_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
-    vp9_highbd_12_sub_pixel_variance32x16_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
-    vp9_highbd_12_sub_pixel_variance32x32_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
-    vp9_highbd_12_sub_pixel_variance32x64_sse2;
-const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
-    vp9_highbd_12_sub_pixel_variance64x32_sse2;
const SubpixVarMxNFunc highbd_12_subpel_variance64x64_sse2 =
-    vp9_highbd_12_sub_pixel_variance64x64_sse2;
+    vpx_highbd_12_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance64x32_sse2 =
+    vpx_highbd_12_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x64_sse2 =
+    vpx_highbd_12_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x32_sse2 =
+    vpx_highbd_12_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance32x16_sse2 =
+    vpx_highbd_12_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x32_sse2 =
+    vpx_highbd_12_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x16_sse2 =
+    vpx_highbd_12_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance16x8_sse2 =
+    vpx_highbd_12_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x16_sse2 =
+    vpx_highbd_12_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x8_sse2 =
+    vpx_highbd_12_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_12_subpel_variance8x4_sse2 =
+    vpx_highbd_12_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x64_sse2 =
+    vpx_highbd_10_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance64x32_sse2 =
+    vpx_highbd_10_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x64_sse2 =
+    vpx_highbd_10_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x32_sse2 =
+    vpx_highbd_10_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance32x16_sse2 =
+    vpx_highbd_10_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x32_sse2 =
+    vpx_highbd_10_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x16_sse2 =
+    vpx_highbd_10_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance16x8_sse2 =
+    vpx_highbd_10_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x16_sse2 =
+    vpx_highbd_10_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x8_sse2 =
+    vpx_highbd_10_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_10_subpel_variance8x4_sse2 =
+    vpx_highbd_10_sub_pixel_variance8x4_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x64_sse2 =
+    vpx_highbd_8_sub_pixel_variance64x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance64x32_sse2 =
+    vpx_highbd_8_sub_pixel_variance64x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x64_sse2 =
+    vpx_highbd_8_sub_pixel_variance32x64_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x32_sse2 =
+    vpx_highbd_8_sub_pixel_variance32x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance32x16_sse2 =
+    vpx_highbd_8_sub_pixel_variance32x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x32_sse2 =
+    vpx_highbd_8_sub_pixel_variance16x32_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x16_sse2 =
+    vpx_highbd_8_sub_pixel_variance16x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance16x8_sse2 =
+    vpx_highbd_8_sub_pixel_variance16x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x16_sse2 =
+    vpx_highbd_8_sub_pixel_variance8x16_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x8_sse2 =
+    vpx_highbd_8_sub_pixel_variance8x8_sse2;
+const SubpixVarMxNFunc highbd_8_subpel_variance8x4_sse2 =
+    vpx_highbd_8_sub_pixel_variance8x4_sse2;
INSTANTIATE_TEST_CASE_P(
-    SSE2, VP9SubpelVarianceHighTest,
-    ::testing::Values(make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
-                      make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
-                      make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
-                      make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
-                      make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
-                      make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
-                      make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
-                      make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
-                      make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
-                      make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
-                      make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
-                      make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
-                      make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
-                      make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
-                      make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
-                      make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
-                      make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
-                      make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
-                      make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
-                      make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+    SSE2, VpxHBDSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
                      make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
-                      make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
-                      make_tuple(3, 2, highbd_subpel_variance8x4_sse2, 8),
-                      make_tuple(3, 3, highbd_subpel_variance8x8_sse2, 8),
-                      make_tuple(3, 4, highbd_subpel_variance8x16_sse2, 8),
-                      make_tuple(4, 3, highbd_subpel_variance16x8_sse2, 8),
-                      make_tuple(4, 4, highbd_subpel_variance16x16_sse2, 8),
-                      make_tuple(4, 5, highbd_subpel_variance16x32_sse2, 8),
-                      make_tuple(5, 4, highbd_subpel_variance32x16_sse2, 8),
-                      make_tuple(5, 5, highbd_subpel_variance32x32_sse2, 8),
-                      make_tuple(5, 6, highbd_subpel_variance32x64_sse2, 8),
-                      make_tuple(6, 5, highbd_subpel_variance64x32_sse2, 8),
-                      make_tuple(6, 6, highbd_subpel_variance64x64_sse2, 8)));
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x4_sse2 =
-    vp9_highbd_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x8_sse2 =
-    vp9_highbd_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x16_sse2 =
-    vp9_highbd_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x8_sse2 =
-    vp9_highbd_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x16_sse2 =
-    vp9_highbd_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x32_sse2 =
-    vp9_highbd_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x16_sse2 =
-    vp9_highbd_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x32_sse2 =
-    vp9_highbd_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x64_sse2 =
-    vp9_highbd_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x32_sse2 =
-    vp9_highbd_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x64_sse2 =
-    vp9_highbd_sub_pixel_avg_variance64x64_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x4_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x8_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x16_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x8_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x16_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x32_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x16_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x32_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x64_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x32_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x64_sse2 =
-    vp9_highbd_10_sub_pixel_avg_variance64x64_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x4_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance8x4_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x8_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance8x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x16_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance8x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x8_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance16x8_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x16_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance16x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x32_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance16x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x16_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance32x16_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x32_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance32x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x64_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance32x64_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x32_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance64x32_sse2;
-const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x64_sse2 =
-    vp9_highbd_12_sub_pixel_avg_variance64x64_sse2;
+                      make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
+                      make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
+                      make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
+                      make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
+                      make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
+                      make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
+                      make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
+                      make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
+                      make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
+                      make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
+                      make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
+                      make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
+                      make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
+                      make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
+                      make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
+                      make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
+                      make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
+                      make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
+                      make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
+                      make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
+                      make_tuple(6, 6, highbd_8_subpel_variance64x64_sse2, 8),
+                      make_tuple(6, 5, highbd_8_subpel_variance64x32_sse2, 8),
+                      make_tuple(5, 6, highbd_8_subpel_variance32x64_sse2, 8),
+                      make_tuple(5, 5, highbd_8_subpel_variance32x32_sse2, 8),
+                      make_tuple(5, 4, highbd_8_subpel_variance32x16_sse2, 8),
+                      make_tuple(4, 5, highbd_8_subpel_variance16x32_sse2, 8),
+                      make_tuple(4, 4, highbd_8_subpel_variance16x16_sse2, 8),
+                      make_tuple(4, 3, highbd_8_subpel_variance16x8_sse2, 8),
+                      make_tuple(3, 4, highbd_8_subpel_variance8x16_sse2, 8),
+                      make_tuple(3, 3, highbd_8_subpel_variance8x8_sse2, 8),
+                      make_tuple(3, 2, highbd_8_subpel_variance8x4_sse2, 8)));
+
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x64_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance64x32_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x64_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x32_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance32x16_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x32_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x16_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance16x8_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x16_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x8_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_12_subpel_avg_variance8x4_sse2 =
+    vpx_highbd_12_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x64_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance64x32_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x64_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x32_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance32x16_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x32_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x16_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance16x8_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x16_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x8_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_10_subpel_avg_variance8x4_sse2 =
+    vpx_highbd_10_sub_pixel_avg_variance8x4_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x64_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance64x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance64x32_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance64x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x64_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance32x64_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x32_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance32x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance32x16_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance32x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x32_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance16x32_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x16_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance16x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance16x8_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance16x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x16_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance8x16_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x8_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance8x8_sse2;
+const SubpixAvgVarMxNFunc highbd_8_subpel_avg_variance8x4_sse2 =
+    vpx_highbd_8_sub_pixel_avg_variance8x4_sse2;
INSTANTIATE_TEST_CASE_P(
-    SSE2, VP9SubpelAvgVarianceHighTest,
+    SSE2, VpxHBDSubpelAvgVarianceTest,
    ::testing::Values(
-        make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
-        make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
-        make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
-        make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
-        make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
-        make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
-        make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
-        make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
-        make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
-        make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
-        make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
-        make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
-        make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
-        make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
-        make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
-        make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
-        make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
-        make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
-        make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
-        make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
-        make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
-        make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
-        make_tuple(3, 2, highbd_subpel_avg_variance8x4_sse2, 8),
-        make_tuple(3, 3, highbd_subpel_avg_variance8x8_sse2, 8),
-        make_tuple(3, 4, highbd_subpel_avg_variance8x16_sse2, 8),
-        make_tuple(4, 3, highbd_subpel_avg_variance16x8_sse2, 8),
-        make_tuple(4, 4, highbd_subpel_avg_variance16x16_sse2, 8),
-        make_tuple(4, 5, highbd_subpel_avg_variance16x32_sse2, 8),
-        make_tuple(5, 4, highbd_subpel_avg_variance32x16_sse2, 8),
-        make_tuple(5, 5, highbd_subpel_avg_variance32x32_sse2, 8),
-        make_tuple(5, 6, highbd_subpel_avg_variance32x64_sse2, 8),
-        make_tuple(6, 5, highbd_subpel_avg_variance64x32_sse2, 8),
-        make_tuple(6, 6, highbd_subpel_avg_variance64x64_sse2, 8)));
-#endif // CONFIG_VP9_HIGHBITDEPTH
+        make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
+        make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
+        make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
+        make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
+        make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
+        make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
+        make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
+        make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
+        make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
+        make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
+        make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
+        make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
+        make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
+        make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
+        make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
+        make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
+        make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
+        make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
+        make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
+        make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
+        make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
+        make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
+        make_tuple(6, 6, highbd_8_subpel_avg_variance64x64_sse2, 8),
+        make_tuple(6, 5, highbd_8_subpel_avg_variance64x32_sse2, 8),
+        make_tuple(5, 6, highbd_8_subpel_avg_variance32x64_sse2, 8),
+        make_tuple(5, 5, highbd_8_subpel_avg_variance32x32_sse2, 8),
+        make_tuple(5, 4, highbd_8_subpel_avg_variance32x16_sse2, 8),
+        make_tuple(4, 5, highbd_8_subpel_avg_variance16x32_sse2, 8),
+        make_tuple(4, 4, highbd_8_subpel_avg_variance16x16_sse2, 8),
+        make_tuple(4, 3, highbd_8_subpel_avg_variance16x8_sse2, 8),
+        make_tuple(3, 4, highbd_8_subpel_avg_variance8x16_sse2, 8),
+        make_tuple(3, 3, highbd_8_subpel_avg_variance8x8_sse2, 8),
+        make_tuple(3, 2, highbd_8_subpel_avg_variance8x4_sse2, 8)));
#endif // CONFIG_USE_X86INC
+#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSE2
-#endif // CONFIG_VP9_ENCODER
-
-#if CONFIG_VP8_ENCODER
-#if HAVE_SSE2
-const SubpixVarMxNFunc vp8_subpel_variance16x16_sse2 =
-    vp8_sub_pixel_variance16x16_wmt;
-const SubpixVarMxNFunc vp8_subpel_variance16x8_sse2 =
-    vp8_sub_pixel_variance16x8_wmt;
-const SubpixVarMxNFunc vp8_subpel_variance8x16_sse2 =
-    vp8_sub_pixel_variance8x16_wmt;
-const SubpixVarMxNFunc vp8_subpel_variance8x8_sse2 =
-    vp8_sub_pixel_variance8x8_wmt;
-const SubpixVarMxNFunc vp8_subpel_variance4x4_sse2 =
-    vp8_sub_pixel_variance4x4_wmt;
-INSTANTIATE_TEST_CASE_P(
-    SSE2, VP8SubpelVarianceTest,
-    ::testing::Values(make_tuple(2, 2, vp8_subpel_variance4x4_sse2, 0),
-                      make_tuple(3, 3, vp8_subpel_variance8x8_sse2, 0),
-                      make_tuple(3, 4, vp8_subpel_variance8x16_sse2, 0),
-                      make_tuple(4, 3, vp8_subpel_variance16x8_sse2, 0),
-                      make_tuple(4, 4, vp8_subpel_variance16x16_sse2, 0)));
-#endif // HAVE_SSE2
-#endif // CONFIG_VP8_ENCODER
-#if CONFIG_VP9_ENCODER
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
-const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
-    vp9_sub_pixel_variance4x4_ssse3;
-const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
-    vp9_sub_pixel_variance4x8_ssse3;
-const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
-    vp9_sub_pixel_variance8x4_ssse3;
-const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
-    vp9_sub_pixel_variance8x8_ssse3;
-const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
-    vp9_sub_pixel_variance8x16_ssse3;
-const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
-    vp9_sub_pixel_variance16x8_ssse3;
-const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
-    vp9_sub_pixel_variance16x16_ssse3;
-const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
-    vp9_sub_pixel_variance16x32_ssse3;
-const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
-    vp9_sub_pixel_variance32x16_ssse3;
-const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
-    vp9_sub_pixel_variance32x32_ssse3;
-const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
-    vp9_sub_pixel_variance32x64_ssse3;
-const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
-    vp9_sub_pixel_variance64x32_ssse3;
const SubpixVarMxNFunc subpel_variance64x64_ssse3 =
-    vp9_sub_pixel_variance64x64_ssse3;
+    vpx_sub_pixel_variance64x64_ssse3;
+const SubpixVarMxNFunc subpel_variance64x32_ssse3 =
+    vpx_sub_pixel_variance64x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x64_ssse3 =
+    vpx_sub_pixel_variance32x64_ssse3;
+const SubpixVarMxNFunc subpel_variance32x32_ssse3 =
+    vpx_sub_pixel_variance32x32_ssse3;
+const SubpixVarMxNFunc subpel_variance32x16_ssse3 =
+    vpx_sub_pixel_variance32x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x32_ssse3 =
+    vpx_sub_pixel_variance16x32_ssse3;
+const SubpixVarMxNFunc subpel_variance16x16_ssse3 =
+    vpx_sub_pixel_variance16x16_ssse3;
+const SubpixVarMxNFunc subpel_variance16x8_ssse3 =
+    vpx_sub_pixel_variance16x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x16_ssse3 =
+    vpx_sub_pixel_variance8x16_ssse3;
+const SubpixVarMxNFunc subpel_variance8x8_ssse3 =
+    vpx_sub_pixel_variance8x8_ssse3;
+const SubpixVarMxNFunc subpel_variance8x4_ssse3 =
+    vpx_sub_pixel_variance8x4_ssse3;
+const SubpixVarMxNFunc subpel_variance4x8_ssse3 =
+    vpx_sub_pixel_variance4x8_ssse3;
+const SubpixVarMxNFunc subpel_variance4x4_ssse3 =
+    vpx_sub_pixel_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
-    SSSE3, VP9SubpelVarianceTest,
-    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3, 0),
-                      make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
-                      make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
-                      make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
-                      make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
-                      make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
-                      make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
-                      make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
-                      make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
-                      make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
-                      make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+    SSSE3, VpxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_ssse3, 0),
                      make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
-                      make_tuple(6, 6, subpel_variance64x64_ssse3, 0)));
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
-    vp9_sub_pixel_avg_variance4x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
-    vp9_sub_pixel_avg_variance4x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
-    vp9_sub_pixel_avg_variance8x4_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
-    vp9_sub_pixel_avg_variance8x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
-    vp9_sub_pixel_avg_variance8x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
-    vp9_sub_pixel_avg_variance16x8_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
-    vp9_sub_pixel_avg_variance16x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
-    vp9_sub_pixel_avg_variance16x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
-    vp9_sub_pixel_avg_variance32x16_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
-    vp9_sub_pixel_avg_variance32x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
-    vp9_sub_pixel_avg_variance32x64_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
-    vp9_sub_pixel_avg_variance64x32_ssse3;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
-    vp9_sub_pixel_avg_variance64x64_ssse3;
+                      make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
+                      make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
+                      make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
+                      make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
+                      make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
+                      make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
+                      make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
+                      make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
+                      make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
+                      make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
+                      make_tuple(2, 2, subpel_variance4x4_ssse3, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_ssse3 =
+    vpx_sub_pixel_avg_variance64x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance64x32_ssse3 =
+    vpx_sub_pixel_avg_variance64x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x64_ssse3 =
+    vpx_sub_pixel_avg_variance32x64_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_ssse3 =
+    vpx_sub_pixel_avg_variance32x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x16_ssse3 =
+    vpx_sub_pixel_avg_variance32x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x32_ssse3 =
+    vpx_sub_pixel_avg_variance16x32_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x16_ssse3 =
+    vpx_sub_pixel_avg_variance16x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance16x8_ssse3 =
+    vpx_sub_pixel_avg_variance16x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x16_ssse3 =
+    vpx_sub_pixel_avg_variance8x16_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x8_ssse3 =
+    vpx_sub_pixel_avg_variance8x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance8x4_ssse3 =
+    vpx_sub_pixel_avg_variance8x4_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x8_ssse3 =
+    vpx_sub_pixel_avg_variance4x8_ssse3;
+const SubpixAvgVarMxNFunc subpel_avg_variance4x4_ssse3 =
+    vpx_sub_pixel_avg_variance4x4_ssse3;
INSTANTIATE_TEST_CASE_P(
-    SSSE3, VP9SubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0),
-                      make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
-                      make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
-                      make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
-                      make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
-                      make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
-                      make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
-                      make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
-                      make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
-                      make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
-                      make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+    SSSE3, VpxSubpelAvgVarianceTest,
+    ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0),
                      make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
-                      make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0)));
+                      make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
+                      make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
+                      make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
+                      make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
+                      make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
+                      make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
+                      make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
+                      make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
+                      make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
+                      make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
+                      make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0)));
#endif // CONFIG_USE_X86INC
#endif // HAVE_SSSE3
-#endif // CONFIG_VP9_ENCODER
-
-#if CONFIG_VP8_ENCODER
-#if HAVE_SSSE3
-const SubpixVarMxNFunc vp8_subpel_variance16x16_ssse3 =
-    vp8_sub_pixel_variance16x16_ssse3;
-const SubpixVarMxNFunc vp8_subpel_variance16x8_ssse3 =
-    vp8_sub_pixel_variance16x8_ssse3;
-INSTANTIATE_TEST_CASE_P(
-    SSSE3, VP8SubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 3, vp8_subpel_variance16x8_ssse3, 0),
-                      make_tuple(4, 4, vp8_subpel_variance16x16_ssse3, 0)));
-#endif // HAVE_SSSE3
-#endif // CONFIG_VP8_ENCODER
#if HAVE_AVX2
const VarianceMxNFunc mse16x16_avx2 = vpx_mse16x16_avx2;
@@ -1910,39 +1828,46 @@ INSTANTIATE_TEST_CASE_P(
                      make_tuple(5, 4, variance32x16_avx2, 0),
                      make_tuple(4, 4, variance16x16_avx2, 0)));
-#if CONFIG_VP9_ENCODER
-const SubpixVarMxNFunc subpel_variance32x32_avx2 =
-    vp9_sub_pixel_variance32x32_avx2;
const SubpixVarMxNFunc subpel_variance64x64_avx2 =
-    vp9_sub_pixel_variance64x64_avx2;
+    vpx_sub_pixel_variance64x64_avx2;
+const SubpixVarMxNFunc subpel_variance32x32_avx2 =
+    vpx_sub_pixel_variance32x32_avx2;
INSTANTIATE_TEST_CASE_P(
-    AVX2, VP9SubpelVarianceTest,
-    ::testing::Values(make_tuple(5, 5, subpel_variance32x32_avx2, 0),
-                      make_tuple(6, 6, subpel_variance64x64_avx2, 0)));
-
-const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_avx2 =
-    vp9_sub_pixel_avg_variance32x32_avx2;
-const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_avx2 =
-    vp9_sub_pixel_avg_variance64x64_avx2;
+    AVX2, VpxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_avx2, 0),
+                      make_tuple(5, 5, subpel_variance32x32_avx2, 0)));
+
+const SubpixAvgVarMxNFunc subpel_avg_variance64x64_avx2 =
+    vpx_sub_pixel_avg_variance64x64_avx2;
+const SubpixAvgVarMxNFunc subpel_avg_variance32x32_avx2 =
+    vpx_sub_pixel_avg_variance32x32_avx2;
INSTANTIATE_TEST_CASE_P(
-    AVX2, VP9SubpelAvgVarianceTest,
-    ::testing::Values(make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0),
-                      make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0)));
-#endif // CONFIG_VP9_ENCODER
+    AVX2, VpxSubpelAvgVarianceTest,
+    ::testing::Values(make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0),
+                      make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0)));
#endif // HAVE_AVX2
-#if CONFIG_VP8_ENCODER
#if HAVE_MEDIA
+const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
+INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
+                        ::testing::Values(make_tuple(4, 4, mse16x16_media)));
+
+const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
+const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
+INSTANTIATE_TEST_CASE_P(
+    MEDIA, VpxVarianceTest,
+    ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
+                      make_tuple(3, 3, variance8x8_media, 0)));
+
const SubpixVarMxNFunc subpel_variance16x16_media =
-    vp8_sub_pixel_variance16x16_armv6;
+    vpx_sub_pixel_variance16x16_media;
const SubpixVarMxNFunc subpel_variance8x8_media =
-    vp8_sub_pixel_variance8x8_armv6;
+    vpx_sub_pixel_variance8x8_media;
INSTANTIATE_TEST_CASE_P(
-    MEDIA, VP8SubpelVarianceTest,
-    ::testing::Values(make_tuple(3, 3, subpel_variance8x8_media, 0),
-                      make_tuple(4, 4, subpel_variance16x16_media, 0)));
+    MEDIA, VpxSubpelVarianceTest,
+    ::testing::Values(make_tuple(4, 4, subpel_variance16x16_media, 0),
+                      make_tuple(3, 3, subpel_variance8x8_media, 0)));
#endif // HAVE_MEDIA
-#endif // CONFIG_VP8_ENCODER
#if HAVE_NEON
const Get4x4SseFunc get4x4sse_cs_neon = vpx_get4x4sse_cs_neon;
@@ -1972,46 +1897,21 @@ INSTANTIATE_TEST_CASE_P(
                      make_tuple(3, 4, variance8x16_neon, 0),
                      make_tuple(3, 3, variance8x8_neon, 0)));
-#if CONFIG_VP8_ENCODER
-#if HAVE_NEON_ASM
-const SubpixVarMxNFunc vp8_subpel_variance16x16_neon =
-    vp8_sub_pixel_variance16x16_neon;
-INSTANTIATE_TEST_CASE_P(
-    NEON, VP8SubpelVarianceTest,
-    ::testing::Values(make_tuple(4, 4, vp8_subpel_variance16x16_neon, 0)));
-#endif // HAVE_NEON_ASM
-#endif // CONFIG_VP8_ENCODER
-
-#if CONFIG_VP9_ENCODER
-const SubpixVarMxNFunc subpel_variance8x8_neon = vp9_sub_pixel_variance8x8_neon;
-const SubpixVarMxNFunc subpel_variance16x16_neon =
-    vp9_sub_pixel_variance16x16_neon;
-const SubpixVarMxNFunc subpel_variance32x32_neon =
-    vp9_sub_pixel_variance32x32_neon;
const SubpixVarMxNFunc subpel_variance64x64_neon =
-    vp9_sub_pixel_variance64x64_neon;
+    vpx_sub_pixel_variance64x64_neon;
+const SubpixVarMxNFunc subpel_variance32x32_neon =
+    vpx_sub_pixel_variance32x32_neon;
+const SubpixVarMxNFunc subpel_variance16x16_neon =
+    vpx_sub_pixel_variance16x16_neon;
+const SubpixVarMxNFunc subpel_variance8x8_neon = vpx_sub_pixel_variance8x8_neon;
INSTANTIATE_TEST_CASE_P(
-    NEON, VP9SubpelVarianceTest,
-    ::testing::Values(make_tuple(3, 3, subpel_variance8x8_neon, 0),
-                      make_tuple(4, 4, subpel_variance16x16_neon, 0),
+    NEON, VpxSubpelVarianceTest,
+    ::testing::Values(make_tuple(6, 6, subpel_variance64x64_neon, 0),
                      make_tuple(5, 5, subpel_variance32x32_neon, 0),
-                      make_tuple(6, 6, subpel_variance64x64_neon, 0)));
-#endif // CONFIG_VP9_ENCODER
+                      make_tuple(4, 4, subpel_variance16x16_neon, 0),
+                      make_tuple(3, 3, subpel_variance8x8_neon, 0)));
#endif // HAVE_NEON
-#if HAVE_MEDIA
-const VarianceMxNFunc mse16x16_media = vpx_mse16x16_media;
-INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
-                        ::testing::Values(make_tuple(4, 4, mse16x16_media)));
-
-const VarianceMxNFunc variance16x16_media = vpx_variance16x16_media;
-const VarianceMxNFunc variance8x8_media = vpx_variance8x8_media;
-INSTANTIATE_TEST_CASE_P(
-    MEDIA, VpxVarianceTest,
-    ::testing::Values(make_tuple(4, 4, variance16x16_media, 0),
-                      make_tuple(3, 3, variance8x8_media, 0)));
-#endif // HAVE_MEDIA
-
#if HAVE_MSA
INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
                        ::testing::Values(vpx_get_mb_ss_msa));
@@ -2059,29 +1959,28 @@ INSTANTIATE_TEST_CASE_P(
                      make_tuple(2, 3, variance4x8_msa, 0),
                      make_tuple(2, 2, variance4x4_msa, 0)));
-#if CONFIG_VP9_ENCODER
-const SubpixVarMxNFunc subpel_variance4x4_msa = vp9_sub_pixel_variance4x4_msa;
-const SubpixVarMxNFunc subpel_variance4x8_msa = vp9_sub_pixel_variance4x8_msa;
-const SubpixVarMxNFunc subpel_variance8x4_msa = vp9_sub_pixel_variance8x4_msa;
-const SubpixVarMxNFunc subpel_variance8x8_msa = vp9_sub_pixel_variance8x8_msa;
-const SubpixVarMxNFunc subpel_variance8x16_msa = vp9_sub_pixel_variance8x16_msa;
-const SubpixVarMxNFunc subpel_variance16x8_msa = vp9_sub_pixel_variance16x8_msa;
+const SubpixVarMxNFunc subpel_variance4x4_msa = vpx_sub_pixel_variance4x4_msa;
+const SubpixVarMxNFunc subpel_variance4x8_msa = vpx_sub_pixel_variance4x8_msa;
+const SubpixVarMxNFunc subpel_variance8x4_msa = vpx_sub_pixel_variance8x4_msa;
+const SubpixVarMxNFunc subpel_variance8x8_msa = vpx_sub_pixel_variance8x8_msa;
+const SubpixVarMxNFunc subpel_variance8x16_msa = vpx_sub_pixel_variance8x16_msa;
+const SubpixVarMxNFunc subpel_variance16x8_msa = vpx_sub_pixel_variance16x8_msa;
const SubpixVarMxNFunc subpel_variance16x16_msa =
-    vp9_sub_pixel_variance16x16_msa;
+    vpx_sub_pixel_variance16x16_msa;
const SubpixVarMxNFunc subpel_variance16x32_msa =
-    vp9_sub_pixel_variance16x32_msa;
+    vpx_sub_pixel_variance16x32_msa;
const SubpixVarMxNFunc subpel_variance32x16_msa =
-    vp9_sub_pixel_variance32x16_msa;
+    vpx_sub_pixel_variance32x16_msa;
const SubpixVarMxNFunc subpel_variance32x32_msa =
-    vp9_sub_pixel_variance32x32_msa;
+    vpx_sub_pixel_variance32x32_msa;
const SubpixVarMxNFunc subpel_variance32x64_msa =
-    vp9_sub_pixel_variance32x64_msa;
+    vpx_sub_pixel_variance32x64_msa;
const SubpixVarMxNFunc subpel_variance64x32_msa =
-    vp9_sub_pixel_variance64x32_msa;
+    vpx_sub_pixel_variance64x32_msa;
const SubpixVarMxNFunc subpel_variance64x64_msa =
-    vp9_sub_pixel_variance64x64_msa;
+    vpx_sub_pixel_variance64x64_msa;
INSTANTIATE_TEST_CASE_P(
-    MSA, VP9SubpelVarianceTest,
+    MSA, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
                      make_tuple(2, 3, subpel_variance4x8_msa, 0),
                      make_tuple(3, 2, subpel_variance8x4_msa, 0),
@@ -2095,6 +1994,5 @@ INSTANTIATE_TEST_CASE_P(
                      make_tuple(5, 6, subpel_variance32x64_msa, 0),
                      make_tuple(6, 5, subpel_variance64x32_msa, 0),
                      make_tuple(6, 6, subpel_variance64x64_msa, 0)));
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_MSA
} // namespace
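For reference, each make_tuple() entry above packs (log2 block width, log2 block height, function pointer, bit depth), with a bit depth of 0 selecting the ordinary 8-bit path, and after this patch every pointer resolves to a vpx_dsp symbol declared by the generated "./vpx_dsp_rtcd.h" header. Below is a minimal sketch of calling one of the relocated functions directly through the SubpixVarMxNFunc signature used by these tests; the standalone driver, buffer sizes, and pixel fill are illustrative assumptions rather than anything in the patch, and it has to be compiled inside a libvpx build tree so the generated header exists.

#include <cstdio>
#include <cstring>

#include "./vpx_dsp_rtcd.h"   // generated dispatch header declaring the vpx_dsp variance functions
#include "vpx/vpx_integer.h"  // fixed-width integer typedefs used by the API

int main() {
  // The reference block carries a one-pixel right/bottom border because a
  // non-zero sub-pel offset makes the bilinear filter read one extra row and
  // column (buffer layout here is an assumption for the sketch).
  static uint8_t ref[65 * 65];
  static uint8_t src[64 * 64];
  for (int i = 0; i < 65 * 65; ++i) ref[i] = static_cast<uint8_t>(i);
  std::memset(src, 128, sizeof(src));

  // xoffset/yoffset are eighth-pel positions in [0, 7], the same range the
  // RefTest loops sweep; an offset of 0/0 reduces to a plain variance.
  unsigned int sse = 0;
  const unsigned int var =
      vpx_sub_pixel_variance64x64_c(ref, 65, 4, 4, src, 64, &sse);
  std::printf("variance = %u, sse = %u\n", var, sse);
  return 0;
}

The optimized variants instantiated above (SSE2, SSSE3, AVX2, NEON, MSA, MEDIA) share this signature, which is what lets the parameterized tests compare each of them against the C reference at every sub-pel offset.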