summaryrefslogtreecommitdiff
path: root/chromium/third_party/libvpx
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-02-13 15:05:36 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-02-14 10:33:47 +0000
commite684a3455bcc29a6e3e66a004e352dea4e1141e7 (patch)
treed55b4003bde34d7d05f558f02cfd82b2a66a7aac /chromium/third_party/libvpx
parent2b94bfe47ccb6c08047959d1c26e392919550e86 (diff)
downloadqtwebengine-chromium-e684a3455bcc29a6e3e66a004e352dea4e1141e7.tar.gz
BASELINE: Update Chromium to 72.0.3626.110 and Ninja to 1.9.0
Change-Id: Ic57220b00ecc929a893c91f5cc552f5d3e99e922 Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/third_party/libvpx')
-rw-r--r--chromium/third_party/libvpx/BUILD.gn2
-rw-r--r--chromium/third_party/libvpx/README.chromium4
-rw-r--r--chromium/third_party/libvpx/libvpx_srcs.gni16
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm-neon/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm-neon/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm-neon/vpx_dsp_rtcd.h636
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm64/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm64/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/ios/arm64/vpx_dsp_rtcd.h636
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp8_rtcd.h498
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp9_rtcd.h12
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vpx_dsp_rtcd.h928
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm-neon/vpx_dsp_rtcd.h636
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm/vp8_rtcd.h182
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm/vp9_rtcd.h4
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm/vpx_dsp_rtcd.h344
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm64/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm64/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/linux/arm64/vpx_dsp_rtcd.h636
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp9_rtcd.h16
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vpx_dsp_rtcd.h1503
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp8_rtcd.h340
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp9_rtcd.h16
-rw-r--r--chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vpx_dsp_rtcd.h1503
-rw-r--r--chromium/third_party/libvpx/source/config/linux/generic/vp8_rtcd.h182
-rw-r--r--chromium/third_party/libvpx/source/config/linux/generic/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/linux/generic/vpx_dsp_rtcd.h1123
-rw-r--r--chromium/third_party/libvpx/source/config/linux/ia32/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/linux/ia32/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/linux/ia32/vpx_dsp_rtcd.h2551
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mips64el/vp8_rtcd.h182
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mips64el/vp9_rtcd.h4
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mips64el/vpx_dsp_rtcd.h344
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mipsel/vp8_rtcd.h182
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mipsel/vp9_rtcd.h4
-rw-r--r--chromium/third_party/libvpx/source/config/linux/mipsel/vpx_dsp_rtcd.h344
-rw-r--r--chromium/third_party/libvpx/source/config/linux/x64/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/linux/x64/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/linux/x64/vpx_dsp_rtcd.h2563
-rw-r--r--chromium/third_party/libvpx/source/config/mac/ia32/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/mac/ia32/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/mac/ia32/vpx_dsp_rtcd.h2551
-rw-r--r--chromium/third_party/libvpx/source/config/mac/x64/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/mac/x64/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/mac/x64/vpx_dsp_rtcd.h2563
-rw-r--r--chromium/third_party/libvpx/source/config/nacl/vp8_rtcd.h182
-rw-r--r--chromium/third_party/libvpx/source/config/nacl/vp9_rtcd.h8
-rw-r--r--chromium/third_party/libvpx/source/config/nacl/vpx_dsp_rtcd.h1123
-rw-r--r--chromium/third_party/libvpx/source/config/vpx_version.h6
-rw-r--r--chromium/third_party/libvpx/source/config/win/ia32/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/win/ia32/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/win/ia32/vpx_dsp_rtcd.h2551
-rw-r--r--chromium/third_party/libvpx/source/config/win/x64/vp8_rtcd.h488
-rw-r--r--chromium/third_party/libvpx/source/config/win/x64/vp9_rtcd.h20
-rw-r--r--chromium/third_party/libvpx/source/config/win/x64/vpx_dsp_rtcd.h2563
-rw-r--r--chromium/third_party/libvpx/source/libvpx/CHANGELOG2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/README1
-rw-r--r--chromium/third_party/libvpx/source/libvpx/build/make/Makefile1
-rw-r--r--chromium/third_party/libvpx/source/libvpx/build/make/configure.sh66
-rwxr-xr-xchromium/third_party/libvpx/source/libvpx/build/make/gen_msvs_vcxproj.sh15
-rwxr-xr-xchromium/third_party/libvpx/source/libvpx/configure2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/examples/vpx_dec_fuzzer.cc163
-rw-r--r--chromium/third_party/libvpx/source/libvpx/libs.mk7
-rw-r--r--chromium/third_party/libvpx/source/libvpx/md5_utils.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/third_party/googletest/README.libvpx2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/third_party/googletest/src/src/gtest-printers.cc2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/tools/tiny_ssim.c21
-rw-r--r--chromium/third_party/libvpx/source/libvpx/tools_common.h1
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/alloccommon.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c24
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/blockd.h8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/entropymode.c4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.c28
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/idct_blk.c26
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/loopfilter_filters.c22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/mfqe.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c20
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c20
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/mips/msa/idct_msa.c58
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/onyx.h28
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/onyxd.h12
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/postproc.h6
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/reconinter.h23
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/reconintra4x4.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/rtcd_defs.pl64
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/treecoder.c5
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c336
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c29
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h33
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c24
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm276
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm414
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c1
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/decodeframe.c6
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/decoderthreading.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_if.c29
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_int.h8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/threading.c76
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/decoder/treereader.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c10
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/bitstream.c124
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.c26
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.h47
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/denoising.c47
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/encodemv.c11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.c98
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.h28
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/modecosts.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/onyx_if.c68
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/pickinter.c21
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/ratectrl.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.c12
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.h18
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.c70
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.h8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/encoder/treewriter.h4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/vp8_common.mk3
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/vp8_cx_iface.c17
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp8/vp8_dx_iface.c59
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_entropymv.h4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.c18
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.h3
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.c22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.h4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_onyxc_int.h9
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_postproc.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_reconinter.h6
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_rtcd_defs.pl4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_scale.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c157
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.h19
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c122
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c41
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.h22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c26
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c31
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_block.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_denoiser.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c123
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodemv.h2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.c696
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.h46
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.c166
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.h22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mbgraph.c3
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.c254
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.h6
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_noise_estimate.c4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_pickmode.c27
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.c202
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.h20
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.c91
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.h9
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rdopt.c12
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_resize.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.c59
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.h11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c30
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.h11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_temporal_filter.c118
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c16
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c24
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_avx2.c15
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c16
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c12
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.h1
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx/src/vpx_encoder.c11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx/vp8cx.h14
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx/vp8dx.h12
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx/vpx_encoder.h17
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx/vpx_image.h18
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/add_noise.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/quantize_neon.c62
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad4d_neon.c162
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad_neon.c272
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/subpel_variance_neon.c104
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/variance_neon.c170
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/avg.c160
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/bitwriter.h4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/deblock.c43
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/fwd_txfm.c67
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/inv_txfm.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/loopfilter.c192
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c3
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c3
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_dspr2.c4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/deblock_msa.c22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c54
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/variance_mmi.c639
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c8
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c2
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c111
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.c17
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.h17
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/sad.c126
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/subtract.c28
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.c557
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.h39
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp.mk6
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl665
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_filter.h9
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/avg_pred_sse2.c20
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h173
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h57
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h88
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/deblock_sse2.asm231
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_convolve_avx2.c473
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_intrapred_sse2.asm16
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c354
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_subpel_variance_impl_sse2.asm264
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm16
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c78
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_avx2.c190
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c516
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h11
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c141
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_avx.c38
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c15
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.h (renamed from chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_x86.h)16
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_ssse3.c36
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx2.c58
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx512.c26
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm222
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_avx2.c275
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c353
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c32
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c1005
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c579
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c490
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_scale/generic/yv12config.c36
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_scale/yv12config.h4
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpx_util/vpx_thread.h22
-rw-r--r--chromium/third_party/libvpx/source/libvpx/vpxdec.c51
248 files changed, 25057 insertions, 21288 deletions
diff --git a/chromium/third_party/libvpx/BUILD.gn b/chromium/third_party/libvpx/BUILD.gn
index c1b718ebead..afa6b7087e6 100644
--- a/chromium/third_party/libvpx/BUILD.gn
+++ b/chromium/third_party/libvpx/BUILD.gn
@@ -2,8 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//build/config/arm.gni")
import("//build/config/android/config.gni")
+import("//build/config/arm.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//third_party/libvpx/libvpx_srcs.gni")
import("//third_party/yasm/yasm_assemble.gni")
diff --git a/chromium/third_party/libvpx/README.chromium b/chromium/third_party/libvpx/README.chromium
index aeeb9580105..7f38600a651 100644
--- a/chromium/third_party/libvpx/README.chromium
+++ b/chromium/third_party/libvpx/README.chromium
@@ -5,9 +5,9 @@ License: BSD
License File: source/libvpx/LICENSE
Security Critical: yes
-Date: Thursday October 11 2018
+Date: Thursday November 29 2018
Branch: master
-Commit: e188b5435de71bcd602c378f1ac0441111f0f915
+Commit: 932f8fa04dc15f4adf16df37402556e8c4dc72e7
Description:
Contains the sources used to compile libvpx binaries used by Google Chrome and
diff --git a/chromium/third_party/libvpx/libvpx_srcs.gni b/chromium/third_party/libvpx/libvpx_srcs.gni
index 3bd17e20329..30560eacc6b 100644
--- a/chromium/third_party/libvpx/libvpx_srcs.gni
+++ b/chromium/third_party/libvpx/libvpx_srcs.gni
@@ -64,8 +64,6 @@ libvpx_srcs_x86 = [
"//third_party/libvpx/source/libvpx/vp8/common/vp8_loopfilter.c",
"//third_party/libvpx/source/libvpx/vp8/common/vp8_skin_detection.c",
"//third_party/libvpx/source/libvpx/vp8/common/vp8_skin_detection.h",
- "//third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c",
- "//third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h",
"//third_party/libvpx/source/libvpx/vp8/common/x86/loopfilter_x86.c",
"//third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c",
"//third_party/libvpx/source/libvpx/vp8/decoder/dboolhuff.c",
@@ -323,6 +321,7 @@ libvpx_srcs_x86 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/bitdepth_conversion_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_ssse3.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h",
@@ -333,7 +332,7 @@ libvpx_srcs_x86 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_ssse3.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h",
- "//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_x86.h",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/transpose_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/txfm_common_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c",
@@ -414,6 +413,7 @@ libvpx_srcs_x86_mmx = [
"//third_party/libvpx/source/libvpx/vpx_ports/emms_mmx.c",
]
libvpx_srcs_x86_sse2 = [
+ "//third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/encoder/x86/denoising_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c",
@@ -436,9 +436,11 @@ libvpx_srcs_x86_sse2 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/sum_squares_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c",
]
libvpx_srcs_x86_sse3 = []
libvpx_srcs_x86_ssse3 = [
@@ -538,8 +540,6 @@ libvpx_srcs_x86_64 = [
"//third_party/libvpx/source/libvpx/vp8/common/vp8_loopfilter.c",
"//third_party/libvpx/source/libvpx/vp8/common/vp8_skin_detection.c",
"//third_party/libvpx/source/libvpx/vp8/common/vp8_skin_detection.h",
- "//third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c",
- "//third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h",
"//third_party/libvpx/source/libvpx/vp8/common/x86/loopfilter_x86.c",
"//third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c",
"//third_party/libvpx/source/libvpx/vp8/decoder/dboolhuff.c",
@@ -797,6 +797,7 @@ libvpx_srcs_x86_64 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/bitdepth_conversion_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_ssse3.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h",
@@ -807,7 +808,7 @@ libvpx_srcs_x86_64 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_ssse3.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h",
- "//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_x86.h",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/transpose_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/txfm_common_sse2.h",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c",
@@ -893,6 +894,7 @@ libvpx_srcs_x86_64_assembly = [
libvpx_srcs_x86_64_mmx =
[ "//third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_mmx.c" ]
libvpx_srcs_x86_64_sse2 = [
+ "//third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/encoder/x86/denoising_sse2.c",
"//third_party/libvpx/source/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c",
@@ -915,9 +917,11 @@ libvpx_srcs_x86_64_sse2 = [
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/sum_squares_sse2.c",
"//third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c",
+ "//third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c",
]
libvpx_srcs_x86_64_sse3 = []
libvpx_srcs_x86_64_ssse3 = [
diff --git a/chromium/third_party/libvpx/source/config/ios/arm-neon/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm-neon/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm-neon/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm-neon/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/ios/arm-neon/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm-neon/vp9_rtcd.h
index 309c7808745..cdfebccf233 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm-neon/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm-neon/vp9_rtcd.h
@@ -140,12 +140,12 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/ios/arm-neon/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm-neon/vpx_dsp_rtcd.h
index 453e90e2a89..abd9cbd63d0 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm-neon/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm-neon/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,7 +723,7 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -996,12 +996,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -1035,35 +1035,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -1180,12 +1180,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -1221,12 +1221,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -1262,12 +1262,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -1303,12 +1303,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -1337,12 +1337,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -1371,12 +1371,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -1412,12 +1412,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -1453,12 +1453,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -1487,12 +1487,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -1521,12 +1521,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -1562,12 +1562,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -1603,12 +1603,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -1644,12 +1644,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -1755,17 +1755,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1773,17 +1773,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1791,17 +1791,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1809,17 +1809,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1827,17 +1827,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1845,17 +1845,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1863,17 +1863,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1881,17 +1881,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1899,17 +1899,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1917,17 +1917,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1935,17 +1935,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1953,17 +1953,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1971,17 +1971,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1989,208 +1989,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2219,243 +2219,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/ios/arm64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm64/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm64/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/ios/arm64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm64/vp9_rtcd.h
index 309c7808745..cdfebccf233 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm64/vp9_rtcd.h
@@ -140,12 +140,12 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/ios/arm64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/ios/arm64/vpx_dsp_rtcd.h
index 453e90e2a89..abd9cbd63d0 100644
--- a/chromium/third_party/libvpx/source/config/ios/arm64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/ios/arm64/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,7 +723,7 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -996,12 +996,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -1035,35 +1035,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -1180,12 +1180,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -1221,12 +1221,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -1262,12 +1262,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -1303,12 +1303,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -1337,12 +1337,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -1371,12 +1371,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -1412,12 +1412,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -1453,12 +1453,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -1487,12 +1487,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -1521,12 +1521,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -1562,12 +1562,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -1603,12 +1603,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -1644,12 +1644,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -1755,17 +1755,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1773,17 +1773,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1791,17 +1791,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1809,17 +1809,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1827,17 +1827,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1845,17 +1845,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1863,17 +1863,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1881,17 +1881,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1899,17 +1899,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1917,17 +1917,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1935,17 +1935,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1953,17 +1953,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1971,17 +1971,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1989,208 +1989,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2219,243 +2219,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp8_rtcd.h
index ca242d0d2ea..aa7a2070b68 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp8_rtcd.h
@@ -27,88 +27,88 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -116,9 +116,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -126,9 +126,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -137,58 +137,58 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
RTCD_EXTERN void (*vp8_copy_mem16x16)(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
RTCD_EXTERN void (*vp8_copy_mem8x4)(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
RTCD_EXTERN void (*vp8_copy_mem8x8)(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-RTCD_EXTERN void (*vp8_dc_only_idct_add)(short input,
- unsigned char* pred,
+RTCD_EXTERN void (*vp8_dc_only_idct_add)(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
int vp8_denoiser_filter_c(unsigned char* mc_running_avg_y,
@@ -243,15 +243,15 @@ RTCD_EXTERN int (*vp8_denoiser_filter_uv)(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
RTCD_EXTERN void (*vp8_dequant_idct_add)(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_uv_block_c(short* q,
@@ -289,9 +289,9 @@ RTCD_EXTERN void (*vp8_dequant_idct_add_y_block)(short* q,
int stride,
char* eobs);
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
-RTCD_EXTERN void (*vp8_dequantize_b)(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
+RTCD_EXTERN void (*vp8_dequantize_b)(struct blockd*, short* DQC);
int vp8_diamond_search_sad_c(struct macroblock* x,
struct block* b,
@@ -342,120 +342,120 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-RTCD_EXTERN void (*vp8_loop_filter_bh)(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_bh)(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-RTCD_EXTERN void (*vp8_loop_filter_bv)(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_bv)(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-RTCD_EXTERN void (*vp8_loop_filter_mbh)(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_mbh)(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-RTCD_EXTERN void (*vp8_loop_filter_mbv)(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_mbv)(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-RTCD_EXTERN void (*vp8_loop_filter_simple_bh)(unsigned char* y,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_simple_bh)(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-RTCD_EXTERN void (*vp8_loop_filter_simple_bv)(unsigned char* y,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_simple_bv)(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-RTCD_EXTERN void (*vp8_loop_filter_simple_mbh)(unsigned char* y,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_simple_mbh)(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-RTCD_EXTERN void (*vp8_loop_filter_simple_mbv)(unsigned char* y,
- int ystride,
+RTCD_EXTERN void (*vp8_loop_filter_simple_mbv)(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
int vp8_mbblock_error_c(struct macroblock* mb, int dc);
@@ -468,8 +468,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -487,106 +487,106 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
RTCD_EXTERN void (*vp8_short_fdct8x4)(short* input, short* output, int pitch);
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
RTCD_EXTERN void (*vp8_short_idct4x4llm)(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
-RTCD_EXTERN void (*vp8_short_inv_walsh4x4)(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
+RTCD_EXTERN void (*vp8_short_inv_walsh4x4)(short* input, short* mb_dqcoeff);
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
RTCD_EXTERN void (*vp8_short_walsh4x4)(short* input, short* output, int pitch);
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp9_rtcd.h
index 28fa6b5740c..e7c5c4ef4d4 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vp9_rtcd.h
@@ -162,16 +162,16 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
RTCD_EXTERN void (*vp9_iht16x16_256_add)(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht4x4_16_add_c(const tran_low_t* input,
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vpx_dsp_rtcd.h
index aea7215ee66..0f5ae3eb1f4 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon-cpu-detect/vpx_dsp_rtcd.h
@@ -320,422 +320,422 @@ RTCD_EXTERN void (*vpx_convolve_copy)(const uint8_t* src,
int h);
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d135_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d135_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d135_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d135_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_128_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_128_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_128_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_128_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_left_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_left_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_left_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_left_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_top_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_top_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_top_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_dc_top_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
@@ -796,51 +796,51 @@ RTCD_EXTERN void (*vpx_fdct8x8_1)(const int16_t* input,
int stride);
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
RTCD_EXTERN unsigned int (*vpx_get4x4sse_cs)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get8x8var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -850,54 +850,54 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_h_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_h_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_h_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_h_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
@@ -927,7 +927,7 @@ RTCD_EXTERN void (*vpx_hadamard_8x8)(const int16_t* src_diff,
int16_t* coeff);
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -1289,17 +1289,17 @@ RTCD_EXTERN void (*vpx_lpf_vertical_8_dual)(uint8_t* s,
const uint8_t* limit1,
const uint8_t* thresh1);
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-RTCD_EXTERN void (*vpx_mbpost_proc_across_ip)(unsigned char* dst,
+RTCD_EXTERN void (*vpx_mbpost_proc_across_ip)(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -1341,39 +1341,39 @@ RTCD_EXTERN void (*vpx_minmax_8x8)(const uint8_t* s,
int* max);
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -1526,17 +1526,17 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad16x16x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1578,17 +1578,17 @@ RTCD_EXTERN unsigned int (*vpx_sad16x32_avg)(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad16x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1630,17 +1630,17 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad16x8x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1682,17 +1682,17 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x16x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1727,17 +1727,17 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1772,17 +1772,17 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1824,17 +1824,17 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad4x4x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1876,17 +1876,17 @@ RTCD_EXTERN unsigned int (*vpx_sad4x8_avg)(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad4x8x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1921,17 +1921,17 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -1966,17 +1966,17 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -2018,17 +2018,17 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad8x16x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -2070,17 +2070,17 @@ RTCD_EXTERN unsigned int (*vpx_sad8x4_avg)(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad8x4x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -2122,17 +2122,17 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad8x8x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -2247,625 +2247,625 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2902,319 +2902,319 @@ RTCD_EXTERN uint64_t (*vpx_sum_squares_2d_i16)(const int16_t* src,
int size);
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_tm_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_tm_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_tm_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_tm_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_v_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_v_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_v_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_v_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon/vp9_rtcd.h
index 309c7808745..cdfebccf233 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon/vp9_rtcd.h
@@ -140,12 +140,12 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/arm-neon/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm-neon/vpx_dsp_rtcd.h
index 453e90e2a89..abd9cbd63d0 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm-neon/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm-neon/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,7 +723,7 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -996,12 +996,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -1035,35 +1035,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -1180,12 +1180,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -1221,12 +1221,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -1262,12 +1262,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -1303,12 +1303,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -1337,12 +1337,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -1371,12 +1371,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -1412,12 +1412,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -1453,12 +1453,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -1487,12 +1487,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -1521,12 +1521,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -1562,12 +1562,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -1603,12 +1603,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -1644,12 +1644,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -1755,17 +1755,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1773,17 +1773,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1791,17 +1791,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1809,17 +1809,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1827,17 +1827,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1845,17 +1845,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1863,17 +1863,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1881,17 +1881,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1899,17 +1899,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1917,17 +1917,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1935,17 +1935,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1953,17 +1953,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1971,17 +1971,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1989,208 +1989,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2219,243 +2219,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm/vp8_rtcd.h
index a84e3b3456c..5ec67483b65 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm/vp8_rtcd.h
@@ -27,44 +27,44 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -72,9 +72,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -82,9 +82,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -93,27 +93,27 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
@@ -139,7 +139,7 @@ int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
@@ -158,7 +158,7 @@ void vp8_dequant_idct_add_y_block_c(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_c
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -209,55 +209,55 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
@@ -271,8 +271,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -288,50 +288,50 @@ void vp8_short_fdct8x4_c(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_c
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_c
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm/vp9_rtcd.h
index 478101ff7e8..688066889c7 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm/vp9_rtcd.h
@@ -115,8 +115,8 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm/vpx_dsp_rtcd.h
index 063a8c70be0..dc7b59b79f8 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm/vpx_dsp_rtcd.h
@@ -139,253 +139,253 @@ void vpx_convolve_copy_c(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
@@ -418,7 +418,7 @@ void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_c
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -426,13 +426,13 @@ void vpx_get16x16var_c(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_c
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -443,25 +443,25 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
@@ -482,7 +482,7 @@ void vpx_hadamard_8x8_c(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_c
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -643,7 +643,7 @@ void vpx_lpf_vertical_8_dual_c(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -666,30 +666,30 @@ void vpx_minmax_8x8_c(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_c
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_c
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -764,7 +764,7 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_c
@@ -791,7 +791,7 @@ unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_c
@@ -818,7 +818,7 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_c
@@ -845,7 +845,7 @@ unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_c
@@ -865,7 +865,7 @@ unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_c
@@ -885,7 +885,7 @@ unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_c
@@ -912,7 +912,7 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_c
@@ -939,7 +939,7 @@ unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_c
@@ -959,7 +959,7 @@ unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_c
@@ -979,7 +979,7 @@ unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_c
@@ -1006,7 +1006,7 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_c
@@ -1033,7 +1033,7 @@ unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_c
@@ -1060,7 +1060,7 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_c
@@ -1154,9 +1154,9 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1164,9 +1164,9 @@ uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1174,9 +1174,9 @@ uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1184,9 +1184,9 @@ uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1194,9 +1194,9 @@ uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1204,9 +1204,9 @@ uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1214,9 +1214,9 @@ uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1224,9 +1224,9 @@ uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1234,9 +1234,9 @@ uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1244,9 +1244,9 @@ uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1254,9 +1254,9 @@ uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1264,9 +1264,9 @@ uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1274,9 +1274,9 @@ uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1284,117 +1284,117 @@ uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1414,146 +1414,146 @@ uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_c
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_c
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_c
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_c
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_c
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_c
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_c
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_c
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_c
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_c
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_c
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_c
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_c
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/arm64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm64/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm64/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/arm64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm64/vp9_rtcd.h
index 309c7808745..cdfebccf233 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm64/vp9_rtcd.h
@@ -140,12 +140,12 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/arm64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/arm64/vpx_dsp_rtcd.h
index 453e90e2a89..abd9cbd63d0 100644
--- a/chromium/third_party/libvpx/source/config/linux/arm64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/arm64/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,7 +723,7 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -996,12 +996,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -1035,35 +1035,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -1180,12 +1180,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -1221,12 +1221,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -1262,12 +1262,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -1303,12 +1303,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -1337,12 +1337,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -1371,12 +1371,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -1412,12 +1412,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -1453,12 +1453,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -1487,12 +1487,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -1521,12 +1521,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -1562,12 +1562,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -1603,12 +1603,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -1644,12 +1644,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -1755,17 +1755,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1773,17 +1773,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1791,17 +1791,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1809,17 +1809,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1827,17 +1827,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1845,17 +1845,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1863,17 +1863,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1881,17 +1881,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1899,17 +1899,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1917,17 +1917,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1935,17 +1935,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1953,17 +1953,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1971,17 +1971,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1989,208 +1989,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2219,243 +2219,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp9_rtcd.h
index 574cd7a7d94..a88b5f02691 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vp9_rtcd.h
@@ -165,13 +165,13 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_neon(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_neon
@@ -262,12 +262,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vpx_dsp_rtcd.h
index 13fe98c482b..50b633be4ae 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm-neon/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,13 +723,13 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -737,7 +737,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -745,38 +745,38 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -786,9 +786,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -797,9 +797,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
vpx_highbd_10_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -809,9 +809,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -821,9 +821,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -833,9 +833,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -844,9 +844,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
vpx_highbd_10_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -855,9 +855,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -867,9 +867,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -879,9 +879,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -890,9 +890,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
vpx_highbd_10_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -901,9 +901,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -912,9 +912,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -923,9 +923,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -933,9 +933,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_c
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -943,9 +943,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -953,9 +953,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_c
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -963,9 +963,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_c
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -973,9 +973,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_c
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -983,9 +983,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -993,9 +993,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1003,9 +1003,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1013,9 +1013,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_c
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1023,9 +1023,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1033,9 +1033,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1043,9 +1043,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1053,98 +1053,98 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_c
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1152,7 +1152,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1160,38 +1160,38 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1201,9 +1201,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1212,9 +1212,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
vpx_highbd_12_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1224,9 +1224,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1236,9 +1236,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1248,9 +1248,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1259,9 +1259,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
vpx_highbd_12_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1270,9 +1270,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1282,9 +1282,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1294,9 +1294,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1305,9 +1305,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
vpx_highbd_12_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1316,9 +1316,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1327,9 +1327,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1338,9 +1338,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1348,9 +1348,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_c
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1358,9 +1358,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1368,9 +1368,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_c
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1378,9 +1378,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_c
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1388,9 +1388,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_c
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1398,9 +1398,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1408,9 +1408,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1418,9 +1418,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_c
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1438,9 +1438,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1448,9 +1448,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1458,9 +1458,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1468,98 +1468,98 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_c
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1567,7 +1567,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1575,37 +1575,37 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1614,9 +1614,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1625,9 +1625,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1636,9 +1636,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1647,9 +1647,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1658,9 +1658,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1669,9 +1669,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1680,9 +1680,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1691,9 +1691,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1702,9 +1702,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1713,9 +1713,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1724,9 +1724,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1735,9 +1735,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1746,9 +1746,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1756,9 +1756,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1766,9 +1766,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1776,9 +1776,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1786,9 +1786,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1796,9 +1796,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1806,27 +1806,27 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1834,9 +1834,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1844,9 +1844,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1854,118 +1854,118 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_c
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -1987,7 +1987,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -1999,7 +1999,7 @@ void vpx_highbd_convolve8_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8 vpx_highbd_convolve8_neon
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
@@ -2013,7 +2013,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2025,7 +2025,7 @@ void vpx_highbd_convolve8_avg_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_neon
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
@@ -2039,7 +2039,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2051,7 +2051,7 @@ void vpx_highbd_convolve8_avg_horiz_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_neon
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
@@ -2065,7 +2065,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2077,7 +2077,7 @@ void vpx_highbd_convolve8_avg_vert_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_neon
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
@@ -2091,7 +2091,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2103,7 +2103,7 @@ void vpx_highbd_convolve8_horiz_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_neon
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
@@ -2117,7 +2117,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2129,7 +2129,7 @@ void vpx_highbd_convolve8_vert_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_neon
void vpx_highbd_convolve_avg_c(const uint16_t* src,
@@ -2143,7 +2143,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2155,7 +2155,7 @@ void vpx_highbd_convolve_avg_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_neon
void vpx_highbd_convolve_copy_c(const uint16_t* src,
@@ -2169,7 +2169,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2181,272 +2181,272 @@ void vpx_highbd_convolve_copy_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_neon
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_neon
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_neon
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_neon
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_neon
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_neon
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_neon
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_neon
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_neon
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_neon
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_neon
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_neon
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_neon
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2454,12 +2454,12 @@ void vpx_highbd_dc_left_predictor_16x16_neon(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_neon
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2467,120 +2467,120 @@ void vpx_highbd_dc_left_predictor_32x32_neon(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_neon
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_neon
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_neon
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_neon
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_neon
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_neon
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_neon
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_neon
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_neon
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_neon
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2624,53 +2624,68 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_highbd_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_neon
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_neon
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_neon
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_neon
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -3005,9 +3020,9 @@ void vpx_highbd_lpf_vertical_8_dual_neon(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_neon
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -3058,7 +3073,7 @@ unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c
@@ -3078,7 +3093,7 @@ unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c
@@ -3098,7 +3113,7 @@ unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c
@@ -3118,7 +3133,7 @@ unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c
@@ -3138,7 +3153,7 @@ unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c
@@ -3158,7 +3173,7 @@ unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c
@@ -3178,7 +3193,7 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c
@@ -3198,7 +3213,7 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c
@@ -3218,7 +3233,7 @@ unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c
@@ -3238,7 +3253,7 @@ unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c
@@ -3258,7 +3273,7 @@ unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c
@@ -3278,7 +3293,7 @@ unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c
@@ -3298,7 +3313,7 @@ unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c
@@ -3307,104 +3322,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_neon
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_neon
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_neon
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_neon
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_neon
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_neon
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_neon
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3678,12 +3693,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -3717,35 +3732,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -3862,12 +3877,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -3903,12 +3918,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -3944,12 +3959,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -3985,12 +4000,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -4019,12 +4034,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -4053,12 +4068,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -4094,12 +4109,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -4135,12 +4150,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -4169,12 +4184,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -4203,12 +4218,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -4244,12 +4259,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -4285,12 +4300,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -4326,12 +4341,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -4437,17 +4452,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4455,17 +4470,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4473,17 +4488,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4491,17 +4506,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4509,17 +4524,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4527,17 +4542,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4545,17 +4560,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4563,17 +4578,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4581,17 +4596,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4599,17 +4614,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4617,17 +4632,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4635,17 +4650,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4653,17 +4668,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4671,208 +4686,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -4901,243 +4916,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp8_rtcd.h
index 737afd52c16..8f2d3e5c228 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp8_rtcd.h
@@ -27,68 +27,68 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_neon
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_neon
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_neon
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_neon
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -96,9 +96,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -106,9 +106,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -117,44 +117,44 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_neon
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_neon
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_neon(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_neon
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_neon(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_neon(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_neon
@@ -196,11 +196,11 @@ int vp8_denoiser_filter_uv_neon(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_neon(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_neon
@@ -230,8 +230,8 @@ void vp8_dequant_idct_add_y_block_neon(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_neon
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_neon(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_neon(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_neon
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -283,91 +283,91 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_neon
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_neon
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_neon
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_neon(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_neon(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_neon
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_neon
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_neon
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbhs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbhs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_mbhs_neon
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_mbvs_neon(unsigned char* y,
- int ystride,
+void vp8_loop_filter_mbvs_neon(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_mbvs_neon
@@ -381,8 +381,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -400,81 +400,81 @@ void vp8_short_fdct8x4_neon(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_neon
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_neon(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_neon
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_neon(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_neon(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_neon
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_neon(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_neon
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_neon
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_neon
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_neon
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_neon(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_neon(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp9_rtcd.h
index 574cd7a7d94..a88b5f02691 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vp9_rtcd.h
@@ -165,13 +165,13 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_neon(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_neon
@@ -262,12 +262,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_neon(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_neon
diff --git a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vpx_dsp_rtcd.h
index 13fe98c482b..50b633be4ae 100644
--- a/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/chromeos-arm64/vpx_dsp_rtcd.h
@@ -235,349 +235,349 @@ void vpx_convolve_copy_neon(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_neon
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_neon
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_neon
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_neon
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d135_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_neon
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_neon
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_neon
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_neon
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_neon
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_neon
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_neon
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_neon
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_neon
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_neon
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_neon
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_neon
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_neon
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_neon
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_neon
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_neon
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_neon
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_neon
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_neon
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_neon
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_neon
@@ -621,13 +621,13 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -635,23 +635,23 @@ void vpx_get16x16var_neon(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_neon
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
unsigned int vpx_get4x4sse_cs_neon(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_neon
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -662,41 +662,41 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_neon
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_neon
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_neon
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_neon
@@ -723,13 +723,13 @@ void vpx_hadamard_8x8_neon(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_neon
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -737,7 +737,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -745,38 +745,38 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -786,9 +786,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -797,9 +797,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
vpx_highbd_10_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -809,9 +809,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -821,9 +821,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -833,9 +833,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -844,9 +844,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
vpx_highbd_10_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -855,9 +855,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -867,9 +867,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -879,9 +879,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -890,9 +890,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
vpx_highbd_10_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -901,9 +901,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -912,9 +912,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -923,9 +923,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -933,9 +933,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_c
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -943,9 +943,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -953,9 +953,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_c
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -963,9 +963,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_c
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -973,9 +973,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_c
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -983,9 +983,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -993,9 +993,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1003,9 +1003,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1013,9 +1013,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_c
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1023,9 +1023,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1033,9 +1033,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1043,9 +1043,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1053,98 +1053,98 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_c
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1152,7 +1152,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1160,38 +1160,38 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1201,9 +1201,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1212,9 +1212,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
vpx_highbd_12_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1224,9 +1224,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1236,9 +1236,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1248,9 +1248,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1259,9 +1259,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
vpx_highbd_12_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1270,9 +1270,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1282,9 +1282,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1294,9 +1294,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1305,9 +1305,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
vpx_highbd_12_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1316,9 +1316,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1327,9 +1327,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1338,9 +1338,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1348,9 +1348,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_c
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1358,9 +1358,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1368,9 +1368,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_c
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1378,9 +1378,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_c
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1388,9 +1388,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_c
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1398,9 +1398,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1408,9 +1408,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1418,9 +1418,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_c
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1438,9 +1438,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1448,9 +1448,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1458,9 +1458,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1468,98 +1468,98 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_c
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1567,7 +1567,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1575,37 +1575,37 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1614,9 +1614,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1625,9 +1625,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1636,9 +1636,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1647,9 +1647,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1658,9 +1658,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1669,9 +1669,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1680,9 +1680,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1691,9 +1691,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1702,9 +1702,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1713,9 +1713,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1724,9 +1724,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1735,9 +1735,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1746,9 +1746,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1756,9 +1756,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1766,9 +1766,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1776,9 +1776,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1786,9 +1786,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1796,9 +1796,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1806,27 +1806,27 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1834,9 +1834,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1844,9 +1844,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1854,118 +1854,118 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_c
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -1987,7 +1987,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -1999,7 +1999,7 @@ void vpx_highbd_convolve8_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8 vpx_highbd_convolve8_neon
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
@@ -2013,7 +2013,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2025,7 +2025,7 @@ void vpx_highbd_convolve8_avg_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_neon
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
@@ -2039,7 +2039,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2051,7 +2051,7 @@ void vpx_highbd_convolve8_avg_horiz_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_neon
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
@@ -2065,7 +2065,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2077,7 +2077,7 @@ void vpx_highbd_convolve8_avg_vert_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_neon
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
@@ -2091,7 +2091,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2103,7 +2103,7 @@ void vpx_highbd_convolve8_horiz_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_neon
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
@@ -2117,7 +2117,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2129,7 +2129,7 @@ void vpx_highbd_convolve8_vert_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_neon
void vpx_highbd_convolve_avg_c(const uint16_t* src,
@@ -2143,7 +2143,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2155,7 +2155,7 @@ void vpx_highbd_convolve_avg_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_neon
void vpx_highbd_convolve_copy_c(const uint16_t* src,
@@ -2169,7 +2169,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_neon(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2181,272 +2181,272 @@ void vpx_highbd_convolve_copy_neon(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_neon
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_neon
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_neon
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_neon
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_neon
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_neon
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_neon
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_neon
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_neon
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_neon
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_neon
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_neon
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_neon
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2454,12 +2454,12 @@ void vpx_highbd_dc_left_predictor_16x16_neon(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_neon
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2467,120 +2467,120 @@ void vpx_highbd_dc_left_predictor_32x32_neon(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_neon
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_neon
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_neon
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_neon
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_neon
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_neon
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_neon
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_neon
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_neon
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_neon
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2624,53 +2624,68 @@ void vpx_fdct8x8_1_neon(const int16_t* input, tran_low_t* output, int stride);
#define vpx_highbd_fdct8x8_1 vpx_fdct8x8_1_neon
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_neon
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_neon
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_neon
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_neon
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -3005,9 +3020,9 @@ void vpx_highbd_lpf_vertical_8_dual_neon(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_neon
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -3058,7 +3073,7 @@ unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c
@@ -3078,7 +3093,7 @@ unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c
@@ -3098,7 +3113,7 @@ unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c
@@ -3118,7 +3133,7 @@ unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c
@@ -3138,7 +3153,7 @@ unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c
@@ -3158,7 +3173,7 @@ unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c
@@ -3178,7 +3193,7 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c
@@ -3198,7 +3213,7 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c
@@ -3218,7 +3233,7 @@ unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c
@@ -3238,7 +3253,7 @@ unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c
@@ -3258,7 +3273,7 @@ unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c
@@ -3278,7 +3293,7 @@ unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c
@@ -3298,7 +3313,7 @@ unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c
@@ -3307,104 +3322,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_neon
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_neon
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_neon
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_neon
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_neon
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_neon
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_neon
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_neon(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3678,12 +3693,12 @@ void vpx_lpf_vertical_8_dual_neon(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_neon
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_neon(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_neon(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -3717,35 +3732,35 @@ void vpx_minmax_8x8_neon(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_neon
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_neon
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -3862,12 +3877,12 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_neon
@@ -3903,12 +3918,12 @@ unsigned int vpx_sad16x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_neon
@@ -3944,12 +3959,12 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_neon
@@ -3985,12 +4000,12 @@ unsigned int vpx_sad32x16_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_neon
@@ -4019,12 +4034,12 @@ unsigned int vpx_sad32x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_neon
@@ -4053,12 +4068,12 @@ unsigned int vpx_sad32x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_neon
@@ -4094,12 +4109,12 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_neon
@@ -4135,12 +4150,12 @@ unsigned int vpx_sad4x8_avg_neon(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_neon
@@ -4169,12 +4184,12 @@ unsigned int vpx_sad64x32_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_neon
@@ -4203,12 +4218,12 @@ unsigned int vpx_sad64x64_avg_neon(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_neon
@@ -4244,12 +4259,12 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_neon
@@ -4285,12 +4300,12 @@ unsigned int vpx_sad8x4_avg_neon(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_neon
@@ -4326,12 +4341,12 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_neon(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_neon
@@ -4437,17 +4452,17 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4455,17 +4470,17 @@ uint32_t vpx_sub_pixel_avg_variance16x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_neon
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4473,17 +4488,17 @@ uint32_t vpx_sub_pixel_avg_variance16x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_neon
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4491,17 +4506,17 @@ uint32_t vpx_sub_pixel_avg_variance16x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_neon
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4509,17 +4524,17 @@ uint32_t vpx_sub_pixel_avg_variance32x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_neon
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4527,17 +4542,17 @@ uint32_t vpx_sub_pixel_avg_variance32x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_neon
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4545,17 +4560,17 @@ uint32_t vpx_sub_pixel_avg_variance32x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_neon
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4563,17 +4578,17 @@ uint32_t vpx_sub_pixel_avg_variance4x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_neon
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4581,17 +4596,17 @@ uint32_t vpx_sub_pixel_avg_variance4x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_neon
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4599,17 +4614,17 @@ uint32_t vpx_sub_pixel_avg_variance64x32_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_neon
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4617,17 +4632,17 @@ uint32_t vpx_sub_pixel_avg_variance64x64_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_neon
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4635,17 +4650,17 @@ uint32_t vpx_sub_pixel_avg_variance8x16_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_neon
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4653,17 +4668,17 @@ uint32_t vpx_sub_pixel_avg_variance8x4_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_neon
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -4671,208 +4686,208 @@ uint32_t vpx_sub_pixel_avg_variance8x8_neon(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_neon
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_neon
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_neon
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_neon
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_neon
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_neon
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_neon
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_neon
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_neon
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_neon
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_neon
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_neon
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_neon
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -4901,243 +4916,243 @@ uint64_t vpx_sum_squares_2d_i16_neon(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_neon
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_neon
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_neon
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_neon
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_neon
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_neon
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_neon
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_neon
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_neon(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_neon
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_neon
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_neon
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_neon
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_neon
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_neon
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_neon
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_neon
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_neon
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_neon
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_neon
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_neon
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_neon
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_neon(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_neon
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/generic/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/generic/vp8_rtcd.h
index dc054d6b36a..6e6147d3faa 100644
--- a/chromium/third_party/libvpx/source/config/linux/generic/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/generic/vp8_rtcd.h
@@ -27,44 +27,44 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -72,9 +72,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -82,9 +82,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -93,27 +93,27 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
@@ -139,7 +139,7 @@ int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
@@ -158,7 +158,7 @@ void vp8_dequant_idct_add_y_block_c(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_c
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -209,55 +209,55 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
@@ -271,8 +271,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -288,50 +288,50 @@ void vp8_short_fdct8x4_c(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_c
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_c
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
diff --git a/chromium/third_party/libvpx/source/config/linux/generic/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/generic/vp9_rtcd.h
index 289a739928c..5ee904c571b 100644
--- a/chromium/third_party/libvpx/source/config/linux/generic/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/generic/vp9_rtcd.h
@@ -143,8 +143,8 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_c
@@ -225,8 +225,8 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
diff --git a/chromium/third_party/libvpx/source/config/linux/generic/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/generic/vpx_dsp_rtcd.h
index 29706b564b5..f8a910d67cc 100644
--- a/chromium/third_party/libvpx/source/config/linux/generic/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/generic/vpx_dsp_rtcd.h
@@ -139,253 +139,253 @@ void vpx_convolve_copy_c(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
@@ -418,7 +418,7 @@ void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_c
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -426,13 +426,13 @@ void vpx_get16x16var_c(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_c
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -443,25 +443,25 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
@@ -482,13 +482,13 @@ void vpx_hadamard_8x8_c(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_c
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -496,7 +496,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -504,38 +504,38 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -545,9 +545,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -556,9 +556,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
vpx_highbd_10_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -568,9 +568,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -580,9 +580,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -592,9 +592,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -603,9 +603,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
vpx_highbd_10_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -614,9 +614,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -626,9 +626,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -638,9 +638,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -649,9 +649,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
vpx_highbd_10_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -660,9 +660,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -671,9 +671,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -682,9 +682,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -692,9 +692,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_c
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -702,9 +702,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -712,9 +712,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_c
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -722,9 +722,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_c
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -732,9 +732,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_c
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -742,9 +742,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -752,9 +752,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -762,9 +762,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -772,9 +772,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_c
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -782,9 +782,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -792,9 +792,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -802,9 +802,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -812,98 +812,98 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_c
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -911,7 +911,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -919,38 +919,38 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -960,9 +960,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -971,9 +971,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
vpx_highbd_12_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -983,9 +983,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -995,9 +995,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1007,9 +1007,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1018,9 +1018,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
vpx_highbd_12_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1029,9 +1029,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1041,9 +1041,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1053,9 +1053,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1064,9 +1064,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
vpx_highbd_12_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1075,9 +1075,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1086,9 +1086,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1097,9 +1097,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1107,9 +1107,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_c
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1117,9 +1117,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1127,9 +1127,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_c
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1137,9 +1137,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_c
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1147,9 +1147,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_c
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1157,9 +1157,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1167,9 +1167,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1177,9 +1177,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1187,9 +1187,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_c
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1197,9 +1197,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1207,9 +1207,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1217,9 +1217,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1227,98 +1227,98 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_c
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1326,7 +1326,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1334,37 +1334,37 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1373,9 +1373,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1384,9 +1384,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1395,9 +1395,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1406,9 +1406,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1417,9 +1417,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1439,9 +1439,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1450,9 +1450,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1461,9 +1461,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1472,9 +1472,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1483,9 +1483,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1494,9 +1494,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1505,9 +1505,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1515,9 +1515,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1525,9 +1525,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1535,9 +1535,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1545,9 +1545,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1555,9 +1555,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1565,27 +1565,27 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1593,9 +1593,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1603,9 +1603,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1613,118 +1613,118 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_c
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -1746,7 +1746,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8 vpx_highbd_convolve8_c
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
@@ -1760,7 +1760,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_c
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
@@ -1774,7 +1774,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_c
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
@@ -1788,7 +1788,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_c
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
@@ -1802,7 +1802,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_c
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
@@ -1816,7 +1816,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_c
void vpx_highbd_convolve_avg_c(const uint16_t* src,
@@ -1830,7 +1830,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_c
void vpx_highbd_convolve_copy_c(const uint16_t* src,
@@ -1844,284 +1844,284 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_c
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_c
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_c
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_c
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_c
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_c
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_c
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_c
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_c
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_c
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_c
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_c
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_c
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_16x16 vpx_highbd_dc_left_predictor_16x16_c
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_32x32 vpx_highbd_dc_left_predictor_32x32_c
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_c
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_c
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_c
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_c
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_c
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_c
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_c
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_c
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_c
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2164,33 +2164,48 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_c
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_c
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_c
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_c
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -2389,9 +2404,9 @@ void vpx_highbd_lpf_vertical_8_dual_c(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_c
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -2442,7 +2457,7 @@ unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c
@@ -2462,7 +2477,7 @@ unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c
@@ -2482,7 +2497,7 @@ unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c
@@ -2502,7 +2517,7 @@ unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c
@@ -2522,7 +2537,7 @@ unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c
@@ -2542,7 +2557,7 @@ unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c
@@ -2562,7 +2577,7 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c
@@ -2582,7 +2597,7 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c
@@ -2602,7 +2617,7 @@ unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c
@@ -2622,7 +2637,7 @@ unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c
@@ -2642,7 +2657,7 @@ unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c
@@ -2662,7 +2677,7 @@ unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c
@@ -2682,7 +2697,7 @@ unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c
@@ -2691,64 +2706,64 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_c
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_c
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_c
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_c
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_c
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_c
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_c
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2910,7 +2925,7 @@ void vpx_lpf_vertical_8_dual_c(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -2933,30 +2948,30 @@ void vpx_minmax_8x8_c(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_c
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_c
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -3031,7 +3046,7 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_c
@@ -3058,7 +3073,7 @@ unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_c
@@ -3085,7 +3100,7 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_c
@@ -3112,7 +3127,7 @@ unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_c
@@ -3132,7 +3147,7 @@ unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_c
@@ -3152,7 +3167,7 @@ unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_c
@@ -3179,7 +3194,7 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_c
@@ -3206,7 +3221,7 @@ unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_c
@@ -3226,7 +3241,7 @@ unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_c
@@ -3246,7 +3261,7 @@ unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_c
@@ -3273,7 +3288,7 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_c
@@ -3300,7 +3315,7 @@ unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_c
@@ -3327,7 +3342,7 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_c
@@ -3421,9 +3436,9 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3431,9 +3446,9 @@ uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3441,9 +3456,9 @@ uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3451,9 +3466,9 @@ uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3461,9 +3476,9 @@ uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3471,9 +3486,9 @@ uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3481,9 +3496,9 @@ uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3491,9 +3506,9 @@ uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3501,9 +3516,9 @@ uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3511,9 +3526,9 @@ uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3521,9 +3536,9 @@ uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3531,9 +3546,9 @@ uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3541,9 +3556,9 @@ uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3551,117 +3566,117 @@ uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -3681,146 +3696,146 @@ uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_c
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_c
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_c
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_c
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_c
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_c
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_c
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_c
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_c
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_c
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_c
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_c
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_c
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/ia32/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/ia32/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/linux/ia32/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/ia32/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/linux/ia32/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/ia32/vp9_rtcd.h
index 1e1da43518a..603bd31b5c3 100644
--- a/chromium/third_party/libvpx/source/config/linux/ia32/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/ia32/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/linux/ia32/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/ia32/vpx_dsp_rtcd.h
index 14d1107e665..b2b02a59aa1 100644
--- a/chromium/third_party/libvpx/source/config/linux/ia32/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/ia32/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1007,13 +1007,13 @@ void vpx_hadamard_8x8_sse2(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_sse2
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1021,7 +1021,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1029,57 +1029,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1089,18 +1089,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1109,18 +1109,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1130,18 +1130,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1151,18 +1151,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1172,18 +1172,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1192,9 +1192,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1203,9 +1203,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1215,18 +1215,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1236,18 +1236,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1256,18 +1256,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1276,18 +1276,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1296,18 +1296,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1316,16 +1316,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1333,16 +1333,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1350,16 +1350,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1367,16 +1367,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1384,16 +1384,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1401,16 +1401,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1418,9 +1418,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1438,16 +1438,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1455,16 +1455,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1472,16 +1472,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1489,16 +1489,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1506,16 +1506,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1523,148 +1523,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1672,7 +1672,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1680,57 +1680,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1740,18 +1740,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1760,18 +1760,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1781,18 +1781,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1802,18 +1802,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1823,18 +1823,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1843,9 +1843,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1854,9 +1854,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1866,18 +1866,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1887,18 +1887,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1907,18 +1907,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1927,18 +1927,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1947,18 +1947,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1967,16 +1967,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1984,16 +1984,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2001,16 +2001,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2018,16 +2018,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2035,16 +2035,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2052,16 +2052,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2069,9 +2069,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2079,9 +2079,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2089,16 +2089,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2106,16 +2106,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2123,16 +2123,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2140,16 +2140,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2157,16 +2157,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2174,148 +2174,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2323,7 +2323,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2331,56 +2331,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2389,18 +2389,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2409,18 +2409,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2429,18 +2429,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2449,18 +2449,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2469,18 +2469,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2489,9 +2489,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2500,9 +2500,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2511,18 +2511,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2531,18 +2531,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2551,18 +2551,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2571,18 +2571,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2591,18 +2591,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2611,16 +2611,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2628,16 +2628,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2645,16 +2645,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2662,16 +2662,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2679,16 +2679,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2696,16 +2696,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2713,34 +2713,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2748,16 +2748,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2765,16 +2765,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2782,16 +2782,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2799,16 +2799,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2816,152 +2816,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2983,7 +2983,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2995,7 +2995,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3007,7 +3007,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3020,7 +3020,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3032,7 +3032,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3044,7 +3044,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3057,7 +3057,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3069,7 +3069,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3081,7 +3081,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3094,7 +3094,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3106,7 +3106,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3118,7 +3118,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3131,7 +3131,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3143,7 +3143,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3155,7 +3155,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3168,7 +3168,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3180,7 +3180,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3192,7 +3192,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3205,7 +3205,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3217,7 +3217,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3229,7 +3229,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3241,7 +3241,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3254,7 +3254,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3266,7 +3266,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3278,7 +3278,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3290,427 +3290,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3718,12 +3718,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3731,120 +3731,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3902,53 +3902,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4346,9 +4361,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4434,12 +4449,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4468,12 +4483,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4502,12 +4517,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4536,12 +4551,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4570,12 +4585,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4604,12 +4619,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4629,12 +4644,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4654,12 +4669,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4688,12 +4703,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4722,12 +4737,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4756,12 +4771,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4790,12 +4805,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4824,12 +4839,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4838,104 +4853,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5245,12 +5260,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5284,68 +5299,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5546,12 +5561,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5596,12 +5611,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5651,12 +5666,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5717,12 +5732,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5767,22 +5782,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5826,12 +5841,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5876,12 +5891,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -5926,12 +5941,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -5976,12 +5991,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6026,22 +6041,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6085,12 +6100,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6135,12 +6150,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6185,12 +6200,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6316,850 +6331,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7187,315 +7202,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mips64el/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mips64el/vp8_rtcd.h
index dc054d6b36a..6e6147d3faa 100644
--- a/chromium/third_party/libvpx/source/config/linux/mips64el/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mips64el/vp8_rtcd.h
@@ -27,44 +27,44 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -72,9 +72,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -82,9 +82,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -93,27 +93,27 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
@@ -139,7 +139,7 @@ int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
@@ -158,7 +158,7 @@ void vp8_dequant_idct_add_y_block_c(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_c
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -209,55 +209,55 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
@@ -271,8 +271,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -288,50 +288,50 @@ void vp8_short_fdct8x4_c(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_c
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_c
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mips64el/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mips64el/vp9_rtcd.h
index 1eb9db53c6d..d187b0ac044 100644
--- a/chromium/third_party/libvpx/source/config/linux/mips64el/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mips64el/vp9_rtcd.h
@@ -115,8 +115,8 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mips64el/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mips64el/vpx_dsp_rtcd.h
index 8703d051e90..b406c882633 100644
--- a/chromium/third_party/libvpx/source/config/linux/mips64el/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mips64el/vpx_dsp_rtcd.h
@@ -139,253 +139,253 @@ void vpx_convolve_copy_c(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
@@ -418,7 +418,7 @@ void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_c
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -426,13 +426,13 @@ void vpx_get16x16var_c(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_c
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -443,25 +443,25 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
@@ -482,7 +482,7 @@ void vpx_hadamard_8x8_c(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_c
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -643,7 +643,7 @@ void vpx_lpf_vertical_8_dual_c(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -666,30 +666,30 @@ void vpx_minmax_8x8_c(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_c
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_c
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -764,7 +764,7 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_c
@@ -791,7 +791,7 @@ unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_c
@@ -818,7 +818,7 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_c
@@ -845,7 +845,7 @@ unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_c
@@ -865,7 +865,7 @@ unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_c
@@ -885,7 +885,7 @@ unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_c
@@ -912,7 +912,7 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_c
@@ -939,7 +939,7 @@ unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_c
@@ -959,7 +959,7 @@ unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_c
@@ -979,7 +979,7 @@ unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_c
@@ -1006,7 +1006,7 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_c
@@ -1033,7 +1033,7 @@ unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_c
@@ -1060,7 +1060,7 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_c
@@ -1154,9 +1154,9 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1164,9 +1164,9 @@ uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1174,9 +1174,9 @@ uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1184,9 +1184,9 @@ uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1194,9 +1194,9 @@ uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1204,9 +1204,9 @@ uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1214,9 +1214,9 @@ uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1224,9 +1224,9 @@ uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1234,9 +1234,9 @@ uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1244,9 +1244,9 @@ uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1254,9 +1254,9 @@ uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1264,9 +1264,9 @@ uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1274,9 +1274,9 @@ uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1284,117 +1284,117 @@ uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1414,146 +1414,146 @@ uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_c
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_c
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_c
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_c
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_c
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_c
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_c
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_c
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_c
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_c
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_c
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_c
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_c
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mipsel/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mipsel/vp8_rtcd.h
index dc054d6b36a..6e6147d3faa 100644
--- a/chromium/third_party/libvpx/source/config/linux/mipsel/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mipsel/vp8_rtcd.h
@@ -27,44 +27,44 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -72,9 +72,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -82,9 +82,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -93,27 +93,27 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
@@ -139,7 +139,7 @@ int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
@@ -158,7 +158,7 @@ void vp8_dequant_idct_add_y_block_c(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_c
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -209,55 +209,55 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
@@ -271,8 +271,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -288,50 +288,50 @@ void vp8_short_fdct8x4_c(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_c
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_c
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mipsel/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mipsel/vp9_rtcd.h
index 1eb9db53c6d..d187b0ac044 100644
--- a/chromium/third_party/libvpx/source/config/linux/mipsel/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mipsel/vp9_rtcd.h
@@ -115,8 +115,8 @@ void vp9_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_fwht4x4 vp9_fwht4x4_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
diff --git a/chromium/third_party/libvpx/source/config/linux/mipsel/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/mipsel/vpx_dsp_rtcd.h
index 8703d051e90..b406c882633 100644
--- a/chromium/third_party/libvpx/source/config/linux/mipsel/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/mipsel/vpx_dsp_rtcd.h
@@ -139,253 +139,253 @@ void vpx_convolve_copy_c(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
@@ -418,7 +418,7 @@ void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_c
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -426,13 +426,13 @@ void vpx_get16x16var_c(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_c
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -443,25 +443,25 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
@@ -482,7 +482,7 @@ void vpx_hadamard_8x8_c(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_c
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
@@ -643,7 +643,7 @@ void vpx_lpf_vertical_8_dual_c(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -666,30 +666,30 @@ void vpx_minmax_8x8_c(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_c
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_c
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -764,7 +764,7 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_c
@@ -791,7 +791,7 @@ unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_c
@@ -818,7 +818,7 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_c
@@ -845,7 +845,7 @@ unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_c
@@ -865,7 +865,7 @@ unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_c
@@ -885,7 +885,7 @@ unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_c
@@ -912,7 +912,7 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_c
@@ -939,7 +939,7 @@ unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_c
@@ -959,7 +959,7 @@ unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_c
@@ -979,7 +979,7 @@ unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_c
@@ -1006,7 +1006,7 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_c
@@ -1033,7 +1033,7 @@ unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_c
@@ -1060,7 +1060,7 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_c
@@ -1154,9 +1154,9 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1164,9 +1164,9 @@ uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1174,9 +1174,9 @@ uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1184,9 +1184,9 @@ uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1194,9 +1194,9 @@ uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1204,9 +1204,9 @@ uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1214,9 +1214,9 @@ uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1224,9 +1224,9 @@ uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1234,9 +1234,9 @@ uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1244,9 +1244,9 @@ uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1254,9 +1254,9 @@ uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1264,9 +1264,9 @@ uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1274,9 +1274,9 @@ uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1284,117 +1284,117 @@ uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1414,146 +1414,146 @@ uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_c
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_c
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_c
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_c
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_c
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_c
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_c
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_c
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_c
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_c
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_c
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_c
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_c
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/linux/x64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/linux/x64/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/linux/x64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/x64/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/linux/x64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/linux/x64/vp9_rtcd.h
index 6f00c78fb74..28787a89e6f 100644
--- a/chromium/third_party/libvpx/source/config/linux/x64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/x64/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/linux/x64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/linux/x64/vpx_dsp_rtcd.h
index 9970fae1a91..3b28a11dbc8 100644
--- a/chromium/third_party/libvpx/source/config/linux/x64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/linux/x64/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1012,13 +1012,13 @@ RTCD_EXTERN void (*vpx_hadamard_8x8)(const int16_t* src_diff,
tran_low_t* coeff);
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1026,7 +1026,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1034,57 +1034,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1094,18 +1094,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1114,18 +1114,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1135,18 +1135,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1156,18 +1156,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1177,18 +1177,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1197,9 +1197,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1208,9 +1208,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1220,18 +1220,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1241,18 +1241,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1261,18 +1261,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1281,18 +1281,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1301,18 +1301,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1321,16 +1321,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1338,16 +1338,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1355,16 +1355,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1372,16 +1372,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1389,16 +1389,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1406,16 +1406,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1423,9 +1423,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1433,9 +1433,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1443,16 +1443,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1460,16 +1460,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1477,16 +1477,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1494,16 +1494,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1511,16 +1511,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1528,148 +1528,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1677,7 +1677,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1685,57 +1685,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1745,18 +1745,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1765,18 +1765,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1786,18 +1786,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1807,18 +1807,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1828,18 +1828,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1848,9 +1848,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1859,9 +1859,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1871,18 +1871,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1892,18 +1892,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1912,18 +1912,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1932,18 +1932,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1952,18 +1952,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1972,16 +1972,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1989,16 +1989,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2006,16 +2006,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2023,16 +2023,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2040,16 +2040,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2057,16 +2057,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2074,9 +2074,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2084,9 +2084,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2094,16 +2094,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2111,16 +2111,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2128,16 +2128,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2145,16 +2145,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2162,16 +2162,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2179,148 +2179,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2328,7 +2328,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2336,56 +2336,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2394,18 +2394,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2414,18 +2414,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2434,18 +2434,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2454,18 +2454,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2474,18 +2474,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2494,9 +2494,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2505,9 +2505,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2516,18 +2516,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2536,18 +2536,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2556,18 +2556,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2576,18 +2576,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2596,18 +2596,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2616,16 +2616,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2633,16 +2633,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2650,16 +2650,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2667,16 +2667,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2684,16 +2684,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2701,16 +2701,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2718,34 +2718,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2753,16 +2753,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2770,16 +2770,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2787,16 +2787,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2804,16 +2804,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2821,152 +2821,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2988,7 +2988,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3000,7 +3000,7 @@ void vpx_highbd_convolve8_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3012,7 +3012,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3024,7 +3024,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3037,7 +3037,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3049,7 +3049,7 @@ void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3061,7 +3061,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3073,7 +3073,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3086,7 +3086,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3098,7 +3098,7 @@ void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3110,7 +3110,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3122,7 +3122,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3135,7 +3135,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3147,7 +3147,7 @@ void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3159,7 +3159,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3171,7 +3171,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3184,7 +3184,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3196,7 +3196,7 @@ void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3208,7 +3208,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3220,7 +3220,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3233,7 +3233,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3245,7 +3245,7 @@ void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3257,7 +3257,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3269,7 +3269,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3282,7 +3282,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3294,7 +3294,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3306,7 +3306,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3318,7 +3318,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3331,7 +3331,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3343,7 +3343,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3355,7 +3355,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3367,427 +3367,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3795,12 +3795,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3808,120 +3808,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3979,53 +3979,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4423,9 +4438,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4511,12 +4526,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4545,12 +4560,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4579,12 +4594,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4613,12 +4628,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4647,12 +4662,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4681,12 +4696,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4706,12 +4721,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4731,12 +4746,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4765,12 +4780,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4799,12 +4814,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4833,12 +4848,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4867,12 +4882,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4901,12 +4916,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4915,104 +4930,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5322,12 +5337,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5361,68 +5376,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5623,12 +5638,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5673,12 +5688,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5728,12 +5743,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5794,12 +5809,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5844,22 +5859,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5903,12 +5918,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5953,12 +5968,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -6003,12 +6018,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -6053,12 +6068,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6103,22 +6118,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6162,12 +6177,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6212,12 +6227,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6262,12 +6277,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6393,850 +6408,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7264,315 +7279,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/mac/ia32/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/mac/ia32/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/mac/ia32/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/ia32/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/mac/ia32/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/mac/ia32/vp9_rtcd.h
index 1e1da43518a..603bd31b5c3 100644
--- a/chromium/third_party/libvpx/source/config/mac/ia32/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/ia32/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/mac/ia32/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/mac/ia32/vpx_dsp_rtcd.h
index 14d1107e665..b2b02a59aa1 100644
--- a/chromium/third_party/libvpx/source/config/mac/ia32/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/ia32/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1007,13 +1007,13 @@ void vpx_hadamard_8x8_sse2(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_sse2
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1021,7 +1021,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1029,57 +1029,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1089,18 +1089,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1109,18 +1109,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1130,18 +1130,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1151,18 +1151,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1172,18 +1172,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1192,9 +1192,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1203,9 +1203,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1215,18 +1215,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1236,18 +1236,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1256,18 +1256,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1276,18 +1276,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1296,18 +1296,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1316,16 +1316,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1333,16 +1333,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1350,16 +1350,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1367,16 +1367,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1384,16 +1384,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1401,16 +1401,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1418,9 +1418,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1438,16 +1438,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1455,16 +1455,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1472,16 +1472,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1489,16 +1489,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1506,16 +1506,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1523,148 +1523,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1672,7 +1672,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1680,57 +1680,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1740,18 +1740,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1760,18 +1760,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1781,18 +1781,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1802,18 +1802,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1823,18 +1823,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1843,9 +1843,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1854,9 +1854,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1866,18 +1866,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1887,18 +1887,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1907,18 +1907,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1927,18 +1927,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1947,18 +1947,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1967,16 +1967,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1984,16 +1984,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2001,16 +2001,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2018,16 +2018,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2035,16 +2035,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2052,16 +2052,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2069,9 +2069,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2079,9 +2079,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2089,16 +2089,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2106,16 +2106,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2123,16 +2123,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2140,16 +2140,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2157,16 +2157,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2174,148 +2174,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2323,7 +2323,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2331,56 +2331,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2389,18 +2389,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2409,18 +2409,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2429,18 +2429,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2449,18 +2449,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2469,18 +2469,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2489,9 +2489,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2500,9 +2500,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2511,18 +2511,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2531,18 +2531,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2551,18 +2551,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2571,18 +2571,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2591,18 +2591,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2611,16 +2611,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2628,16 +2628,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2645,16 +2645,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2662,16 +2662,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2679,16 +2679,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2696,16 +2696,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2713,34 +2713,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2748,16 +2748,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2765,16 +2765,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2782,16 +2782,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2799,16 +2799,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2816,152 +2816,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2983,7 +2983,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2995,7 +2995,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3007,7 +3007,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3020,7 +3020,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3032,7 +3032,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3044,7 +3044,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3057,7 +3057,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3069,7 +3069,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3081,7 +3081,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3094,7 +3094,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3106,7 +3106,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3118,7 +3118,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3131,7 +3131,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3143,7 +3143,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3155,7 +3155,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3168,7 +3168,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3180,7 +3180,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3192,7 +3192,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3205,7 +3205,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3217,7 +3217,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3229,7 +3229,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3241,7 +3241,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3254,7 +3254,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3266,7 +3266,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3278,7 +3278,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3290,427 +3290,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3718,12 +3718,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3731,120 +3731,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3902,53 +3902,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4346,9 +4361,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4434,12 +4449,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4468,12 +4483,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4502,12 +4517,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4536,12 +4551,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4570,12 +4585,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4604,12 +4619,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4629,12 +4644,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4654,12 +4669,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4688,12 +4703,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4722,12 +4737,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4756,12 +4771,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4790,12 +4805,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4824,12 +4839,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4838,104 +4853,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5245,12 +5260,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5284,68 +5299,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5546,12 +5561,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5596,12 +5611,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5651,12 +5666,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5717,12 +5732,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5767,22 +5782,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5826,12 +5841,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5876,12 +5891,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -5926,12 +5941,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -5976,12 +5991,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6026,22 +6041,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6085,12 +6100,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6135,12 +6150,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6185,12 +6200,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6316,850 +6331,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7187,315 +7202,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/mac/x64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/mac/x64/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/mac/x64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/x64/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/mac/x64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/mac/x64/vp9_rtcd.h
index 6f00c78fb74..28787a89e6f 100644
--- a/chromium/third_party/libvpx/source/config/mac/x64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/x64/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/mac/x64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/mac/x64/vpx_dsp_rtcd.h
index 9970fae1a91..3b28a11dbc8 100644
--- a/chromium/third_party/libvpx/source/config/mac/x64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/mac/x64/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1012,13 +1012,13 @@ RTCD_EXTERN void (*vpx_hadamard_8x8)(const int16_t* src_diff,
tran_low_t* coeff);
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1026,7 +1026,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1034,57 +1034,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1094,18 +1094,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1114,18 +1114,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1135,18 +1135,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1156,18 +1156,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1177,18 +1177,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1197,9 +1197,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1208,9 +1208,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1220,18 +1220,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1241,18 +1241,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1261,18 +1261,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1281,18 +1281,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1301,18 +1301,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1321,16 +1321,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1338,16 +1338,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1355,16 +1355,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1372,16 +1372,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1389,16 +1389,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1406,16 +1406,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1423,9 +1423,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1433,9 +1433,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1443,16 +1443,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1460,16 +1460,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1477,16 +1477,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1494,16 +1494,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1511,16 +1511,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1528,148 +1528,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1677,7 +1677,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1685,57 +1685,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1745,18 +1745,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1765,18 +1765,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1786,18 +1786,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1807,18 +1807,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1828,18 +1828,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1848,9 +1848,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1859,9 +1859,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1871,18 +1871,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1892,18 +1892,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1912,18 +1912,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1932,18 +1932,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1952,18 +1952,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1972,16 +1972,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1989,16 +1989,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2006,16 +2006,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2023,16 +2023,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2040,16 +2040,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2057,16 +2057,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2074,9 +2074,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2084,9 +2084,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2094,16 +2094,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2111,16 +2111,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2128,16 +2128,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2145,16 +2145,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2162,16 +2162,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2179,148 +2179,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2328,7 +2328,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2336,56 +2336,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2394,18 +2394,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2414,18 +2414,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2434,18 +2434,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2454,18 +2454,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2474,18 +2474,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2494,9 +2494,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2505,9 +2505,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2516,18 +2516,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2536,18 +2536,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2556,18 +2556,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2576,18 +2576,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2596,18 +2596,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2616,16 +2616,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2633,16 +2633,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2650,16 +2650,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2667,16 +2667,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2684,16 +2684,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2701,16 +2701,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2718,34 +2718,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2753,16 +2753,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2770,16 +2770,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2787,16 +2787,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2804,16 +2804,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2821,152 +2821,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2988,7 +2988,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3000,7 +3000,7 @@ void vpx_highbd_convolve8_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3012,7 +3012,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3024,7 +3024,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3037,7 +3037,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3049,7 +3049,7 @@ void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3061,7 +3061,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3073,7 +3073,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3086,7 +3086,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3098,7 +3098,7 @@ void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3110,7 +3110,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3122,7 +3122,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3135,7 +3135,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3147,7 +3147,7 @@ void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3159,7 +3159,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3171,7 +3171,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3184,7 +3184,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3196,7 +3196,7 @@ void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3208,7 +3208,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3220,7 +3220,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3233,7 +3233,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3245,7 +3245,7 @@ void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3257,7 +3257,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3269,7 +3269,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3282,7 +3282,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3294,7 +3294,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3306,7 +3306,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3318,7 +3318,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3331,7 +3331,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3343,7 +3343,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3355,7 +3355,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3367,427 +3367,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3795,12 +3795,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3808,120 +3808,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3979,53 +3979,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4423,9 +4438,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4511,12 +4526,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4545,12 +4560,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4579,12 +4594,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4613,12 +4628,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4647,12 +4662,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4681,12 +4696,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4706,12 +4721,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4731,12 +4746,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4765,12 +4780,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4799,12 +4814,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4833,12 +4848,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4867,12 +4882,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4901,12 +4916,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4915,104 +4930,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5322,12 +5337,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5361,68 +5376,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5623,12 +5638,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5673,12 +5688,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5728,12 +5743,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5794,12 +5809,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5844,22 +5859,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5903,12 +5918,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5953,12 +5968,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -6003,12 +6018,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -6053,12 +6068,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6103,22 +6118,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6162,12 +6177,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6212,12 +6227,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6262,12 +6277,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6393,850 +6408,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7264,315 +7279,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/nacl/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/nacl/vp8_rtcd.h
index dc054d6b36a..6e6147d3faa 100644
--- a/chromium/third_party/libvpx/source/config/nacl/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/nacl/vp8_rtcd.h
@@ -27,44 +27,44 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -72,9 +72,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -82,9 +82,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -93,27 +93,27 @@ int vp8_block_error_c(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_c
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
@@ -139,7 +139,7 @@ int vp8_denoiser_filter_uv_c(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
@@ -158,7 +158,7 @@ void vp8_dequant_idct_add_y_block_c(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_c
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -209,55 +209,55 @@ int vp8_full_search_sad_c(struct macroblock* x,
union int_mv* center_mv);
#define vp8_full_search_sad vp8_full_search_sad_c
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
@@ -271,8 +271,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -288,50 +288,50 @@ void vp8_short_fdct8x4_c(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_c
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_c
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
diff --git a/chromium/third_party/libvpx/source/config/nacl/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/nacl/vp9_rtcd.h
index 289a739928c..5ee904c571b 100644
--- a/chromium/third_party/libvpx/source/config/nacl/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/nacl/vp9_rtcd.h
@@ -143,8 +143,8 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
#define vp9_highbd_iht16x16_256_add vp9_highbd_iht16x16_256_add_c
@@ -225,8 +225,8 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
diff --git a/chromium/third_party/libvpx/source/config/nacl/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/nacl/vpx_dsp_rtcd.h
index 29706b564b5..f8a910d67cc 100644
--- a/chromium/third_party/libvpx/source/config/nacl/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/nacl/vpx_dsp_rtcd.h
@@ -139,253 +139,253 @@ void vpx_convolve_copy_c(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
@@ -418,7 +418,7 @@ void vpx_fdct8x8_1_c(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_c
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -426,13 +426,13 @@ void vpx_get16x16var_c(const uint8_t* src_ptr,
#define vpx_get16x16var vpx_get16x16var_c
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -443,25 +443,25 @@ unsigned int vpx_get_mb_ss_c(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_c
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
@@ -482,13 +482,13 @@ void vpx_hadamard_8x8_c(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_c
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -496,7 +496,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -504,38 +504,38 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_c
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -545,9 +545,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -556,9 +556,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
vpx_highbd_10_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -568,9 +568,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -580,9 +580,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -592,9 +592,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -603,9 +603,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
vpx_highbd_10_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -614,9 +614,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -626,9 +626,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -638,9 +638,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -649,9 +649,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
vpx_highbd_10_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -660,9 +660,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -671,9 +671,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -682,9 +682,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -692,9 +692,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_c
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -702,9 +702,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_c
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -712,9 +712,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_c
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -722,9 +722,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_c
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -732,9 +732,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_c
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -742,9 +742,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_c
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -752,9 +752,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -762,9 +762,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -772,9 +772,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_c
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -782,9 +782,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_c
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -792,9 +792,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_c
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -802,9 +802,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_c
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -812,98 +812,98 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_c
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_c
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_c
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_c
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_c
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_c
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_c
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_c
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_c
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_c
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_c
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -911,7 +911,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -919,38 +919,38 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_c
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -960,9 +960,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -971,9 +971,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
vpx_highbd_12_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -983,9 +983,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -995,9 +995,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1007,9 +1007,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1018,9 +1018,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
vpx_highbd_12_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1029,9 +1029,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1041,9 +1041,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1053,9 +1053,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1064,9 +1064,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
vpx_highbd_12_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1075,9 +1075,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1086,9 +1086,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1097,9 +1097,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1107,9 +1107,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_c
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1117,9 +1117,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_c
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1127,9 +1127,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_c
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1137,9 +1137,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_c
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1147,9 +1147,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_c
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1157,9 +1157,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_c
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1167,9 +1167,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1177,9 +1177,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1187,9 +1187,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_c
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1197,9 +1197,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_c
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1207,9 +1207,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_c
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1217,9 +1217,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_c
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1227,98 +1227,98 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_c
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_c
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_c
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_c
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_c
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_c
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_c
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_c
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_c
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_c
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_c
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1326,7 +1326,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1334,37 +1334,37 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_c
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1373,9 +1373,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1384,9 +1384,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1395,9 +1395,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1406,9 +1406,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1417,9 +1417,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1439,9 +1439,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1450,9 +1450,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1461,9 +1461,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1472,9 +1472,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1483,9 +1483,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1494,9 +1494,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1505,9 +1505,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance8x8_c
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1515,9 +1515,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_c
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1525,9 +1525,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_c
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1535,9 +1535,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_c
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1545,9 +1545,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_c
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1555,9 +1555,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_c
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1565,27 +1565,27 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_c
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1593,9 +1593,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_c
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1603,9 +1603,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_c
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1613,118 +1613,118 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_c
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x4 vpx_highbd_8_sub_pixel_variance8x4_c
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance8x8 vpx_highbd_8_sub_pixel_variance8x8_c
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_c
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_c
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_c
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_c
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_c
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_c
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_c
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_c
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_c
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_c
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_c
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_c
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -1746,7 +1746,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8 vpx_highbd_convolve8_c
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
@@ -1760,7 +1760,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg vpx_highbd_convolve8_avg_c
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
@@ -1774,7 +1774,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_horiz vpx_highbd_convolve8_avg_horiz_c
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
@@ -1788,7 +1788,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_avg_vert vpx_highbd_convolve8_avg_vert_c
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
@@ -1802,7 +1802,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_horiz vpx_highbd_convolve8_horiz_c
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
@@ -1816,7 +1816,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve8_vert vpx_highbd_convolve8_vert_c
void vpx_highbd_convolve_avg_c(const uint16_t* src,
@@ -1830,7 +1830,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_avg vpx_highbd_convolve_avg_c
void vpx_highbd_convolve_copy_c(const uint16_t* src,
@@ -1844,284 +1844,284 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
#define vpx_highbd_convolve_copy vpx_highbd_convolve_copy_c
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_16x16 vpx_highbd_d117_predictor_16x16_c
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_32x32 vpx_highbd_d117_predictor_32x32_c
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_c
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_8x8 vpx_highbd_d117_predictor_8x8_c
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_16x16 vpx_highbd_d135_predictor_16x16_c
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_32x32 vpx_highbd_d135_predictor_32x32_c
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_c
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_8x8 vpx_highbd_d135_predictor_8x8_c
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_16x16 vpx_highbd_d153_predictor_16x16_c
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_32x32 vpx_highbd_d153_predictor_32x32_c
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_c
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_8x8 vpx_highbd_d153_predictor_8x8_c
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_16x16 vpx_highbd_d207_predictor_16x16_c
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_32x32 vpx_highbd_d207_predictor_32x32_c
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_c
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_8x8 vpx_highbd_d207_predictor_8x8_c
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_16x16 vpx_highbd_d45_predictor_16x16_c
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_32x32 vpx_highbd_d45_predictor_32x32_c
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_4x4 vpx_highbd_d45_predictor_4x4_c
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d45_predictor_8x8 vpx_highbd_d45_predictor_8x8_c
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_16x16 vpx_highbd_d63_predictor_16x16_c
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_32x32 vpx_highbd_d63_predictor_32x32_c
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_c
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_8x8 vpx_highbd_d63_predictor_8x8_c
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_c
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_c
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_c
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_c
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_16x16 vpx_highbd_dc_left_predictor_16x16_c
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_32x32 vpx_highbd_dc_left_predictor_32x32_c
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_c
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_c
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_c
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_c
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_c
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_c
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_c
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_c
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_c
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2164,33 +2164,48 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_c
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_c
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_c
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_c
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -2389,9 +2404,9 @@ void vpx_highbd_lpf_vertical_8_dual_c(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_c
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -2442,7 +2457,7 @@ unsigned int vpx_highbd_sad16x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_c
@@ -2462,7 +2477,7 @@ unsigned int vpx_highbd_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_c
@@ -2482,7 +2497,7 @@ unsigned int vpx_highbd_sad16x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_c
@@ -2502,7 +2517,7 @@ unsigned int vpx_highbd_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_c
@@ -2522,7 +2537,7 @@ unsigned int vpx_highbd_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_c
@@ -2542,7 +2557,7 @@ unsigned int vpx_highbd_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_c
@@ -2562,7 +2577,7 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_c
@@ -2582,7 +2597,7 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_c
@@ -2602,7 +2617,7 @@ unsigned int vpx_highbd_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_c
@@ -2622,7 +2637,7 @@ unsigned int vpx_highbd_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_c
@@ -2642,7 +2657,7 @@ unsigned int vpx_highbd_sad8x16_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_c
@@ -2662,7 +2677,7 @@ unsigned int vpx_highbd_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_c
@@ -2682,7 +2697,7 @@ unsigned int vpx_highbd_sad8x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_c
@@ -2691,64 +2706,64 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_c
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_c
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_c
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_c
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_c
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_c
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_c
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -2910,7 +2925,7 @@ void vpx_lpf_vertical_8_dual_c(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -2933,30 +2948,30 @@ void vpx_minmax_8x8_c(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_c
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x16 vpx_mse16x16_c
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse16x8 vpx_mse16x8_c
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_c
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_c
@@ -3031,7 +3046,7 @@ void vpx_sad16x16x3_c(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_c
@@ -3058,7 +3073,7 @@ unsigned int vpx_sad16x32_avg_c(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_c
@@ -3085,7 +3100,7 @@ void vpx_sad16x8x3_c(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_c
@@ -3112,7 +3127,7 @@ unsigned int vpx_sad32x16_avg_c(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_c
@@ -3132,7 +3147,7 @@ unsigned int vpx_sad32x32_avg_c(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x32x4d vpx_sad32x32x4d_c
@@ -3152,7 +3167,7 @@ unsigned int vpx_sad32x64_avg_c(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_c
@@ -3179,7 +3194,7 @@ void vpx_sad4x4x3_c(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_c
@@ -3206,7 +3221,7 @@ unsigned int vpx_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_c
@@ -3226,7 +3241,7 @@ unsigned int vpx_sad64x32_avg_c(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_c
@@ -3246,7 +3261,7 @@ unsigned int vpx_sad64x64_avg_c(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x64x4d vpx_sad64x64x4d_c
@@ -3273,7 +3288,7 @@ void vpx_sad8x16x3_c(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_c
@@ -3300,7 +3315,7 @@ unsigned int vpx_sad8x4_avg_c(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_c
@@ -3327,7 +3342,7 @@ void vpx_sad8x8x3_c(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_c
@@ -3421,9 +3436,9 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3431,9 +3446,9 @@ uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x16 vpx_sub_pixel_avg_variance16x16_c
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3441,9 +3456,9 @@ uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x32 vpx_sub_pixel_avg_variance16x32_c
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3451,9 +3466,9 @@ uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance16x8 vpx_sub_pixel_avg_variance16x8_c
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3461,9 +3476,9 @@ uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x16 vpx_sub_pixel_avg_variance32x16_c
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3471,9 +3486,9 @@ uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x32 vpx_sub_pixel_avg_variance32x32_c
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3481,9 +3496,9 @@ uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance32x64 vpx_sub_pixel_avg_variance32x64_c
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3491,9 +3506,9 @@ uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x4 vpx_sub_pixel_avg_variance4x4_c
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3501,9 +3516,9 @@ uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance4x8 vpx_sub_pixel_avg_variance4x8_c
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3511,9 +3526,9 @@ uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x32 vpx_sub_pixel_avg_variance64x32_c
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3521,9 +3536,9 @@ uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance64x64 vpx_sub_pixel_avg_variance64x64_c
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3531,9 +3546,9 @@ uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x16 vpx_sub_pixel_avg_variance8x16_c
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3541,9 +3556,9 @@ uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x4 vpx_sub_pixel_avg_variance8x4_c
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -3551,117 +3566,117 @@ uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
#define vpx_sub_pixel_avg_variance8x8 vpx_sub_pixel_avg_variance8x8_c
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x16 vpx_sub_pixel_variance16x16_c
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x32 vpx_sub_pixel_variance16x32_c
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance16x8 vpx_sub_pixel_variance16x8_c
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x16 vpx_sub_pixel_variance32x16_c
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x32 vpx_sub_pixel_variance32x32_c
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance32x64 vpx_sub_pixel_variance32x64_c
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x4 vpx_sub_pixel_variance4x4_c
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance4x8 vpx_sub_pixel_variance4x8_c
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x32 vpx_sub_pixel_variance64x32_c
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance64x64 vpx_sub_pixel_variance64x64_c
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x16 vpx_sub_pixel_variance8x16_c
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_sub_pixel_variance8x4 vpx_sub_pixel_variance8x4_c
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -3681,146 +3696,146 @@ uint64_t vpx_sum_squares_2d_i16_c(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_c
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x16 vpx_variance16x16_c
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x32 vpx_variance16x32_c
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance16x8 vpx_variance16x8_c
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x16 vpx_variance32x16_c
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x32 vpx_variance32x32_c
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance32x64 vpx_variance32x64_c
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_c
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_c
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x32 vpx_variance64x32_c
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance64x64 vpx_variance64x64_c
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_c
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_c
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_c
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/vpx_version.h b/chromium/third_party/libvpx/source/config/vpx_version.h
index cdae165d3e8..ff39bf831a9 100644
--- a/chromium/third_party/libvpx/source/config/vpx_version.h
+++ b/chromium/third_party/libvpx/source/config/vpx_version.h
@@ -2,7 +2,7 @@
#define VERSION_MAJOR 1
#define VERSION_MINOR 7
#define VERSION_PATCH 0
-#define VERSION_EXTRA "1132-ge188b5435"
+#define VERSION_EXTRA "1420-g932f8fa04"
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.7.0-1132-ge188b5435"
-#define VERSION_STRING " v1.7.0-1132-ge188b5435"
+#define VERSION_STRING_NOSP "v1.7.0-1420-g932f8fa04"
+#define VERSION_STRING " v1.7.0-1420-g932f8fa04"
diff --git a/chromium/third_party/libvpx/source/config/win/ia32/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/win/ia32/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/win/ia32/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/ia32/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/win/ia32/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/win/ia32/vp9_rtcd.h
index 1e1da43518a..603bd31b5c3 100644
--- a/chromium/third_party/libvpx/source/config/win/ia32/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/ia32/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/win/ia32/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/win/ia32/vpx_dsp_rtcd.h
index 14d1107e665..b2b02a59aa1 100644
--- a/chromium/third_party/libvpx/source/config/win/ia32/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/ia32/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1007,13 +1007,13 @@ void vpx_hadamard_8x8_sse2(const int16_t* src_diff,
#define vpx_hadamard_8x8 vpx_hadamard_8x8_sse2
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1021,7 +1021,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1029,57 +1029,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1089,18 +1089,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1109,18 +1109,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1130,18 +1130,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1151,18 +1151,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1172,18 +1172,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1192,9 +1192,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1203,9 +1203,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1215,18 +1215,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1236,18 +1236,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1256,18 +1256,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1276,18 +1276,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1296,18 +1296,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1316,16 +1316,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1333,16 +1333,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1350,16 +1350,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1367,16 +1367,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1384,16 +1384,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1401,16 +1401,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1418,9 +1418,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1428,9 +1428,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1438,16 +1438,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1455,16 +1455,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1472,16 +1472,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1489,16 +1489,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1506,16 +1506,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1523,148 +1523,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1672,7 +1672,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1680,57 +1680,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1740,18 +1740,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1760,18 +1760,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1781,18 +1781,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1802,18 +1802,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1823,18 +1823,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1843,9 +1843,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1854,9 +1854,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1866,18 +1866,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1887,18 +1887,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1907,18 +1907,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1927,18 +1927,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1947,18 +1947,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1967,16 +1967,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1984,16 +1984,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2001,16 +2001,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2018,16 +2018,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2035,16 +2035,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2052,16 +2052,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2069,9 +2069,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2079,9 +2079,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2089,16 +2089,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2106,16 +2106,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2123,16 +2123,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2140,16 +2140,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2157,16 +2157,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2174,148 +2174,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2323,7 +2323,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2331,56 +2331,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2389,18 +2389,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2409,18 +2409,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2429,18 +2429,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2449,18 +2449,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2469,18 +2469,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2489,9 +2489,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2500,9 +2500,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2511,18 +2511,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2531,18 +2531,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2551,18 +2551,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2571,18 +2571,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2591,18 +2591,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2611,16 +2611,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2628,16 +2628,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2645,16 +2645,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2662,16 +2662,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2679,16 +2679,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2696,16 +2696,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2713,34 +2713,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2748,16 +2748,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2765,16 +2765,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2782,16 +2782,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2799,16 +2799,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2816,152 +2816,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2983,7 +2983,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -2995,7 +2995,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3007,7 +3007,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3020,7 +3020,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3032,7 +3032,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3044,7 +3044,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3057,7 +3057,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3069,7 +3069,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3081,7 +3081,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3094,7 +3094,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3106,7 +3106,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3118,7 +3118,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3131,7 +3131,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3143,7 +3143,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3155,7 +3155,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3168,7 +3168,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3180,7 +3180,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3192,7 +3192,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3205,7 +3205,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3217,7 +3217,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3229,7 +3229,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3241,7 +3241,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3254,7 +3254,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3266,7 +3266,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3278,7 +3278,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3290,427 +3290,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3718,12 +3718,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3731,120 +3731,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3902,53 +3902,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4346,9 +4361,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4434,12 +4449,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4468,12 +4483,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4502,12 +4517,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4536,12 +4551,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4570,12 +4585,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4604,12 +4619,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4629,12 +4644,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4654,12 +4669,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4688,12 +4703,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4722,12 +4737,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4756,12 +4771,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4790,12 +4805,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4824,12 +4839,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4838,104 +4853,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5245,12 +5260,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5284,68 +5299,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5546,12 +5561,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5596,12 +5611,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5651,12 +5666,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5717,12 +5732,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5767,22 +5782,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5826,12 +5841,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5876,12 +5891,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -5926,12 +5941,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -5976,12 +5991,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6026,22 +6041,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6085,12 +6100,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6135,12 +6150,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6185,12 +6200,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6316,850 +6331,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7187,315 +7202,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/config/win/x64/vp8_rtcd.h b/chromium/third_party/libvpx/source/config/win/x64/vp8_rtcd.h
index 4e9d062caae..c46bfe5733f 100644
--- a/chromium/third_party/libvpx/source/config/win/x64/vp8_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/x64/vp8_rtcd.h
@@ -27,90 +27,90 @@ struct yv12_buffer_config;
extern "C" {
#endif
-void vp8_bilinear_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_mmx
+void vp8_bilinear_predict4x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_sse2
-void vp8_bilinear_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
- int dst_pitch);
-#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_mmx
+void vp8_bilinear_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
+ int dst_pitch);
+#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_sse2
-void vp8_bilinear_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_bilinear_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_bilinear_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_blend_b_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_b vp8_blend_b_c
@@ -118,9 +118,9 @@ void vp8_blend_b_c(unsigned char* y,
void vp8_blend_mb_inner_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_inner vp8_blend_mb_inner_c
@@ -128,9 +128,9 @@ void vp8_blend_mb_inner_c(unsigned char* y,
void vp8_blend_mb_outer_c(unsigned char* y,
unsigned char* u,
unsigned char* v,
- int y1,
- int u1,
- int v1,
+ int y_1,
+ int u_1,
+ int v_1,
int alpha,
int stride);
#define vp8_blend_mb_outer vp8_blend_mb_outer_c
@@ -140,65 +140,65 @@ int vp8_block_error_sse2(short* coeff, short* dqcoeff);
#define vp8_block_error vp8_block_error_sse2
void vp8_copy32xn_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse2(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy32xn_sse3(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
RTCD_EXTERN void (*vp8_copy32xn)(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
unsigned char* dst_ptr,
int dst_stride,
- int n);
+ int height);
void vp8_copy_mem16x16_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem16x16_sse2(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
void vp8_copy_mem8x4_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x4_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_mmx
void vp8_copy_mem8x8_c(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
void vp8_copy_mem8x8_mmx(unsigned char* src,
- int src_pitch,
+ int src_stride,
unsigned char* dst,
- int dst_pitch);
+ int dst_stride);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_mmx
-void vp8_dc_only_idct_add_c(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_c(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
-void vp8_dc_only_idct_add_mmx(short input,
- unsigned char* pred,
+void vp8_dc_only_idct_add_mmx(short input_dc,
+ unsigned char* pred_ptr,
int pred_stride,
- unsigned char* dst,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_mmx
@@ -240,11 +240,11 @@ int vp8_denoiser_filter_uv_sse2(unsigned char* mc_running_avg,
void vp8_dequant_idct_add_c(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
void vp8_dequant_idct_add_mmx(short* input,
short* dq,
- unsigned char* output,
+ unsigned char* dest,
int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_mmx
@@ -274,8 +274,8 @@ void vp8_dequant_idct_add_y_block_sse2(short* q,
char* eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_sse2
-void vp8_dequantize_b_c(struct blockd*, short* dqc);
-void vp8_dequantize_b_mmx(struct blockd*, short* dqc);
+void vp8_dequantize_b_c(struct blockd*, short* DQC);
+void vp8_dequantize_b_mmx(struct blockd*, short* DQC);
#define vp8_dequantize_b vp8_dequantize_b_mmx
int vp8_diamond_search_sad_c(struct macroblock* x,
@@ -375,91 +375,91 @@ RTCD_EXTERN int (*vp8_full_search_sad)(struct macroblock* x,
int* mvcost[2],
union int_mv* center_mv);
-void vp8_loop_filter_bh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_sse2
-void vp8_loop_filter_bv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_bv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_bv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_sse2
-void vp8_loop_filter_mbh_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbh_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbh_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_sse2
-void vp8_loop_filter_mbv_c(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_c(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
-void vp8_loop_filter_mbv_sse2(unsigned char* y,
- unsigned char* u,
- unsigned char* v,
- int ystride,
+void vp8_loop_filter_mbv_sse2(unsigned char* y_ptr,
+ unsigned char* u_ptr,
+ unsigned char* v_ptr,
+ int y_stride,
int uv_stride,
struct loop_filter_info* lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_sse2
-void vp8_loop_filter_bhs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bhs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bhs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_sse2
-void vp8_loop_filter_bvs_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_bvs_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_bvs_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_sse2
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_sse2
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
-void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y,
- int ystride,
+void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char* y_ptr,
+ int y_stride,
const unsigned char* blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_sse2
@@ -475,8 +475,8 @@ int vp8_refining_search_sad_c(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -484,8 +484,8 @@ int vp8_refining_search_sadx4(struct macroblock* x,
struct block* b,
struct blockd* d,
union int_mv* ref_mv,
- int sad_per_bit,
- int distance,
+ int error_per_bit,
+ int search_range,
struct variance_vtable* fn_ptr,
int* mvcost[2],
union int_mv* center_mv);
@@ -505,126 +505,126 @@ void vp8_short_fdct8x4_sse2(short* input, short* output, int pitch);
#define vp8_short_fdct8x4 vp8_short_fdct8x4_sse2
void vp8_short_idct4x4llm_c(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
void vp8_short_idct4x4llm_mmx(short* input,
- unsigned char* pred,
- int pitch,
- unsigned char* dst,
+ unsigned char* pred_ptr,
+ int pred_stride,
+ unsigned char* dst_ptr,
int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_mmx
-void vp8_short_inv_walsh4x4_c(short* input, short* output);
-void vp8_short_inv_walsh4x4_sse2(short* input, short* output);
+void vp8_short_inv_walsh4x4_c(short* input, short* mb_dqcoeff);
+void vp8_short_inv_walsh4x4_sse2(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_sse2
-void vp8_short_inv_walsh4x4_1_c(short* input, short* output);
+void vp8_short_inv_walsh4x4_1_c(short* input, short* mb_dqcoeff);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_short_walsh4x4_c(short* input, short* output, int pitch);
void vp8_short_walsh4x4_sse2(short* input, short* output, int pitch);
#define vp8_short_walsh4x4 vp8_short_walsh4x4_sse2
-void vp8_sixtap_predict16x16_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict16x16_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict16x16_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_mmx(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_mmx(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict4x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict4x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x4_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x4_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_c(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_c(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_sse2(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_sse2(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-void vp8_sixtap_predict8x8_ssse3(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+void vp8_sixtap_predict8x8_ssse3(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
-RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src,
- int src_pitch,
- int xofst,
- int yofst,
- unsigned char* dst,
+RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char* src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ unsigned char* dst_ptr,
int dst_pitch);
void vp8_rtcd(void);
diff --git a/chromium/third_party/libvpx/source/config/win/x64/vp9_rtcd.h b/chromium/third_party/libvpx/source/config/win/x64/vp9_rtcd.h
index 6f00c78fb74..28787a89e6f 100644
--- a/chromium/third_party/libvpx/source/config/win/x64/vp9_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/x64/vp9_rtcd.h
@@ -242,18 +242,18 @@ void vp9_highbd_fwht4x4_c(const int16_t* input, tran_low_t* output, int stride);
#define vp9_highbd_fwht4x4 vp9_highbd_fwht4x4_c
void vp9_highbd_iht16x16_256_add_c(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
void vp9_highbd_iht16x16_256_add_sse4_1(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
RTCD_EXTERN void (*vp9_highbd_iht16x16_256_add)(const tran_low_t* input,
- uint16_t* output,
- int pitch,
+ uint16_t* dest,
+ int stride,
int tx_type,
int bd);
@@ -351,12 +351,12 @@ void vp9_highbd_temporal_filter_apply_c(const uint8_t* frame1,
#define vp9_highbd_temporal_filter_apply vp9_highbd_temporal_filter_apply_c
void vp9_iht16x16_256_add_c(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t* input,
- uint8_t* output,
- int pitch,
+ uint8_t* dest,
+ int stride,
int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_sse2
diff --git a/chromium/third_party/libvpx/source/config/win/x64/vpx_dsp_rtcd.h b/chromium/third_party/libvpx/source/config/win/x64/vpx_dsp_rtcd.h
index 9970fae1a91..3b28a11dbc8 100644
--- a/chromium/third_party/libvpx/source/config/win/x64/vpx_dsp_rtcd.h
+++ b/chromium/third_party/libvpx/source/config/win/x64/vpx_dsp_rtcd.h
@@ -427,420 +427,420 @@ void vpx_convolve_copy_sse2(const uint8_t* src,
#define vpx_convolve_copy vpx_convolve_copy_sse2
void vpx_d117_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d153_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_sse2
void vpx_d207_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d207_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_sse2
void vpx_d45_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d45_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_sse2
void vpx_d45e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d63_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_16x16_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_32x32_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_4x4_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63_predictor_8x8_ssse3(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_d63e_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_sse2
void vpx_dc_128_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_sse2
void vpx_dc_128_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_sse2
void vpx_dc_128_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_sse2
void vpx_dc_left_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_sse2
void vpx_dc_left_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_sse2
void vpx_dc_left_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_sse2
void vpx_dc_left_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_sse2
void vpx_dc_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_sse2
void vpx_dc_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_sse2
void vpx_dc_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_sse2
void vpx_dc_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_sse2
void vpx_dc_top_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_sse2
void vpx_dc_top_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_sse2
void vpx_dc_top_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_sse2
void vpx_dc_top_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_sse2
@@ -884,44 +884,44 @@ void vpx_fdct8x8_1_sse2(const int16_t* input, tran_low_t* output, int stride);
#define vpx_fdct8x8_1 vpx_fdct8x8_1_sse2
void vpx_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get16x16var_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
RTCD_EXTERN void (*vpx_get16x16var)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
unsigned int vpx_get4x4sse_cs_c(const unsigned char* src_ptr,
- int source_stride,
+ int src_stride,
const unsigned char* ref_ptr,
int ref_stride);
#define vpx_get4x4sse_cs vpx_get4x4sse_cs_c
void vpx_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
int* sum);
void vpx_get8x8var_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -933,41 +933,41 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t*);
#define vpx_get_mb_ss vpx_get_mb_ss_sse2
void vpx_h_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_sse2
void vpx_h_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_sse2
void vpx_h_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_sse2
void vpx_h_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_h_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_sse2
@@ -1012,13 +1012,13 @@ RTCD_EXTERN void (*vpx_hadamard_8x8)(const int16_t* src_diff,
tran_low_t* coeff);
void vpx_he_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1026,7 +1026,7 @@ void vpx_highbd_10_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get16x16var vpx_highbd_10_get16x16var_c
void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1034,57 +1034,57 @@ void vpx_highbd_10_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_10_get8x8var vpx_highbd_10_get8x8var_c
unsigned int vpx_highbd_10_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x16 vpx_highbd_10_mse16x16_sse2
unsigned int vpx_highbd_10_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse16x8 vpx_highbd_10_mse16x8_c
unsigned int vpx_highbd_10_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x16 vpx_highbd_10_mse8x16_c
unsigned int vpx_highbd_10_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_mse8x8 vpx_highbd_10_mse8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1094,18 +1094,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1114,18 +1114,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_10_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1135,18 +1135,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1156,18 +1156,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1177,18 +1177,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1197,9 +1197,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1208,9 +1208,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1220,18 +1220,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1241,18 +1241,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1261,18 +1261,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_10_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1281,18 +1281,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1301,18 +1301,18 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1321,16 +1321,16 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_10_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1338,16 +1338,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1355,16 +1355,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1372,16 +1372,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1389,16 +1389,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1406,16 +1406,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1423,9 +1423,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1433,9 +1433,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x4_c
uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1443,16 +1443,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance4x8_c
uint32_t vpx_highbd_10_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1460,16 +1460,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_10_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1477,16 +1477,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1494,16 +1494,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1511,16 +1511,16 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_10_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1528,148 +1528,148 @@ uint32_t vpx_highbd_10_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_10_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_10_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x16 vpx_highbd_10_variance16x16_sse2
unsigned int vpx_highbd_10_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x32 vpx_highbd_10_variance16x32_sse2
unsigned int vpx_highbd_10_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance16x8 vpx_highbd_10_variance16x8_sse2
unsigned int vpx_highbd_10_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x16 vpx_highbd_10_variance32x16_sse2
unsigned int vpx_highbd_10_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x32 vpx_highbd_10_variance32x32_sse2
unsigned int vpx_highbd_10_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance32x64 vpx_highbd_10_variance32x64_sse2
unsigned int vpx_highbd_10_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x4 vpx_highbd_10_variance4x4_c
unsigned int vpx_highbd_10_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance4x8 vpx_highbd_10_variance4x8_c
unsigned int vpx_highbd_10_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x32 vpx_highbd_10_variance64x32_sse2
unsigned int vpx_highbd_10_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance64x64 vpx_highbd_10_variance64x64_sse2
unsigned int vpx_highbd_10_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x16 vpx_highbd_10_variance8x16_sse2
unsigned int vpx_highbd_10_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x4 vpx_highbd_10_variance8x4_c
unsigned int vpx_highbd_10_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_10_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_10_variance8x8 vpx_highbd_10_variance8x8_sse2
void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1677,7 +1677,7 @@ void vpx_highbd_12_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get16x16var vpx_highbd_12_get16x16var_c
void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -1685,57 +1685,57 @@ void vpx_highbd_12_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_12_get8x8var vpx_highbd_12_get8x8var_c
unsigned int vpx_highbd_12_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x16 vpx_highbd_12_mse16x16_sse2
unsigned int vpx_highbd_12_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse16x8 vpx_highbd_12_mse16x8_c
unsigned int vpx_highbd_12_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x16 vpx_highbd_12_mse8x16_c
unsigned int vpx_highbd_12_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_mse8x8 vpx_highbd_12_mse8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1745,18 +1745,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1765,18 +1765,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_12_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1786,18 +1786,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1807,18 +1807,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1828,18 +1828,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1848,9 +1848,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1859,9 +1859,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1871,18 +1871,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1892,18 +1892,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32_sse2(
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_c(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1912,18 +1912,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_12_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1932,18 +1932,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1952,18 +1952,18 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -1972,16 +1972,16 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_12_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -1989,16 +1989,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2006,16 +2006,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2023,16 +2023,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2040,16 +2040,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2057,16 +2057,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2074,9 +2074,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2084,9 +2084,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x4_c
uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2094,16 +2094,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance4x8_c
uint32_t vpx_highbd_12_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2111,16 +2111,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_12_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2128,16 +2128,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2145,16 +2145,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2162,16 +2162,16 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_12_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2179,148 +2179,148 @@ uint32_t vpx_highbd_12_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_12_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_12_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x16 vpx_highbd_12_variance16x16_sse2
unsigned int vpx_highbd_12_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x32 vpx_highbd_12_variance16x32_sse2
unsigned int vpx_highbd_12_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance16x8 vpx_highbd_12_variance16x8_sse2
unsigned int vpx_highbd_12_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x16 vpx_highbd_12_variance32x16_sse2
unsigned int vpx_highbd_12_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x32 vpx_highbd_12_variance32x32_sse2
unsigned int vpx_highbd_12_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance32x64 vpx_highbd_12_variance32x64_sse2
unsigned int vpx_highbd_12_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x4 vpx_highbd_12_variance4x4_c
unsigned int vpx_highbd_12_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance4x8 vpx_highbd_12_variance4x8_c
unsigned int vpx_highbd_12_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x32 vpx_highbd_12_variance64x32_sse2
unsigned int vpx_highbd_12_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance64x64 vpx_highbd_12_variance64x64_sse2
unsigned int vpx_highbd_12_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x16 vpx_highbd_12_variance8x16_sse2
unsigned int vpx_highbd_12_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x4 vpx_highbd_12_variance8x4_c
unsigned int vpx_highbd_12_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_12_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_12_variance8x8 vpx_highbd_12_variance8x8_sse2
void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2328,7 +2328,7 @@ void vpx_highbd_8_get16x16var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get16x16var vpx_highbd_8_get16x16var_c
void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse,
@@ -2336,56 +2336,56 @@ void vpx_highbd_8_get8x8var_c(const uint8_t* src_ptr,
#define vpx_highbd_8_get8x8var vpx_highbd_8_get8x8var_c
unsigned int vpx_highbd_8_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x16 vpx_highbd_8_mse16x16_sse2
unsigned int vpx_highbd_8_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse16x8 vpx_highbd_8_mse16x8_c
unsigned int vpx_highbd_8_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x16 vpx_highbd_8_mse8x16_c
unsigned int vpx_highbd_8_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_mse8x8 vpx_highbd_8_mse8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2394,18 +2394,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2414,18 +2414,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2434,18 +2434,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2454,18 +2454,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2474,18 +2474,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2494,9 +2494,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2505,9 +2505,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2516,18 +2516,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_avg_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2536,18 +2536,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2556,18 +2556,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64_sse2(
vpx_highbd_8_sub_pixel_avg_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2576,18 +2576,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2596,18 +2596,18 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
@@ -2616,16 +2616,16 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8_sse2(
vpx_highbd_8_sub_pixel_avg_variance8x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2633,16 +2633,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2650,16 +2650,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2667,16 +2667,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance16x8_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2684,16 +2684,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2701,16 +2701,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2718,34 +2718,34 @@ uint32_t vpx_highbd_8_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance32x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x4 vpx_highbd_8_sub_pixel_variance4x4_c
uint32_t vpx_highbd_8_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
#define vpx_highbd_8_sub_pixel_variance4x8 vpx_highbd_8_sub_pixel_variance4x8_c
uint32_t vpx_highbd_8_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2753,16 +2753,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x32_sse2
uint32_t vpx_highbd_8_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2770,16 +2770,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance64x64_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2787,16 +2787,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x16_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2804,16 +2804,16 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x4_sse2
uint32_t vpx_highbd_8_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -2821,152 +2821,152 @@ uint32_t vpx_highbd_8_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
vpx_highbd_8_sub_pixel_variance8x8_sse2
unsigned int vpx_highbd_8_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x16 vpx_highbd_8_variance16x16_sse2
unsigned int vpx_highbd_8_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x32 vpx_highbd_8_variance16x32_sse2
unsigned int vpx_highbd_8_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance16x8 vpx_highbd_8_variance16x8_sse2
unsigned int vpx_highbd_8_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x16 vpx_highbd_8_variance32x16_sse2
unsigned int vpx_highbd_8_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x32 vpx_highbd_8_variance32x32_sse2
unsigned int vpx_highbd_8_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance32x64 vpx_highbd_8_variance32x64_sse2
unsigned int vpx_highbd_8_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x4 vpx_highbd_8_variance4x4_c
unsigned int vpx_highbd_8_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance4x8 vpx_highbd_8_variance4x8_c
unsigned int vpx_highbd_8_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x32 vpx_highbd_8_variance64x32_sse2
unsigned int vpx_highbd_8_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance64x64 vpx_highbd_8_variance64x64_sse2
unsigned int vpx_highbd_8_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x16 vpx_highbd_8_variance8x16_sse2
unsigned int vpx_highbd_8_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x4 vpx_highbd_8_variance8x4_c
unsigned int vpx_highbd_8_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_highbd_8_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_highbd_8_variance8x8 vpx_highbd_8_variance8x8_sse2
-unsigned int vpx_highbd_avg_4x4_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_4x4_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_4x4 vpx_highbd_avg_4x4_sse2
-unsigned int vpx_highbd_avg_8x8_c(const uint8_t*, int p);
-unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t*, int p);
+unsigned int vpx_highbd_avg_8x8_c(const uint8_t* s8, int p);
+unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t* s8, int p);
#define vpx_highbd_avg_8x8 vpx_highbd_avg_8x8_sse2
void vpx_highbd_comp_avg_pred_c(uint16_t* comp_pred,
@@ -2988,7 +2988,7 @@ void vpx_highbd_convolve8_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3000,7 +3000,7 @@ void vpx_highbd_convolve8_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3012,7 +3012,7 @@ void vpx_highbd_convolve8_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3024,7 +3024,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3037,7 +3037,7 @@ void vpx_highbd_convolve8_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3049,7 +3049,7 @@ void vpx_highbd_convolve8_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3061,7 +3061,7 @@ void vpx_highbd_convolve8_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3073,7 +3073,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3086,7 +3086,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3098,7 +3098,7 @@ void vpx_highbd_convolve8_avg_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3110,7 +3110,7 @@ void vpx_highbd_convolve8_avg_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3122,7 +3122,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3135,7 +3135,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3147,7 +3147,7 @@ void vpx_highbd_convolve8_avg_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3159,7 +3159,7 @@ void vpx_highbd_convolve8_avg_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3171,7 +3171,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_avg_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3184,7 +3184,7 @@ void vpx_highbd_convolve8_horiz_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3196,7 +3196,7 @@ void vpx_highbd_convolve8_horiz_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3208,7 +3208,7 @@ void vpx_highbd_convolve8_horiz_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3220,7 +3220,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_horiz)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3233,7 +3233,7 @@ void vpx_highbd_convolve8_vert_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3245,7 +3245,7 @@ void vpx_highbd_convolve8_vert_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3257,7 +3257,7 @@ void vpx_highbd_convolve8_vert_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3269,7 +3269,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve8_vert)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3282,7 +3282,7 @@ void vpx_highbd_convolve_avg_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3294,7 +3294,7 @@ void vpx_highbd_convolve_avg_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3306,7 +3306,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3318,7 +3318,7 @@ RTCD_EXTERN void (*vpx_highbd_convolve_avg)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_c(const uint16_t* src,
ptrdiff_t src_stride,
@@ -3331,7 +3331,7 @@ void vpx_highbd_convolve_copy_c(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3343,7 +3343,7 @@ void vpx_highbd_convolve_copy_sse2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3355,7 +3355,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
ptrdiff_t src_stride,
uint16_t* dst,
@@ -3367,427 +3367,427 @@ RTCD_EXTERN void (*vpx_highbd_convolve_copy)(const uint16_t* src,
int y_step_q4,
int w,
int h,
- int bps);
+ int bd);
void vpx_highbd_d117_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d117_predictor_4x4 vpx_highbd_d117_predictor_4x4_sse2
void vpx_highbd_d117_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d117_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d135_predictor_4x4 vpx_highbd_d135_predictor_4x4_sse2
void vpx_highbd_d135_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d135_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d153_predictor_4x4 vpx_highbd_d153_predictor_4x4_sse2
void vpx_highbd_d153_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d153_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d207_predictor_4x4 vpx_highbd_d207_predictor_4x4_sse2
void vpx_highbd_d207_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d207_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_4x4)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d45_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_16x16)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_32x32)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_d63_predictor_4x4 vpx_highbd_d63_predictor_4x4_sse2
void vpx_highbd_d63_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
RTCD_EXTERN void (*vpx_highbd_d63_predictor_8x8)(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_16x16 vpx_highbd_dc_128_predictor_16x16_sse2
void vpx_highbd_dc_128_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_32x32 vpx_highbd_dc_128_predictor_32x32_sse2
void vpx_highbd_dc_128_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_4x4 vpx_highbd_dc_128_predictor_4x4_sse2
void vpx_highbd_dc_128_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_128_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_128_predictor_8x8 vpx_highbd_dc_128_predictor_8x8_sse2
void vpx_highbd_dc_left_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3795,12 +3795,12 @@ void vpx_highbd_dc_left_predictor_16x16_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_16x16_sse2
void vpx_highbd_dc_left_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3808,120 +3808,120 @@ void vpx_highbd_dc_left_predictor_32x32_sse2(uint16_t* dst,
vpx_highbd_dc_left_predictor_32x32_sse2
void vpx_highbd_dc_left_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_4x4 vpx_highbd_dc_left_predictor_4x4_sse2
void vpx_highbd_dc_left_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_left_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_left_predictor_8x8 vpx_highbd_dc_left_predictor_8x8_sse2
void vpx_highbd_dc_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_16x16 vpx_highbd_dc_predictor_16x16_sse2
void vpx_highbd_dc_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_32x32 vpx_highbd_dc_predictor_32x32_sse2
void vpx_highbd_dc_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_4x4 vpx_highbd_dc_predictor_4x4_sse2
void vpx_highbd_dc_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_predictor_8x8 vpx_highbd_dc_predictor_8x8_sse2
void vpx_highbd_dc_top_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_16x16 vpx_highbd_dc_top_predictor_16x16_sse2
void vpx_highbd_dc_top_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_32x32 vpx_highbd_dc_top_predictor_32x32_sse2
void vpx_highbd_dc_top_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_dc_top_predictor_4x4 vpx_highbd_dc_top_predictor_4x4_sse2
void vpx_highbd_dc_top_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_dc_top_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -3979,53 +3979,68 @@ void vpx_highbd_fdct8x8_1_c(const int16_t* input,
#define vpx_highbd_fdct8x8_1 vpx_highbd_fdct8x8_1_c
void vpx_highbd_h_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_16x16 vpx_highbd_h_predictor_16x16_sse2
void vpx_highbd_h_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_32x32 vpx_highbd_h_predictor_32x32_sse2
void vpx_highbd_h_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_4x4 vpx_highbd_h_predictor_4x4_sse2
void vpx_highbd_h_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_h_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_h_predictor_8x8 vpx_highbd_h_predictor_8x8_sse2
+void vpx_highbd_hadamard_16x16_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_16x16 vpx_highbd_hadamard_16x16_c
+
+void vpx_highbd_hadamard_32x32_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_32x32 vpx_highbd_hadamard_32x32_c
+
+void vpx_highbd_hadamard_8x8_c(const int16_t* src_diff,
+ ptrdiff_t src_stride,
+ tran_low_t* coeff);
+#define vpx_highbd_hadamard_8x8 vpx_highbd_hadamard_8x8_c
+
void vpx_highbd_idct16x16_10_add_c(const tran_low_t* input,
uint16_t* dest,
int stride,
@@ -4423,9 +4438,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t* s,
int bd);
#define vpx_highbd_lpf_vertical_8_dual vpx_highbd_lpf_vertical_8_dual_sse2
-void vpx_highbd_minmax_8x8_c(const uint8_t* s,
+void vpx_highbd_minmax_8x8_c(const uint8_t* s8,
int p,
- const uint8_t* d,
+ const uint8_t* d8,
int dp,
int* min,
int* max);
@@ -4511,12 +4526,12 @@ unsigned int vpx_highbd_sad16x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x16x4d vpx_highbd_sad16x16x4d_sse2
@@ -4545,12 +4560,12 @@ unsigned int vpx_highbd_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x32x4d vpx_highbd_sad16x32x4d_sse2
@@ -4579,12 +4594,12 @@ unsigned int vpx_highbd_sad16x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad16x8x4d vpx_highbd_sad16x8x4d_sse2
@@ -4613,12 +4628,12 @@ unsigned int vpx_highbd_sad32x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x16x4d vpx_highbd_sad32x16x4d_sse2
@@ -4647,12 +4662,12 @@ unsigned int vpx_highbd_sad32x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x32x4d vpx_highbd_sad32x32x4d_sse2
@@ -4681,12 +4696,12 @@ unsigned int vpx_highbd_sad32x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad32x64x4d vpx_highbd_sad32x64x4d_sse2
@@ -4706,12 +4721,12 @@ unsigned int vpx_highbd_sad4x4_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x4x4d vpx_highbd_sad4x4x4d_sse2
@@ -4731,12 +4746,12 @@ unsigned int vpx_highbd_sad4x8_avg_c(const uint8_t* src_ptr,
void vpx_highbd_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad4x8x4d vpx_highbd_sad4x8x4d_sse2
@@ -4765,12 +4780,12 @@ unsigned int vpx_highbd_sad64x32_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x32x4d vpx_highbd_sad64x32x4d_sse2
@@ -4799,12 +4814,12 @@ unsigned int vpx_highbd_sad64x64_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad64x64x4d vpx_highbd_sad64x64x4d_sse2
@@ -4833,12 +4848,12 @@ unsigned int vpx_highbd_sad8x16_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x16x4d vpx_highbd_sad8x16x4d_sse2
@@ -4867,12 +4882,12 @@ unsigned int vpx_highbd_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x4x4d vpx_highbd_sad8x4x4d_sse2
@@ -4901,12 +4916,12 @@ unsigned int vpx_highbd_sad8x8_avg_sse2(const uint8_t* src_ptr,
void vpx_highbd_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_highbd_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_highbd_sad8x8x4d vpx_highbd_sad8x8x4d_sse2
@@ -4915,104 +4930,104 @@ void vpx_highbd_subtract_block_c(int rows,
int cols,
int16_t* diff_ptr,
ptrdiff_t diff_stride,
- const uint8_t* src_ptr,
+ const uint8_t* src8_ptr,
ptrdiff_t src_stride,
- const uint8_t* pred_ptr,
+ const uint8_t* pred8_ptr,
ptrdiff_t pred_stride,
int bd);
#define vpx_highbd_subtract_block vpx_highbd_subtract_block_c
void vpx_highbd_tm_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_16x16 vpx_highbd_tm_predictor_16x16_sse2
void vpx_highbd_tm_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_32x32 vpx_highbd_tm_predictor_32x32_sse2
void vpx_highbd_tm_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_4x4 vpx_highbd_tm_predictor_4x4_sse2
void vpx_highbd_tm_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_tm_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_tm_predictor_8x8 vpx_highbd_tm_predictor_8x8_sse2
void vpx_highbd_v_predictor_16x16_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_16x16_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_16x16 vpx_highbd_v_predictor_16x16_sse2
void vpx_highbd_v_predictor_32x32_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_32x32_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_32x32 vpx_highbd_v_predictor_32x32_sse2
void vpx_highbd_v_predictor_4x4_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_4x4_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
#define vpx_highbd_v_predictor_4x4 vpx_highbd_v_predictor_4x4_sse2
void vpx_highbd_v_predictor_8x8_c(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
void vpx_highbd_v_predictor_8x8_sse2(uint16_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint16_t* above,
const uint16_t* left,
int bd);
@@ -5322,12 +5337,12 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t* s,
const uint8_t* thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_sse2
-void vpx_mbpost_proc_across_ip_c(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_c(unsigned char* src,
int pitch,
int rows,
int cols,
int flimit);
-void vpx_mbpost_proc_across_ip_sse2(unsigned char* dst,
+void vpx_mbpost_proc_across_ip_sse2(unsigned char* src,
int pitch,
int rows,
int cols,
@@ -5361,68 +5376,68 @@ void vpx_minmax_8x8_sse2(const uint8_t* s,
#define vpx_minmax_8x8 vpx_minmax_8x8_sse2
unsigned int vpx_mse16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_mse16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x16 vpx_mse8x16_sse2
unsigned int vpx_mse8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
unsigned int vpx_mse8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
- int recon_stride,
+ int ref_stride,
unsigned int* sse);
#define vpx_mse8x8 vpx_mse8x8_sse2
@@ -5623,12 +5638,12 @@ RTCD_EXTERN void (*vpx_sad16x16x3)(const uint8_t* src_ptr,
void vpx_sad16x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x16x4d vpx_sad16x16x4d_sse2
@@ -5673,12 +5688,12 @@ unsigned int vpx_sad16x32_avg_sse2(const uint8_t* src_ptr,
void vpx_sad16x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x32x4d vpx_sad16x32x4d_sse2
@@ -5728,12 +5743,12 @@ RTCD_EXTERN void (*vpx_sad16x8x3)(const uint8_t* src_ptr,
void vpx_sad16x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad16x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad16x8x4d vpx_sad16x8x4d_sse2
@@ -5794,12 +5809,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x16_avg)(const uint8_t* src_ptr,
void vpx_sad32x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x16x4d vpx_sad32x16x4d_sse2
@@ -5844,22 +5859,22 @@ RTCD_EXTERN unsigned int (*vpx_sad32x32_avg)(const uint8_t* src_ptr,
void vpx_sad32x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x32x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad32x32x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -5903,12 +5918,12 @@ RTCD_EXTERN unsigned int (*vpx_sad32x64_avg)(const uint8_t* src_ptr,
void vpx_sad32x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad32x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad32x64x4d vpx_sad32x64x4d_sse2
@@ -5953,12 +5968,12 @@ RTCD_EXTERN void (*vpx_sad4x4x3)(const uint8_t* src_ptr,
void vpx_sad4x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x4x4d vpx_sad4x4x4d_sse2
@@ -6003,12 +6018,12 @@ unsigned int vpx_sad4x8_avg_sse2(const uint8_t* src_ptr,
void vpx_sad4x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad4x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad4x8x4d vpx_sad4x8x4d_sse2
@@ -6053,12 +6068,12 @@ RTCD_EXTERN unsigned int (*vpx_sad64x32_avg)(const uint8_t* src_ptr,
void vpx_sad64x32x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x32x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad64x32x4d vpx_sad64x32x4d_sse2
@@ -6103,22 +6118,22 @@ RTCD_EXTERN unsigned int (*vpx_sad64x64_avg)(const uint8_t* src_ptr,
void vpx_sad64x64x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad64x64x4d_avx2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
RTCD_EXTERN void (*vpx_sad64x64x4d)(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
@@ -6162,12 +6177,12 @@ RTCD_EXTERN void (*vpx_sad8x16x3)(const uint8_t* src_ptr,
void vpx_sad8x16x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x16x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x16x4d vpx_sad8x16x4d_sse2
@@ -6212,12 +6227,12 @@ unsigned int vpx_sad8x4_avg_sse2(const uint8_t* src_ptr,
void vpx_sad8x4x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x4x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x4x4d vpx_sad8x4x4d_sse2
@@ -6262,12 +6277,12 @@ RTCD_EXTERN void (*vpx_sad8x8x3)(const uint8_t* src_ptr,
void vpx_sad8x8x4d_c(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
void vpx_sad8x8x4d_sse2(const uint8_t* src_ptr,
int src_stride,
- const uint8_t* const ref_ptr[],
+ const uint8_t* const ref_array[],
int ref_stride,
uint32_t* sad_array);
#define vpx_sad8x8x4d vpx_sad8x8x4d_sse2
@@ -6393,850 +6408,850 @@ void vpx_scaled_vert_c(const uint8_t* src,
#define vpx_scaled_vert vpx_scaled_vert_c
uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance16x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance32x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance4x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x32)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance64x64)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x16)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x4)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_avg_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_avg_variance8x8)(
const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse,
const uint8_t* second_pred);
uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance16x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance32x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance4x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance4x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x32_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x16_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x16)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x4_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x4)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
uint32_t vpx_sub_pixel_variance8x8_ssse3(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
RTCD_EXTERN uint32_t (*vpx_sub_pixel_variance8x8)(const uint8_t* src_ptr,
- int source_stride,
- int xoffset,
- int yoffset,
+ int src_stride,
+ int x_offset,
+ int y_offset,
const uint8_t* ref_ptr,
int ref_stride,
uint32_t* sse);
@@ -7264,315 +7279,315 @@ uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t* src, int stride, int size);
#define vpx_sum_squares_2d_i16 vpx_sum_squares_2d_i16_sse2
void vpx_tm_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_sse2
void vpx_tm_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_sse2
void vpx_tm_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_sse2
void vpx_tm_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_tm_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_sse2
void vpx_v_predictor_16x16_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_16x16_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_sse2
void vpx_v_predictor_32x32_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_32x32_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_sse2
void vpx_v_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_4x4_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_sse2
void vpx_v_predictor_8x8_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
void vpx_v_predictor_8x8_sse2(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_sse2
unsigned int vpx_variance16x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance16x8_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance16x8)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x16_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x16)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance32x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance32x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x4 vpx_variance4x4_sse2
unsigned int vpx_variance4x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance4x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance4x8 vpx_variance4x8_sse2
unsigned int vpx_variance64x32_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x32_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x32)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance64x64_avx2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
RTCD_EXTERN unsigned int (*vpx_variance64x64)(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x16_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x16 vpx_variance8x16_sse2
unsigned int vpx_variance8x4_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x4_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x4 vpx_variance8x4_sse2
unsigned int vpx_variance8x8_c(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
unsigned int vpx_variance8x8_sse2(const uint8_t* src_ptr,
- int source_stride,
+ int src_stride,
const uint8_t* ref_ptr,
int ref_stride,
unsigned int* sse);
#define vpx_variance8x8 vpx_variance8x8_sse2
void vpx_ve_predictor_4x4_c(uint8_t* dst,
- ptrdiff_t y_stride,
+ ptrdiff_t stride,
const uint8_t* above,
const uint8_t* left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
diff --git a/chromium/third_party/libvpx/source/libvpx/CHANGELOG b/chromium/third_party/libvpx/source/libvpx/CHANGELOG
index 2281394c8ed..52089df0600 100644
--- a/chromium/third_party/libvpx/source/libvpx/CHANGELOG
+++ b/chromium/third_party/libvpx/source/libvpx/CHANGELOG
@@ -1,4 +1,4 @@
-2017-01-04 v1.7.0 "Mandarin Duck"
+2018-01-04 v1.7.0 "Mandarin Duck"
This release focused on high bit depth performance (10/12 bit) and vp9
encoding improvements.
diff --git a/chromium/third_party/libvpx/source/libvpx/README b/chromium/third_party/libvpx/source/libvpx/README
index 49407ed9ff3..4e8bba0d63a 100644
--- a/chromium/third_party/libvpx/source/libvpx/README
+++ b/chromium/third_party/libvpx/source/libvpx/README
@@ -63,6 +63,7 @@ COMPILING THE APPLICATIONS/LIBRARIES:
arm64-android-gcc
arm64-darwin-gcc
arm64-linux-gcc
+ arm64-win64-vs15
armv7-android-gcc
armv7-darwin-gcc
armv7-linux-rvct
diff --git a/chromium/third_party/libvpx/source/libvpx/build/make/Makefile b/chromium/third_party/libvpx/source/libvpx/build/make/Makefile
index f6b3f0630f1..c070cd0e0c6 100644
--- a/chromium/third_party/libvpx/source/libvpx/build/make/Makefile
+++ b/chromium/third_party/libvpx/source/libvpx/build/make/Makefile
@@ -99,6 +99,7 @@ distclean: clean
rm -f Makefile; \
rm -f config.log config.mk; \
rm -f vpx_config.[hc] vpx_config.asm; \
+ rm -f arm_neon.h; \
else \
rm -f $(target)-$(TOOLCHAIN).mk; \
fi
diff --git a/chromium/third_party/libvpx/source/libvpx/build/make/configure.sh b/chromium/third_party/libvpx/source/libvpx/build/make/configure.sh
index fdda759be22..2622040531e 100644
--- a/chromium/third_party/libvpx/source/libvpx/build/make/configure.sh
+++ b/chromium/third_party/libvpx/source/libvpx/build/make/configure.sh
@@ -546,6 +546,24 @@ EOF
cmp "$1" ${TMP_H} >/dev/null 2>&1 || mv ${TMP_H} "$1"
}
+write_win_arm64_neon_h_workaround() {
+ print_webm_license ${TMP_H} "/*" " */"
+ cat >> ${TMP_H} << EOF
+/* This file automatically generated by configure. Do not edit! */
+#ifndef VPX_WIN_ARM_NEON_H_WORKAROUND
+#define VPX_WIN_ARM_NEON_H_WORKAROUND
+/* The Windows SDK has arm_neon.h, but unlike on other platforms it is
+ * ARM32-only. ARM64 NEON support is provided by arm64_neon.h, a proper
+ * superset of arm_neon.h. Work around this by providing a more local
+ * arm_neon.h that simply #includes arm64_neon.h.
+ */
+#include <arm64_neon.h>
+#endif /* VPX_WIN_ARM_NEON_H_WORKAROUND */
+EOF
+ mkdir -p `dirname "$1"`
+ cmp "$1" ${TMP_H} >/dev/null 2>&1 || mv ${TMP_H} "$1"
+}
+
process_common_cmdline() {
for opt in "$@"; do
optval="${opt#*=}"
@@ -1010,18 +1028,42 @@ EOF
fi
;;
vs*)
- asm_conversion_cmd="${source_path}/build/make/ads2armasm_ms.pl"
- AS_SFX=.S
- msvs_arch_dir=arm-msvs
- disable_feature multithread
- disable_feature unit_tests
- vs_version=${tgt_cc##vs}
- if [ $vs_version -ge 12 ]; then
- # MSVC 2013 doesn't allow doing plain .exe projects for ARM,
- # only "AppContainerApplication" which requires an AppxManifest.
- # Therefore disable the examples, just build the library.
- disable_feature examples
- disable_feature tools
+ # A number of ARM-based Windows platforms are constrained by their
+ # respective SDKs' limitations. Fortunately, these are all 32-bit ABIs
+ # and so can be selected as 'win32'.
+ if [ ${tgt_os} = "win32" ]; then
+ asm_conversion_cmd="${source_path}/build/make/ads2armasm_ms.pl"
+ AS_SFX=.S
+ msvs_arch_dir=arm-msvs
+ disable_feature multithread
+ disable_feature unit_tests
+ vs_version=${tgt_cc##vs}
+ if [ $vs_version -ge 12 ]; then
+ # MSVC 2013 doesn't allow doing plain .exe projects for ARM32,
+ # only "AppContainerApplication" which requires an AppxManifest.
+ # Therefore disable the examples, just build the library.
+ disable_feature examples
+ disable_feature tools
+ fi
+ else
+ # Windows 10 on ARM, on the other hand, has full Windows SDK support
+ # for building Win32 ARM64 applications in addition to ARM64
+ # Windows Store apps. It is the only 64-bit ARM ABI that
+ # Windows supports, so it is the default definition of 'win64'.
+ # ARM64 build support officially shipped in Visual Studio 15.9.0.
+
+ # Because the ARM64 Windows SDK's arm_neon.h is ARM32-specific
+ # while LLVM's is not, probe its validity.
+ if enabled neon; then
+ if [ -n "${CC}" ]; then
+ check_header arm_neon.h || check_header arm64_neon.h && \
+ enable_feature win_arm64_neon_h_workaround
+ else
+ # If a probe is not possible, assume this is the pure Windows
+ # SDK and so the workaround is necessary.
+ enable_feature win_arm64_neon_h_workaround
+ fi
+ fi
fi
;;
rvct)
diff --git a/chromium/third_party/libvpx/source/libvpx/build/make/gen_msvs_vcxproj.sh b/chromium/third_party/libvpx/source/libvpx/build/make/gen_msvs_vcxproj.sh
index 171d0b99b6e..ae2b1cd4c8b 100755
--- a/chromium/third_party/libvpx/source/libvpx/build/make/gen_msvs_vcxproj.sh
+++ b/chromium/third_party/libvpx/source/libvpx/build/make/gen_msvs_vcxproj.sh
@@ -261,6 +261,11 @@ case "$target" in
asm_Debug_cmdline="yasm -Xvc -g cv8 -f win32 ${yasmincs} &quot;%(FullPath)&quot;"
asm_Release_cmdline="yasm -Xvc -f win32 ${yasmincs} &quot;%(FullPath)&quot;"
;;
+ arm64*)
+ platforms[0]="ARM64"
+ asm_Debug_cmdline="armasm64 -nologo -oldit &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="armasm64 -nologo -oldit &quot;%(FullPath)&quot;"
+ ;;
arm*)
platforms[0]="ARM"
asm_Debug_cmdline="armasm -nologo -oldit &quot;%(FullPath)&quot;"
@@ -307,6 +312,16 @@ generate_vcxproj() {
tag_content ApplicationType "Windows Store"
tag_content ApplicationTypeRevision 8.1
fi
+ if [ $vs_ver -eq 15 ] && [ "${platforms[0]}" = "ARM64" ]; then
+ # Require the first Visual Studio version to have ARM64 support.
+ tag_content MinimumVisualStudioVersion 15.9
+ # Require a Windows SDK that has ARM64 support rather than the
+ # default of 8.1.
+ # Since VS 15 does not have a 'use latest SDK version' facility,
+ # set WindowsTargetPlatformVersion to the first official SDK
+ # version to have ARM64 support.
+ tag_content WindowsTargetPlatformVersion 10.0.17134.0
+ fi
close_tag PropertyGroup
tag Import \
diff --git a/chromium/third_party/libvpx/source/libvpx/configure b/chromium/third_party/libvpx/source/libvpx/configure
index e864e57fb45..e0ffb55b4b5 100755
--- a/chromium/third_party/libvpx/source/libvpx/configure
+++ b/chromium/third_party/libvpx/source/libvpx/configure
@@ -102,6 +102,7 @@ all_platforms="${all_platforms} arm64-android-gcc"
all_platforms="${all_platforms} arm64-darwin-gcc"
all_platforms="${all_platforms} arm64-linux-gcc"
all_platforms="${all_platforms} arm64-win64-gcc"
+all_platforms="${all_platforms} arm64-win64-vs15"
all_platforms="${all_platforms} armv7-android-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-darwin-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-linux-rvct" #neon Cortex-A8
@@ -448,6 +449,7 @@ process_targets() {
enabled child || write_common_config_banner
write_common_target_config_h ${BUILD_PFX}vpx_config.h
write_common_config_targets
+ enabled win_arm64_neon_h_workaround && write_win_arm64_neon_h_workaround ${BUILD_PFX}arm_neon.h
# Calculate the default distribution name, based on the enabled features
cf=""
diff --git a/chromium/third_party/libvpx/source/libvpx/examples/vpx_dec_fuzzer.cc b/chromium/third_party/libvpx/source/libvpx/examples/vpx_dec_fuzzer.cc
new file mode 100644
index 00000000000..b74b47c230d
--- /dev/null
+++ b/chromium/third_party/libvpx/source/libvpx/examples/vpx_dec_fuzzer.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Fuzzer for libvpx decoders
+ * ==========================
+ * Requirements
+ * --------------
+ * Requires Clang 6.0 or above as -fsanitize=fuzzer is used as a linker
+ * option.
+
+ * Steps to build
+ * --------------
+ * Clone libvpx repository
+ $git clone https://chromium.googlesource.com/webm/libvpx
+
+ * Create a directory in parallel to libvpx and change directory
+ $mkdir vpx_dec_fuzzer
+ $cd vpx_dec_fuzzer/
+
+ * Enable sanitizers (Supported: address integer memory thread undefined)
+ $source ../libvpx/tools/set_analyzer_env.sh address
+
+ * Configure libvpx.
+ * Note --size-limit and VPX_MAX_ALLOCABLE_MEMORY are defined to avoid
+ * Out of memory errors when running generated fuzzer binary
+ $../libvpx/configure --disable-unit-tests --size-limit=12288x12288 \
+ --extra-cflags="-DVPX_MAX_ALLOCABLE_MEMORY=1073741824" \
+ --disable-webm-io --enable-debug
+
+ * Build libvpx
+ $make -j32
+
+ * Build vp9 fuzzer
+ $ $CXX $CXXFLAGS -std=c++11 -DDECODER=vp9 \
+ -fsanitize=fuzzer -I../libvpx -I. -Wl,--start-group \
+ ../libvpx/examples/vpx_dec_fuzzer.cc -o ./vpx_dec_fuzzer_vp9 \
+ ./libvpx.a ./tools_common.c.o -Wl,--end-group
+
+ * DECODER should be defined as vp9 or vp8 to enable vp9/vp8
+ *
+ * create a corpus directory and copy some ivf files there.
+ * Based on which codec (vp8/vp9) is being tested, it is recommended to
+ * have corresponding ivf files in corpus directory
+ * Empty corpus directoy also is acceptable, though not recommended
+ $mkdir CORPUS && cp some-files CORPUS
+
+ * Run fuzzing:
+ $./vpx_dec_fuzzer_vp9 CORPUS
+
+ * References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+
+#include "./tools_common.h"
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx_ports/mem_ops.h"
+
+#define VPX_TOSTRING(str) #str
+#define VPX_STRINGIFY(str) VPX_TOSTRING(str)
+
+static void CloseFile(FILE *file) { fclose(file); }
+
+/* ReadFrame is derived from ivf_read_frame in ivfdec.c
+ * This function doesn't call warn(), but instead ignores those errors.
+ * This is done to minimize the prints on console when running fuzzer
+ * Also if fread fails to read frame_size number of bytes, instead of
+ * returning an error, this returns with partial frames.
+ * This is done to ensure that partial frames are sent to decoder.
+ */
+static int ReadFrame(FILE *infile, uint8_t **buffer, size_t *bytes_read,
+ size_t *buffer_size) {
+ char raw_header[IVF_FRAME_HDR_SZ] = { 0 };
+ size_t frame_size = 0;
+
+ if (fread(raw_header, IVF_FRAME_HDR_SZ, 1, infile) == 1) {
+ frame_size = mem_get_le32(raw_header);
+
+ if (frame_size > 256 * 1024 * 1024) {
+ frame_size = 0;
+ }
+
+ if (frame_size > *buffer_size) {
+ uint8_t *new_buffer = (uint8_t *)realloc(*buffer, 2 * frame_size);
+
+ if (new_buffer) {
+ *buffer = new_buffer;
+ *buffer_size = 2 * frame_size;
+ } else {
+ frame_size = 0;
+ }
+ }
+ }
+
+ if (!feof(infile)) {
+ *bytes_read = fread(*buffer, 1, frame_size, infile);
+ return 0;
+ }
+
+ return 1;
+}
+
+extern "C" void usage_exit(void) { exit(EXIT_FAILURE); }
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ std::unique_ptr<FILE, decltype(&CloseFile)> file(
+ fmemopen((void *)data, size, "rb"), &CloseFile);
+ if (file == nullptr) {
+ return 0;
+ }
+ // Ensure input contains at least one file header and one frame header
+ if (size < IVF_FILE_HDR_SZ + IVF_FRAME_HDR_SZ) {
+ return 0;
+ }
+ char header[IVF_FILE_HDR_SZ];
+ if (fread(header, 1, IVF_FILE_HDR_SZ, file.get()) != IVF_FILE_HDR_SZ) {
+ return 0;
+ }
+ const VpxInterface *decoder = get_vpx_decoder_by_name(VPX_STRINGIFY(DECODER));
+ if (decoder == nullptr) {
+ return 0;
+ }
+
+ vpx_codec_ctx_t codec;
+ // Set thread count in the range [1, 64].
+ const unsigned int threads = (data[IVF_FILE_HDR_SZ] & 0x3f) + 1;
+ vpx_codec_dec_cfg_t cfg = { threads, 0, 0 };
+ if (vpx_codec_dec_init(&codec, decoder->codec_interface(), &cfg, 0)) {
+ return 0;
+ }
+
+ uint8_t *buffer = nullptr;
+ size_t buffer_size = 0;
+ size_t frame_size = 0;
+
+ while (!ReadFrame(file.get(), &buffer, &frame_size, &buffer_size)) {
+ const vpx_codec_err_t err =
+ vpx_codec_decode(&codec, buffer, frame_size, nullptr, 0);
+ static_cast<void>(err);
+ vpx_codec_iter_t iter = nullptr;
+ vpx_image_t *img = nullptr;
+ while ((img = vpx_codec_get_frame(&codec, &iter)) != nullptr) {
+ }
+ }
+ vpx_codec_destroy(&codec);
+ free(buffer);
+ return 0;
+}
diff --git a/chromium/third_party/libvpx/source/libvpx/libs.mk b/chromium/third_party/libvpx/source/libvpx/libs.mk
index 20aad0b4918..7ec8c87568d 100644
--- a/chromium/third_party/libvpx/source/libvpx/libs.mk
+++ b/chromium/third_party/libvpx/source/libvpx/libs.mk
@@ -112,13 +112,6 @@ ifeq ($(CONFIG_DECODERS),yes)
CODEC_DOC_SECTIONS += decoder
endif
-# Suppress -Wextra warnings in third party code.
-$(BUILD_PFX)third_party/googletest/%.cc.o: CXXFLAGS += -Wno-missing-field-initializers
-# Suppress -Wextra warnings in first party code pending investigation.
-# https://bugs.chromium.org/p/webm/issues/detail?id=1069
-$(BUILD_PFX)vp8/encoder/onyx_if.c.o: CFLAGS += -Wno-unknown-warning-option -Wno-clobbered
-$(BUILD_PFX)vp8/decoder/onyxd_if.c.o: CFLAGS += -Wno-unknown-warning-option -Wno-clobbered
-
ifeq ($(CONFIG_MSVS),yes)
CODEC_LIB=$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd)
GTEST_LIB=$(if $(CONFIG_STATIC_MSVCRT),gtestmt,gtestmd)
diff --git a/chromium/third_party/libvpx/source/libvpx/md5_utils.c b/chromium/third_party/libvpx/source/libvpx/md5_utils.c
index 093798b8339..9ddb104c8a6 100644
--- a/chromium/third_party/libvpx/source/libvpx/md5_utils.c
+++ b/chromium/third_party/libvpx/source/libvpx/md5_utils.c
@@ -163,7 +163,7 @@ void MD5Final(md5byte digest[16], struct MD5Context *ctx) {
*/
VPX_NO_UNSIGNED_OVERFLOW_CHECK void MD5Transform(UWORD32 buf[4],
UWORD32 const in[16]) {
- register UWORD32 a, b, c, d;
+ UWORD32 a, b, c, d;
a = buf[0];
b = buf[1];
diff --git a/chromium/third_party/libvpx/source/libvpx/third_party/googletest/README.libvpx b/chromium/third_party/libvpx/source/libvpx/third_party/googletest/README.libvpx
index f4ca22bbadb..9dc2a440c9c 100644
--- a/chromium/third_party/libvpx/source/libvpx/third_party/googletest/README.libvpx
+++ b/chromium/third_party/libvpx/source/libvpx/third_party/googletest/README.libvpx
@@ -24,3 +24,5 @@ Local Modifications:
- Make WithParamInterface<T>::GetParam static in order to avoid
initialization issues
https://github.com/google/googletest/pull/1830
+- Use wcslen() instead of std::wcslen()
+ https://github.com/google/googletest/pull/1899
diff --git a/chromium/third_party/libvpx/source/libvpx/third_party/googletest/src/src/gtest-printers.cc b/chromium/third_party/libvpx/source/libvpx/third_party/googletest/src/src/gtest-printers.cc
index d55a5e9bfe8..f9b274e8ba9 100644
--- a/chromium/third_party/libvpx/source/libvpx/third_party/googletest/src/src/gtest-printers.cc
+++ b/chromium/third_party/libvpx/source/libvpx/third_party/googletest/src/src/gtest-printers.cc
@@ -349,7 +349,7 @@ void PrintTo(const wchar_t* s, ostream* os) {
*os << "NULL";
} else {
*os << ImplicitCast_<const void*>(s) << " pointing to ";
- PrintCharsAsStringTo(s, std::wcslen(s), os);
+ PrintCharsAsStringTo(s, wcslen(s), os);
}
}
#endif // wchar_t is native
diff --git a/chromium/third_party/libvpx/source/libvpx/tools/tiny_ssim.c b/chromium/third_party/libvpx/source/libvpx/tools/tiny_ssim.c
index 6f4b6d7350a..36961b3551a 100644
--- a/chromium/third_party/libvpx/source/libvpx/tools/tiny_ssim.c
+++ b/chromium/third_party/libvpx/source/libvpx/tools/tiny_ssim.c
@@ -34,6 +34,10 @@ static uint64_t calc_plane_error16(uint16_t *orig, int orig_stride,
unsigned int row, col;
uint64_t total_sse = 0;
int diff;
+ if (orig == NULL || recon == NULL) {
+ assert(0);
+ return 0;
+ }
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
@@ -53,6 +57,10 @@ static uint64_t calc_plane_error(uint8_t *orig, int orig_stride, uint8_t *recon,
unsigned int row, col;
uint64_t total_sse = 0;
int diff;
+ if (orig == NULL || recon == NULL) {
+ assert(0);
+ return 0;
+ }
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
@@ -99,6 +107,9 @@ static int open_input_file(const char *file_name, input_file_t *input, int w,
int h, int bit_depth) {
char y4m_buf[4];
size_t r1;
+ input->w = w;
+ input->h = h;
+ input->bit_depth = bit_depth;
input->type = RAW_YUV;
input->buf = NULL;
input->file = strcmp(file_name, "-") ? fopen(file_name, "rb") : stdin;
@@ -187,6 +198,11 @@ void ssim_parms_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp,
uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s,
uint32_t *sum_sq_r, uint32_t *sum_sxr) {
int i, j;
+ if (s == NULL || r == NULL || sum_s == NULL || sum_r == NULL ||
+ sum_sq_s == NULL || sum_sq_r == NULL || sum_sxr == NULL) {
+ assert(0);
+ return;
+ }
for (i = 0; i < 8; i++, s += sp, r += rp) {
for (j = 0; j < 8; j++) {
*sum_s += s[j];
@@ -202,6 +218,11 @@ void highbd_ssim_parms_8x8(const uint16_t *s, int sp, const uint16_t *r, int rp,
uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s,
uint32_t *sum_sq_r, uint32_t *sum_sxr) {
int i, j;
+ if (s == NULL || r == NULL || sum_s == NULL || sum_r == NULL ||
+ sum_sq_s == NULL || sum_sq_r == NULL || sum_sxr == NULL) {
+ assert(0);
+ return;
+ }
for (i = 0; i < 8; i++, s += sp, r += rp) {
for (j = 0; j < 8; j++) {
*sum_s += s[j];
diff --git a/chromium/third_party/libvpx/source/libvpx/tools_common.h b/chromium/third_party/libvpx/source/libvpx/tools_common.h
index 41253729c6c..313acd2cfb7 100644
--- a/chromium/third_party/libvpx/source/libvpx/tools_common.h
+++ b/chromium/third_party/libvpx/source/libvpx/tools_common.h
@@ -33,6 +33,7 @@ typedef int64_t FileOffset;
#define ftello ftello64
typedef off64_t FileOffset;
#elif CONFIG_OS_SUPPORT
+#include <sys/types.h> /* NOLINT */
typedef off_t FileOffset;
/* Use 32-bit file operations in WebM file format when building ARM
* executables (.axf) with RVCT. */
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/alloccommon.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/alloccommon.h
index 517d73085da..2d376bbac31 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/alloccommon.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/alloccommon.h
@@ -21,7 +21,7 @@ void vp8_create_common(VP8_COMMON *oci);
void vp8_remove_common(VP8_COMMON *oci);
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci);
int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height);
-void vp8_setup_version(VP8_COMMON *oci);
+void vp8_setup_version(VP8_COMMON *cm);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c
index d61dde86cf5..3d02e13743c 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/arm/neon/idct_blk_neon.c
@@ -43,42 +43,42 @@ void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst,
}
void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq,
- unsigned char *dstu,
- unsigned char *dstv, int stride,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
char *eobs) {
if (((short *)(eobs))[0]) {
if (((short *)eobs)[0] & 0xfefe)
- idct_dequant_full_2x_neon(q, dq, dstu, stride);
+ idct_dequant_full_2x_neon(q, dq, dst_u, stride);
else
- idct_dequant_0_2x_neon(q, dq[0], dstu, stride);
+ idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
}
q += 32;
- dstu += 4 * stride;
+ dst_u += 4 * stride;
if (((short *)(eobs))[1]) {
if (((short *)eobs)[1] & 0xfefe)
- idct_dequant_full_2x_neon(q, dq, dstu, stride);
+ idct_dequant_full_2x_neon(q, dq, dst_u, stride);
else
- idct_dequant_0_2x_neon(q, dq[0], dstu, stride);
+ idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
}
q += 32;
if (((short *)(eobs))[2]) {
if (((short *)eobs)[2] & 0xfefe)
- idct_dequant_full_2x_neon(q, dq, dstv, stride);
+ idct_dequant_full_2x_neon(q, dq, dst_v, stride);
else
- idct_dequant_0_2x_neon(q, dq[0], dstv, stride);
+ idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
}
q += 32;
- dstv += 4 * stride;
+ dst_v += 4 * stride;
if (((short *)(eobs))[3]) {
if (((short *)eobs)[3] & 0xfefe)
- idct_dequant_full_2x_neon(q, dq, dstv, stride);
+ idct_dequant_full_2x_neon(q, dq, dst_v, stride);
else
- idct_dequant_0_2x_neon(q, dq[0], dstv, stride);
+ idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/blockd.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/blockd.h
index 22af8980b35..f8d1539739b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/blockd.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/blockd.h
@@ -13,6 +13,7 @@
void vpx_log(const char *format, ...);
+#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "mv.h"
@@ -201,8 +202,9 @@ typedef struct blockd {
union b_mode_info bmi;
} BLOCKD;
-typedef void (*vp8_subpix_fn_t)(unsigned char *src, int src_pitch, int xofst,
- int yofst, unsigned char *dst, int dst_pitch);
+typedef void (*vp8_subpix_fn_t)(unsigned char *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset,
+ unsigned char *dst_ptr, int dst_pitch);
typedef struct macroblockd {
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
@@ -288,6 +290,8 @@ typedef struct macroblockd {
int corrupted;
+ struct vpx_internal_error_info error_info;
+
#if ARCH_X86 || ARCH_X86_64
/* This is an intermediate buffer currently used in sub-pixel motion search
* to keep a copy of the reference area. This buffer can be used for other
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/entropymode.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/entropymode.c
index 239492a8cb8..cbdf59fea34 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/entropymode.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/entropymode.c
@@ -99,6 +99,6 @@ void vp8_init_mbmode_probs(VP8_COMMON *x) {
memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
}
-void vp8_default_bmode_probs(vp8_prob p[VP8_BINTRAMODES - 1]) {
- memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
+void vp8_default_bmode_probs(vp8_prob dest[VP8_BINTRAMODES - 1]) {
+ memcpy(dest, vp8_bmode_prob, sizeof(vp8_bmode_prob));
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.c
index f40d2c6bde5..6889fdeddef 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.c
@@ -21,19 +21,20 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
Note that we only consider one 4x4 subblock from each candidate 16x16
macroblock. */
void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
- int_mv *nearby, int_mv *best_mv, int cnt[4],
+ int_mv *nearby, int_mv *best_mv, int near_mv_ref_cnts[4],
int refframe, int *ref_frame_sign_bias) {
const MODE_INFO *above = here - xd->mode_info_stride;
const MODE_INFO *left = here - 1;
const MODE_INFO *aboveleft = above - 1;
int_mv near_mvs[4];
int_mv *mv = near_mvs;
- int *cntx = cnt;
+ int *cntx = near_mv_ref_cnts;
enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
/* Zero accumulators */
mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
- cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
+ near_mv_ref_cnts[0] = near_mv_ref_cnts[1] = near_mv_ref_cnts[2] =
+ near_mv_ref_cnts[3] = 0;
/* Process above */
if (above->mbmi.ref_frame != INTRA_FRAME) {
@@ -63,7 +64,7 @@ void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
*cntx += 2;
} else {
- cnt[CNT_INTRA] += 2;
+ near_mv_ref_cnts[CNT_INTRA] += 2;
}
}
@@ -83,33 +84,34 @@ void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
*cntx += 1;
} else {
- cnt[CNT_INTRA] += 1;
+ near_mv_ref_cnts[CNT_INTRA] += 1;
}
}
/* If we have three distinct MV's ... */
- if (cnt[CNT_SPLITMV]) {
+ if (near_mv_ref_cnts[CNT_SPLITMV]) {
/* See if above-left MV can be merged with NEAREST */
- if (mv->as_int == near_mvs[CNT_NEAREST].as_int) cnt[CNT_NEAREST] += 1;
+ if (mv->as_int == near_mvs[CNT_NEAREST].as_int)
+ near_mv_ref_cnts[CNT_NEAREST] += 1;
}
- cnt[CNT_SPLITMV] =
+ near_mv_ref_cnts[CNT_SPLITMV] =
((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) * 2 +
(aboveleft->mbmi.mode == SPLITMV);
/* Swap near and nearest if necessary */
- if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
+ if (near_mv_ref_cnts[CNT_NEAR] > near_mv_ref_cnts[CNT_NEAREST]) {
int tmp;
- tmp = cnt[CNT_NEAREST];
- cnt[CNT_NEAREST] = cnt[CNT_NEAR];
- cnt[CNT_NEAR] = tmp;
+ tmp = near_mv_ref_cnts[CNT_NEAREST];
+ near_mv_ref_cnts[CNT_NEAREST] = near_mv_ref_cnts[CNT_NEAR];
+ near_mv_ref_cnts[CNT_NEAR] = tmp;
tmp = near_mvs[CNT_NEAREST].as_int;
near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
near_mvs[CNT_NEAR].as_int = tmp;
}
/* Use near_mvs[0] to store the "best" MV */
- if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]) {
+ if (near_mv_ref_cnts[CNT_NEAREST] >= near_mv_ref_cnts[CNT_INTRA]) {
near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST];
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.h
index 9bb6c2e9fb9..d7db9544aa3 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/findnearmv.h
@@ -70,7 +70,7 @@ static INLINE unsigned int vp8_check_mv_bounds(int_mv *mv, int mb_to_left_edge,
}
void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
- int_mv *nearby, int_mv *best, int near_mv_ref_cts[4],
+ int_mv *nearby, int_mv *best_mv, int near_mv_ref_cnts[4],
int refframe, int *ref_frame_sign_bias);
int vp8_find_near_mvs_bias(MACROBLOCKD *xd, const MODE_INFO *here,
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/idct_blk.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/idct_blk.c
index ff9f3eb7f21..ebe1774f56e 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/idct_blk.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/idct_blk.c
@@ -12,12 +12,6 @@
#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
-void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *dest,
- int stride);
-void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred,
- int pred_stride, unsigned char *dst_ptr,
- int dst_stride);
-
void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i, j;
@@ -39,40 +33,40 @@ void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst,
}
}
-void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu,
- unsigned char *dstv, int stride,
+void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
char *eobs) {
int i, j;
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1) {
- vp8_dequant_idct_add_c(q, dq, dstu, stride);
+ vp8_dequant_idct_add_c(q, dq, dst_u, stride);
} else {
- vp8_dc_only_idct_add_c(q[0] * dq[0], dstu, stride, dstu, stride);
+ vp8_dc_only_idct_add_c(q[0] * dq[0], dst_u, stride, dst_u, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
- dstu += 4;
+ dst_u += 4;
}
- dstu += 4 * stride - 8;
+ dst_u += 4 * stride - 8;
}
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1) {
- vp8_dequant_idct_add_c(q, dq, dstv, stride);
+ vp8_dequant_idct_add_c(q, dq, dst_v, stride);
} else {
- vp8_dc_only_idct_add_c(q[0] * dq[0], dstv, stride, dstv, stride);
+ vp8_dc_only_idct_add_c(q[0] * dq[0], dst_v, stride, dst_v, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
- dstv += 4;
+ dst_v += 4;
}
- dstv += 4 * stride - 8;
+ dst_v += 4 * stride - 8;
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/loopfilter_filters.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/loopfilter_filters.c
index 188e290ca7f..61a55d3c92f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/loopfilter_filters.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/loopfilter_filters.c
@@ -270,28 +270,32 @@ static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0,
*op0 = u ^ 0x80;
}
-void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *s, int p,
+void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *y_ptr,
+ int y_stride,
const unsigned char *blimit) {
signed char mask = 0;
int i = 0;
do {
- mask = vp8_simple_filter_mask(blimit[0], s[-2 * p], s[-1 * p], s[0 * p],
- s[1 * p]);
- vp8_simple_filter(mask, s - 2 * p, s - 1 * p, s, s + 1 * p);
- ++s;
+ mask = vp8_simple_filter_mask(blimit[0], y_ptr[-2 * y_stride],
+ y_ptr[-1 * y_stride], y_ptr[0 * y_stride],
+ y_ptr[1 * y_stride]);
+ vp8_simple_filter(mask, y_ptr - 2 * y_stride, y_ptr - 1 * y_stride, y_ptr,
+ y_ptr + 1 * y_stride);
+ ++y_ptr;
} while (++i < 16);
}
-void vp8_loop_filter_simple_vertical_edge_c(unsigned char *s, int p,
+void vp8_loop_filter_simple_vertical_edge_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit) {
signed char mask = 0;
int i = 0;
do {
- mask = vp8_simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]);
- vp8_simple_filter(mask, s - 2, s - 1, s, s + 1);
- s += p;
+ mask = vp8_simple_filter_mask(blimit[0], y_ptr[-2], y_ptr[-1], y_ptr[0],
+ y_ptr[1]);
+ vp8_simple_filter(mask, y_ptr - 2, y_ptr - 1, y_ptr, y_ptr + 1);
+ y_ptr += y_stride;
} while (++i < 16);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/mfqe.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/mfqe.c
index aad90857293..1fe7363f177 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/mfqe.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/mfqe.c
@@ -235,7 +235,7 @@ void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
FRAME_TYPE frame_type = cm->frame_type;
/* Point at base of Mb MODE_INFO list has motion vectors etc */
- const MODE_INFO *mode_info_context = cm->show_frame_mi;
+ const MODE_INFO *mode_info_context = cm->mi;
int mb_row;
int mb_col;
int totmap, map[4];
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
index 899dc10ad96..eae852d5928 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
@@ -35,41 +35,41 @@ void vp8_dequant_idct_add_y_block_dspr2(short *q, short *dq, unsigned char *dst,
}
void vp8_dequant_idct_add_uv_block_dspr2(short *q, short *dq,
- unsigned char *dstu,
- unsigned char *dstv, int stride,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
char *eobs) {
int i, j;
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
- vp8_dequant_idct_add_dspr2(q, dq, dstu, stride);
+ vp8_dequant_idct_add_dspr2(q, dq, dst_u, stride);
else {
- vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dstu, stride, dstu, stride);
+ vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst_u, stride, dst_u, stride);
((int *)q)[0] = 0;
}
q += 16;
- dstu += 4;
+ dst_u += 4;
}
- dstu += 4 * stride - 8;
+ dst_u += 4 * stride - 8;
}
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
- vp8_dequant_idct_add_dspr2(q, dq, dstv, stride);
+ vp8_dequant_idct_add_dspr2(q, dq, dst_v, stride);
else {
- vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dstv, stride, dstv, stride);
+ vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst_v, stride, dst_v, stride);
((int *)q)[0] = 0;
}
q += 16;
- dstv += 4;
+ dst_v += 4;
}
- dstv += 4 * stride - 8;
+ dst_v += 4 * stride - 8;
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c
index 3f69071748a..4fd6854c528 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/mmi/idct_blk_mmi.c
@@ -32,39 +32,39 @@ void vp8_dequant_idct_add_y_block_mmi(int16_t *q, int16_t *dq, uint8_t *dst,
}
}
-void vp8_dequant_idct_add_uv_block_mmi(int16_t *q, int16_t *dq, uint8_t *dstu,
- uint8_t *dstv, int stride, char *eobs) {
+void vp8_dequant_idct_add_uv_block_mmi(int16_t *q, int16_t *dq, uint8_t *dst_u,
+ uint8_t *dst_v, int stride, char *eobs) {
int i, j;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
if (*eobs++ > 1) {
- vp8_dequant_idct_add_mmi(q, dq, dstu, stride);
+ vp8_dequant_idct_add_mmi(q, dq, dst_u, stride);
} else {
- vp8_dc_only_idct_add_mmi(q[0] * dq[0], dstu, stride, dstu, stride);
+ vp8_dc_only_idct_add_mmi(q[0] * dq[0], dst_u, stride, dst_u, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
- dstu += 4;
+ dst_u += 4;
}
- dstu += 4 * stride - 8;
+ dst_u += 4 * stride - 8;
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
if (*eobs++ > 1) {
- vp8_dequant_idct_add_mmi(q, dq, dstv, stride);
+ vp8_dequant_idct_add_mmi(q, dq, dst_v, stride);
} else {
- vp8_dc_only_idct_add_mmi(q[0] * dq[0], dstv, stride, dstv, stride);
+ vp8_dc_only_idct_add_mmi(q[0] * dq[0], dst_v, stride, dst_v, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
- dstv += 4;
+ dst_v += 4;
}
- dstv += 4 * stride - 8;
+ dst_v += 4 * stride - 8;
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/msa/idct_msa.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/msa/idct_msa.c
index 3d516d0f81a..efad0c29f8a 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/msa/idct_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/mips/msa/idct_msa.c
@@ -134,7 +134,7 @@ static void idct4x4_addconst_msa(int16_t in_dc, uint8_t *pred,
ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dest, dest_stride);
}
-void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff) {
+void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dqcoeff) {
v8i16 input0, input1, tmp0, tmp1, tmp2, tmp3, out0, out1;
const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
@@ -157,22 +157,22 @@ void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff) {
ADD2(tmp0, 3, tmp1, 3, out0, out1);
out0 >>= 3;
out1 >>= 3;
- mb_dq_coeff[0] = __msa_copy_s_h(out0, 0);
- mb_dq_coeff[16] = __msa_copy_s_h(out0, 4);
- mb_dq_coeff[32] = __msa_copy_s_h(out1, 0);
- mb_dq_coeff[48] = __msa_copy_s_h(out1, 4);
- mb_dq_coeff[64] = __msa_copy_s_h(out0, 1);
- mb_dq_coeff[80] = __msa_copy_s_h(out0, 5);
- mb_dq_coeff[96] = __msa_copy_s_h(out1, 1);
- mb_dq_coeff[112] = __msa_copy_s_h(out1, 5);
- mb_dq_coeff[128] = __msa_copy_s_h(out0, 2);
- mb_dq_coeff[144] = __msa_copy_s_h(out0, 6);
- mb_dq_coeff[160] = __msa_copy_s_h(out1, 2);
- mb_dq_coeff[176] = __msa_copy_s_h(out1, 6);
- mb_dq_coeff[192] = __msa_copy_s_h(out0, 3);
- mb_dq_coeff[208] = __msa_copy_s_h(out0, 7);
- mb_dq_coeff[224] = __msa_copy_s_h(out1, 3);
- mb_dq_coeff[240] = __msa_copy_s_h(out1, 7);
+ mb_dqcoeff[0] = __msa_copy_s_h(out0, 0);
+ mb_dqcoeff[16] = __msa_copy_s_h(out0, 4);
+ mb_dqcoeff[32] = __msa_copy_s_h(out1, 0);
+ mb_dqcoeff[48] = __msa_copy_s_h(out1, 4);
+ mb_dqcoeff[64] = __msa_copy_s_h(out0, 1);
+ mb_dqcoeff[80] = __msa_copy_s_h(out0, 5);
+ mb_dqcoeff[96] = __msa_copy_s_h(out1, 1);
+ mb_dqcoeff[112] = __msa_copy_s_h(out1, 5);
+ mb_dqcoeff[128] = __msa_copy_s_h(out0, 2);
+ mb_dqcoeff[144] = __msa_copy_s_h(out0, 6);
+ mb_dqcoeff[160] = __msa_copy_s_h(out1, 2);
+ mb_dqcoeff[176] = __msa_copy_s_h(out1, 6);
+ mb_dqcoeff[192] = __msa_copy_s_h(out0, 3);
+ mb_dqcoeff[208] = __msa_copy_s_h(out0, 7);
+ mb_dqcoeff[224] = __msa_copy_s_h(out1, 3);
+ mb_dqcoeff[240] = __msa_copy_s_h(out1, 7);
}
static void dequant_idct4x4_addblk_msa(int16_t *input, int16_t *dequant_input,
@@ -359,27 +359,27 @@ void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq, uint8_t *dst,
}
}
-void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dstu,
- uint8_t *dstv, int32_t stride,
+void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dst_u,
+ uint8_t *dst_v, int32_t stride,
char *eobs) {
int16_t *eobs_h = (int16_t *)eobs;
if (eobs_h[0]) {
if (eobs_h[0] & 0xfefe) {
- dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_u, stride);
} else {
- dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
+ dequant_idct_addconst_2x_msa(q, dq, dst_u, stride);
}
}
q += 32;
- dstu += (stride * 4);
+ dst_u += (stride * 4);
if (eobs_h[1]) {
if (eobs_h[1] & 0xfefe) {
- dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_u, stride);
} else {
- dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
+ dequant_idct_addconst_2x_msa(q, dq, dst_u, stride);
}
}
@@ -387,20 +387,20 @@ void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dstu,
if (eobs_h[2]) {
if (eobs_h[2] & 0xfefe) {
- dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_v, stride);
} else {
- dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
+ dequant_idct_addconst_2x_msa(q, dq, dst_v, stride);
}
}
q += 32;
- dstv += (stride * 4);
+ dst_v += (stride * 4);
if (eobs_h[3]) {
if (eobs_h[3] & 0xfefe) {
- dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
+ dequant_idct4x4_addblk_2x_msa(q, dq, dst_v, stride);
} else {
- dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
+ dequant_idct_addconst_2x_msa(q, dq, dst_v, stride);
}
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/onyx.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/onyx.h
index ebdabc9b2d0..05c72df3faa 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/onyx.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/onyx.h
@@ -247,35 +247,35 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf);
void vp8_remove_compressor(struct VP8_COMP **comp);
void vp8_init_config(struct VP8_COMP *onyx, VP8_CONFIG *oxcf);
-void vp8_change_config(struct VP8_COMP *onyx, VP8_CONFIG *oxcf);
+void vp8_change_config(struct VP8_COMP *cpi, VP8_CONFIG *oxcf);
-int vp8_receive_raw_frame(struct VP8_COMP *comp, unsigned int frame_flags,
+int vp8_receive_raw_frame(struct VP8_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time_stamp);
-int vp8_get_compressed_data(struct VP8_COMP *comp, unsigned int *frame_flags,
+ int64_t end_time);
+int vp8_get_compressed_data(struct VP8_COMP *cpi, unsigned int *frame_flags,
size_t *size, unsigned char *dest,
unsigned char *dest_end, int64_t *time_stamp,
int64_t *time_end, int flush);
-int vp8_get_preview_raw_frame(struct VP8_COMP *comp, YV12_BUFFER_CONFIG *dest,
+int vp8_get_preview_raw_frame(struct VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
vp8_ppflags_t *flags);
-int vp8_use_as_reference(struct VP8_COMP *comp, int ref_frame_flags);
-int vp8_update_reference(struct VP8_COMP *comp, int ref_frame_flags);
-int vp8_get_reference(struct VP8_COMP *comp,
+int vp8_use_as_reference(struct VP8_COMP *cpi, int ref_frame_flags);
+int vp8_update_reference(struct VP8_COMP *cpi, int ref_frame_flags);
+int vp8_get_reference(struct VP8_COMP *cpi,
enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp8_set_reference(struct VP8_COMP *comp,
+int vp8_set_reference(struct VP8_COMP *cpi,
enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp8_update_entropy(struct VP8_COMP *comp, int update);
-int vp8_set_roimap(struct VP8_COMP *comp, unsigned char *map, unsigned int rows,
+int vp8_update_entropy(struct VP8_COMP *cpi, int update);
+int vp8_set_roimap(struct VP8_COMP *cpi, unsigned char *map, unsigned int rows,
unsigned int cols, int delta_q[4], int delta_lf[4],
unsigned int threshold[4]);
-int vp8_set_active_map(struct VP8_COMP *comp, unsigned char *map,
+int vp8_set_active_map(struct VP8_COMP *cpi, unsigned char *map,
unsigned int rows, unsigned int cols);
-int vp8_set_internal_size(struct VP8_COMP *comp, VPX_SCALING horiz_mode,
+int vp8_set_internal_size(struct VP8_COMP *cpi, VPX_SCALING horiz_mode,
VPX_SCALING vert_mode);
-int vp8_get_quantizer(struct VP8_COMP *c);
+int vp8_get_quantizer(struct VP8_COMP *cpi);
#ifdef __cplusplus
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/onyxd.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/onyxd.h
index 49f524bb34d..801ef87b20f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/onyxd.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/onyxd.h
@@ -41,20 +41,20 @@ void vp8dx_set_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst, int x);
int vp8dx_get_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst);
-int vp8dx_receive_compressed_data(struct VP8D_COMP *comp, size_t size,
- const uint8_t *dest, int64_t time_stamp);
-int vp8dx_get_raw_frame(struct VP8D_COMP *comp, YV12_BUFFER_CONFIG *sd,
+int vp8dx_receive_compressed_data(struct VP8D_COMP *pbi, size_t size,
+ const uint8_t *source, int64_t time_stamp);
+int vp8dx_get_raw_frame(struct VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd,
int64_t *time_stamp, int64_t *time_end_stamp,
vp8_ppflags_t *flags);
int vp8dx_references_buffer(struct VP8Common *oci, int ref_frame);
-vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP *comp,
+vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP *pbi,
enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP *comp,
+vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP *pbi,
enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-int vp8dx_get_quantizer(const struct VP8D_COMP *c);
+int vp8dx_get_quantizer(const struct VP8D_COMP *pbi);
#ifdef __cplusplus
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/postproc.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/postproc.h
index 218a68ea32d..a14f5f1df1d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/postproc.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/postproc.h
@@ -27,13 +27,13 @@ struct postproc_state {
extern "C" {
#endif
int vp8_post_proc_frame(struct VP8Common *oci, YV12_BUFFER_CONFIG *dest,
- vp8_ppflags_t *flags);
+ vp8_ppflags_t *ppflags);
-void vp8_de_noise(struct VP8Common *oci, YV12_BUFFER_CONFIG *source,
+void vp8_de_noise(struct VP8Common *cm, YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag,
int uvfilter);
-void vp8_deblock(struct VP8Common *oci, YV12_BUFFER_CONFIG *source,
+void vp8_deblock(struct VP8Common *cm, YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag);
#define MFQE_PRECISION 4
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/reconinter.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/reconinter.h
index 394badb44c0..974e7ce7547 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/reconinter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/reconinter.h
@@ -15,20 +15,19 @@
extern "C" {
#endif
-extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x);
-extern void vp8_build_inter16x16_predictors_mb(
- MACROBLOCKD *x, unsigned char *dst_y, unsigned char *dst_u,
- unsigned char *dst_v, int dst_ystride, int dst_uvstride);
+void vp8_build_inter_predictors_mb(MACROBLOCKD *xd);
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int dst_ystride,
+ int dst_uvstride);
-extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x,
- unsigned char *dst_y,
- int dst_ystride);
-extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch,
- unsigned char *base_pre,
- int pre_stride, vp8_subpix_fn_t sppf);
+void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y,
+ int dst_ystride);
+void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
+ int pre_stride, vp8_subpix_fn_t sppf);
-extern void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x);
-extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x);
+void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x);
+void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/reconintra4x4.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/reconintra4x4.h
index 9bcd993866b..3618ec5cbeb 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/reconintra4x4.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/reconintra4x4.h
@@ -31,7 +31,7 @@ static INLINE void intra_prediction_down_copy(MACROBLOCKD *xd,
*dst_ptr2 = *src_ptr;
}
-void vp8_intra4x4_predict(unsigned char *Above, unsigned char *yleft,
+void vp8_intra4x4_predict(unsigned char *above, unsigned char *yleft,
int left_stride, B_PREDICTION_MODE b_mode,
unsigned char *dst, int dst_stride,
unsigned char top_left);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/rtcd_defs.pl b/chromium/third_party/libvpx/source/libvpx/vp8/common/rtcd_defs.pl
index 3df745f75a8..235c77e383f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/rtcd_defs.pl
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/rtcd_defs.pl
@@ -31,10 +31,10 @@ forward_decls qw/vp8_common_forward_decls/;
#
# Dequant
#
-add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *dqc";
+add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *DQC";
specialize qw/vp8_dequantize_b mmx neon msa mmi/;
-add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *output, int stride";
+add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *dest, int stride";
specialize qw/vp8_dequant_idct_add mmx neon dspr2 msa mmi/;
add_proto qw/void vp8_dequant_idct_add_y_block/, "short *q, short *dq, unsigned char *dst, int stride, char *eobs";
@@ -46,20 +46,20 @@ specialize qw/vp8_dequant_idct_add_uv_block sse2 neon dspr2 msa mmi/;
#
# Loopfilter
#
-add_proto qw/void vp8_loop_filter_mbv/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi";
+add_proto qw/void vp8_loop_filter_mbv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
specialize qw/vp8_loop_filter_mbv sse2 neon dspr2 msa mmi/;
-add_proto qw/void vp8_loop_filter_bv/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi";
+add_proto qw/void vp8_loop_filter_bv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
specialize qw/vp8_loop_filter_bv sse2 neon dspr2 msa mmi/;
-add_proto qw/void vp8_loop_filter_mbh/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi";
+add_proto qw/void vp8_loop_filter_mbh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
specialize qw/vp8_loop_filter_mbh sse2 neon dspr2 msa mmi/;
-add_proto qw/void vp8_loop_filter_bh/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi";
+add_proto qw/void vp8_loop_filter_bh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi";
specialize qw/vp8_loop_filter_bh sse2 neon dspr2 msa mmi/;
-add_proto qw/void vp8_loop_filter_simple_mbv/, "unsigned char *y, int ystride, const unsigned char *blimit";
+add_proto qw/void vp8_loop_filter_simple_mbv/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
specialize qw/vp8_loop_filter_simple_mbv sse2 neon msa mmi/;
$vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c;
$vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2;
@@ -67,7 +67,7 @@ $vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon;
$vp8_loop_filter_simple_mbv_msa=vp8_loop_filter_simple_vertical_edge_msa;
$vp8_loop_filter_simple_mbv_mmi=vp8_loop_filter_simple_vertical_edge_mmi;
-add_proto qw/void vp8_loop_filter_simple_mbh/, "unsigned char *y, int ystride, const unsigned char *blimit";
+add_proto qw/void vp8_loop_filter_simple_mbh/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
specialize qw/vp8_loop_filter_simple_mbh sse2 neon msa mmi/;
$vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c;
$vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2;
@@ -75,7 +75,7 @@ $vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon;
$vp8_loop_filter_simple_mbh_msa=vp8_loop_filter_simple_horizontal_edge_msa;
$vp8_loop_filter_simple_mbh_mmi=vp8_loop_filter_simple_horizontal_edge_mmi;
-add_proto qw/void vp8_loop_filter_simple_bv/, "unsigned char *y, int ystride, const unsigned char *blimit";
+add_proto qw/void vp8_loop_filter_simple_bv/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
specialize qw/vp8_loop_filter_simple_bv sse2 neon msa mmi/;
$vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c;
$vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2;
@@ -83,7 +83,7 @@ $vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon;
$vp8_loop_filter_simple_bv_msa=vp8_loop_filter_bvs_msa;
$vp8_loop_filter_simple_bv_mmi=vp8_loop_filter_bvs_mmi;
-add_proto qw/void vp8_loop_filter_simple_bh/, "unsigned char *y, int ystride, const unsigned char *blimit";
+add_proto qw/void vp8_loop_filter_simple_bh/, "unsigned char *y_ptr, int y_stride, const unsigned char *blimit";
specialize qw/vp8_loop_filter_simple_bh sse2 neon msa mmi/;
$vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c;
$vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2;
@@ -95,31 +95,31 @@ $vp8_loop_filter_simple_bh_mmi=vp8_loop_filter_bhs_mmi;
# IDCT
#
#idct16
-add_proto qw/void vp8_short_idct4x4llm/, "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride";
+add_proto qw/void vp8_short_idct4x4llm/, "short *input, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride";
specialize qw/vp8_short_idct4x4llm mmx neon dspr2 msa mmi/;
#iwalsh1
-add_proto qw/void vp8_short_inv_walsh4x4_1/, "short *input, short *output";
+add_proto qw/void vp8_short_inv_walsh4x4_1/, "short *input, short *mb_dqcoeff";
specialize qw/vp8_short_inv_walsh4x4_1 dspr2/;
#iwalsh16
-add_proto qw/void vp8_short_inv_walsh4x4/, "short *input, short *output";
+add_proto qw/void vp8_short_inv_walsh4x4/, "short *input, short *mb_dqcoeff";
specialize qw/vp8_short_inv_walsh4x4 sse2 neon dspr2 msa mmi/;
#idct1_scalar_add
-add_proto qw/void vp8_dc_only_idct_add/, "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride";
+add_proto qw/void vp8_dc_only_idct_add/, "short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride";
specialize qw/vp8_dc_only_idct_add mmx neon dspr2 msa mmi/;
#
# RECON
#
-add_proto qw/void vp8_copy_mem16x16/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_copy_mem16x16/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
specialize qw/vp8_copy_mem16x16 sse2 neon dspr2 msa mmi/;
-add_proto qw/void vp8_copy_mem8x8/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_copy_mem8x8/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
specialize qw/vp8_copy_mem8x8 mmx neon dspr2 msa mmi/;
-add_proto qw/void vp8_copy_mem8x4/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_copy_mem8x4/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride";
specialize qw/vp8_copy_mem8x4 mmx neon dspr2 msa mmi/;
#
@@ -127,11 +127,11 @@ specialize qw/vp8_copy_mem8x4 mmx neon dspr2 msa mmi/;
#
if (vpx_config("CONFIG_POSTPROC") eq "yes") {
- add_proto qw/void vp8_blend_mb_inner/, "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride";
+ add_proto qw/void vp8_blend_mb_inner/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
- add_proto qw/void vp8_blend_mb_outer/, "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride";
+ add_proto qw/void vp8_blend_mb_outer/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
- add_proto qw/void vp8_blend_b/, "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride";
+ add_proto qw/void vp8_blend_b/, "unsigned char *y, unsigned char *u, unsigned char *v, int y_1, int u_1, int v_1, int alpha, int stride";
add_proto qw/void vp8_filter_by_weight16x16/, "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight";
specialize qw/vp8_filter_by_weight16x16 sse2 msa/;
@@ -145,29 +145,29 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes") {
#
# Subpixel
#
-add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_sixtap_predict16x16 sse2 ssse3 neon dspr2 msa mmi/;
-add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_sixtap_predict8x8 sse2 ssse3 neon dspr2 msa mmi/;
-add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_sixtap_predict8x4 sse2 ssse3 neon dspr2 msa mmi/;
-add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_sixtap_predict4x4 mmx ssse3 neon dspr2 msa mmi/;
-add_proto qw/void vp8_bilinear_predict16x16/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_bilinear_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_bilinear_predict16x16 sse2 ssse3 neon msa/;
-add_proto qw/void vp8_bilinear_predict8x8/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
+add_proto qw/void vp8_bilinear_predict8x8/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
specialize qw/vp8_bilinear_predict8x8 sse2 ssse3 neon msa/;
-add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-specialize qw/vp8_bilinear_predict8x4 mmx neon msa/;
+add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict8x4 sse2 neon msa/;
-add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-specialize qw/vp8_bilinear_predict4x4 mmx neon msa/;
+add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch";
+specialize qw/vp8_bilinear_predict4x4 sse2 neon msa/;
#
# Encoder functions below this point.
@@ -178,7 +178,7 @@ if (vpx_config("CONFIG_VP8_ENCODER") eq "yes") {
# Block copy
#
if ($opts{arch} =~ /x86/) {
- add_proto qw/void vp8_copy32xn/, "const unsigned char *src_ptr, int source_stride, unsigned char *dst_ptr, int dst_stride, int n";
+ add_proto qw/void vp8_copy32xn/, "const unsigned char *src_ptr, int src_stride, unsigned char *dst_ptr, int dst_stride, int height";
specialize qw/vp8_copy32xn sse2 sse3/;
}
@@ -223,7 +223,7 @@ specialize qw/vp8_full_search_sad sse3 sse4_1/;
$vp8_full_search_sad_sse3=vp8_full_search_sadx3;
$vp8_full_search_sad_sse4_1=vp8_full_search_sadx8;
-add_proto qw/int vp8_refining_search_sad/, "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv";
+add_proto qw/int vp8_refining_search_sad/, "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int error_per_bit, int search_range, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv";
specialize qw/vp8_refining_search_sad sse2 msa/;
$vp8_refining_search_sad_sse2=vp8_refining_search_sadx4;
$vp8_refining_search_sad_msa=vp8_refining_search_sadx4;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/treecoder.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/treecoder.c
index 8a94cdeec0d..f1e78f43210 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/treecoder.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/treecoder.c
@@ -80,7 +80,7 @@ void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */
vp8_prob probs[/* n-1 */],
unsigned int branch_ct[/* n-1 */][2],
const unsigned int num_events[/* n */],
- unsigned int Pfac, int rd) {
+ unsigned int Pfactor, int Round) {
const int tree_len = n - 1;
int t = 0;
@@ -92,7 +92,8 @@ void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */
if (tot) {
const unsigned int p =
- (unsigned int)(((uint64_t)c[0] * Pfac) + (rd ? tot >> 1 : 0)) / tot;
+ (unsigned int)(((uint64_t)c[0] * Pfactor) + (Round ? tot >> 1 : 0)) /
+ tot;
probs[t] = p < 256 ? (p ? p : 1) : 255; /* agree w/old version for now */
} else {
probs[t] = vp8_prob_half;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c
new file mode 100644
index 00000000000..9bf65d8045e
--- /dev/null
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/bilinear_filter_sse2.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <xmmintrin.h>
+
+#include "./vp8_rtcd.h"
+#include "./vpx_config.h"
+#include "vp8/common/filter.h"
+#include "vpx_dsp/x86/mem_sse2.h"
+#include "vpx_ports/mem.h"
+
+static INLINE void horizontal_16x16(uint8_t *src, const int stride,
+ uint16_t *dst, const int xoffset) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < 17; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i a_lo = _mm_unpacklo_epi8(a, zero);
+ const __m128i a_hi = _mm_unpackhi_epi8(a, zero);
+ _mm_store_si128((__m128i *)dst, a_lo);
+ _mm_store_si128((__m128i *)(dst + 8), a_hi);
+ src += stride;
+ dst += 16;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+ for (h = 0; h < 17; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i a_lo = _mm_unpacklo_epi8(a, zero);
+ const __m128i a_hi = _mm_unpackhi_epi8(a, zero);
+ const __m128i a_lo_filtered = _mm_mullo_epi16(a_lo, hfilter_0);
+ const __m128i a_hi_filtered = _mm_mullo_epi16(a_hi, hfilter_0);
+
+ const __m128i b = _mm_loadu_si128((__m128i *)(src + 1));
+ const __m128i b_lo = _mm_unpacklo_epi8(b, zero);
+ const __m128i b_hi = _mm_unpackhi_epi8(b, zero);
+ const __m128i b_lo_filtered = _mm_mullo_epi16(b_lo, hfilter_1);
+ const __m128i b_hi_filtered = _mm_mullo_epi16(b_hi, hfilter_1);
+
+ const __m128i sum_lo = _mm_add_epi16(a_lo_filtered, b_lo_filtered);
+ const __m128i sum_hi = _mm_add_epi16(a_hi_filtered, b_hi_filtered);
+
+ const __m128i compensated_lo = _mm_add_epi16(sum_lo, round_factor);
+ const __m128i compensated_hi = _mm_add_epi16(sum_hi, round_factor);
+
+ const __m128i shifted_lo =
+ _mm_srai_epi16(compensated_lo, VP8_FILTER_SHIFT);
+ const __m128i shifted_hi =
+ _mm_srai_epi16(compensated_hi, VP8_FILTER_SHIFT);
+
+ _mm_store_si128((__m128i *)dst, shifted_lo);
+ _mm_store_si128((__m128i *)(dst + 8), shifted_hi);
+ src += stride;
+ dst += 16;
+ }
+ }
+}
+
+static INLINE void vertical_16x16(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < 16; ++h) {
+ const __m128i row_lo = _mm_load_si128((__m128i *)src);
+ const __m128i row_hi = _mm_load_si128((__m128i *)(src + 8));
+ const __m128i packed = _mm_packus_epi16(row_lo, row_hi);
+ _mm_store_si128((__m128i *)dst, packed);
+ src += 16;
+ dst += stride;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ __m128i row_0_lo = _mm_load_si128((__m128i *)src);
+ __m128i row_0_hi = _mm_load_si128((__m128i *)(src + 8));
+ src += 16;
+ for (h = 0; h < 16; ++h) {
+ const __m128i row_0_lo_filtered = _mm_mullo_epi16(row_0_lo, vfilter_0);
+ const __m128i row_0_hi_filtered = _mm_mullo_epi16(row_0_hi, vfilter_0);
+
+ const __m128i row_1_lo = _mm_load_si128((__m128i *)src);
+ const __m128i row_1_hi = _mm_load_si128((__m128i *)(src + 8));
+ const __m128i row_1_lo_filtered = _mm_mullo_epi16(row_1_lo, vfilter_1);
+ const __m128i row_1_hi_filtered = _mm_mullo_epi16(row_1_hi, vfilter_1);
+
+ const __m128i sum_lo =
+ _mm_add_epi16(row_0_lo_filtered, row_1_lo_filtered);
+ const __m128i sum_hi =
+ _mm_add_epi16(row_0_hi_filtered, row_1_hi_filtered);
+
+ const __m128i compensated_lo = _mm_add_epi16(sum_lo, round_factor);
+ const __m128i compensated_hi = _mm_add_epi16(sum_hi, round_factor);
+
+ const __m128i shifted_lo =
+ _mm_srai_epi16(compensated_lo, VP8_FILTER_SHIFT);
+ const __m128i shifted_hi =
+ _mm_srai_epi16(compensated_hi, VP8_FILTER_SHIFT);
+
+ const __m128i packed = _mm_packus_epi16(shifted_lo, shifted_hi);
+ _mm_store_si128((__m128i *)dst, packed);
+ row_0_lo = row_1_lo;
+ row_0_hi = row_1_hi;
+ src += 16;
+ dst += stride;
+ }
+ }
+}
+
+void vp8_bilinear_predict16x16_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[16 * 17]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_16x16(src_ptr, src_pixels_per_line, FData, xoffset);
+
+ vertical_16x16(FData, dst_ptr, dst_pitch, yoffset);
+}
+
+static INLINE void horizontal_8xN(uint8_t *src, const int stride, uint16_t *dst,
+ const int xoffset, const int height) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < height; ++h) {
+ const __m128i a = _mm_loadl_epi64((__m128i *)src);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ _mm_store_si128((__m128i *)dst, a_u16);
+ src += stride;
+ dst += 8;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+ // Filter horizontally. Rather than load the whole array and transpose, load
+ // 16 values (overreading) and shift to set up the second value. Do an
+ // "extra" 9th line so the vertical pass has the necessary context.
+ for (h = 0; h < height; ++h) {
+ const __m128i a = _mm_loadu_si128((__m128i *)src);
+ const __m128i b = _mm_srli_si128(a, 1);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ const __m128i b_u16 = _mm_unpacklo_epi8(b, zero);
+ const __m128i a_filtered = _mm_mullo_epi16(a_u16, hfilter_0);
+ const __m128i b_filtered = _mm_mullo_epi16(b_u16, hfilter_1);
+ const __m128i sum = _mm_add_epi16(a_filtered, b_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ _mm_store_si128((__m128i *)dst, shifted);
+ src += stride;
+ dst += 8;
+ }
+ }
+}
+
+static INLINE void vertical_8xN(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset, const int height) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < height; ++h) {
+ const __m128i row = _mm_load_si128((__m128i *)src);
+ const __m128i packed = _mm_packus_epi16(row, row);
+ _mm_storel_epi64((__m128i *)dst, packed);
+ src += 8;
+ dst += stride;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ __m128i row_0 = _mm_load_si128((__m128i *)src);
+ src += 8;
+ for (h = 0; h < height; ++h) {
+ const __m128i row_1 = _mm_load_si128((__m128i *)src);
+ const __m128i row_0_filtered = _mm_mullo_epi16(row_0, vfilter_0);
+ const __m128i row_1_filtered = _mm_mullo_epi16(row_1, vfilter_1);
+ const __m128i sum = _mm_add_epi16(row_0_filtered, row_1_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ const __m128i packed = _mm_packus_epi16(shifted, shifted);
+ _mm_storel_epi64((__m128i *)dst, packed);
+ row_0 = row_1;
+ src += 8;
+ dst += stride;
+ }
+ }
+}
+
+void vp8_bilinear_predict8x8_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[8 * 9]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_8xN(src_ptr, src_pixels_per_line, FData, xoffset, 9);
+
+ vertical_8xN(FData, dst_ptr, dst_pitch, yoffset, 8);
+}
+
+void vp8_bilinear_predict8x4_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[8 * 5]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_8xN(src_ptr, src_pixels_per_line, FData, xoffset, 5);
+
+ vertical_8xN(FData, dst_ptr, dst_pitch, yoffset, 4);
+}
+
+static INLINE void horizontal_4x4(uint8_t *src, const int stride, uint16_t *dst,
+ const int xoffset) {
+ int h;
+ const __m128i zero = _mm_setzero_si128();
+
+ if (xoffset == 0) {
+ for (h = 0; h < 5; ++h) {
+ const __m128i a = load_unaligned_u32(src);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ _mm_storel_epi64((__m128i *)dst, a_u16);
+ src += stride;
+ dst += 4;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i hfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][0]);
+ const __m128i hfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[xoffset][1]);
+
+ for (h = 0; h < 5; ++h) {
+ const __m128i a = load_unaligned_u32(src);
+ const __m128i b = load_unaligned_u32(src + 1);
+ const __m128i a_u16 = _mm_unpacklo_epi8(a, zero);
+ const __m128i b_u16 = _mm_unpacklo_epi8(b, zero);
+ const __m128i a_filtered = _mm_mullo_epi16(a_u16, hfilter_0);
+ const __m128i b_filtered = _mm_mullo_epi16(b_u16, hfilter_1);
+ const __m128i sum = _mm_add_epi16(a_filtered, b_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ _mm_storel_epi64((__m128i *)dst, shifted);
+ src += stride;
+ dst += 4;
+ }
+ }
+}
+
+static INLINE void vertical_4x4(uint16_t *src, uint8_t *dst, const int stride,
+ const int yoffset) {
+ int h;
+
+ if (yoffset == 0) {
+ for (h = 0; h < 4; h += 2) {
+ const __m128i row = _mm_load_si128((__m128i *)src);
+ __m128i packed = _mm_packus_epi16(row, row);
+ store_unaligned_u32(dst, packed);
+ dst += stride;
+ packed = _mm_srli_si128(packed, 4);
+ store_unaligned_u32(dst, packed);
+ dst += stride;
+ src += 8;
+ }
+ return;
+ }
+
+ {
+ const __m128i round_factor = _mm_set1_epi16(1 << (VP8_FILTER_SHIFT - 1));
+ const __m128i vfilter_0 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][0]);
+ const __m128i vfilter_1 = _mm_set1_epi16(vp8_bilinear_filters[yoffset][1]);
+
+ for (h = 0; h < 4; h += 2) {
+ const __m128i row_0 = _mm_load_si128((__m128i *)src);
+ const __m128i row_1 = _mm_loadu_si128((__m128i *)(src + 4));
+ const __m128i row_0_filtered = _mm_mullo_epi16(row_0, vfilter_0);
+ const __m128i row_1_filtered = _mm_mullo_epi16(row_1, vfilter_1);
+ const __m128i sum = _mm_add_epi16(row_0_filtered, row_1_filtered);
+ const __m128i compensated = _mm_add_epi16(sum, round_factor);
+ const __m128i shifted = _mm_srai_epi16(compensated, VP8_FILTER_SHIFT);
+ __m128i packed = _mm_packus_epi16(shifted, shifted);
+ storeu_uint32(dst, _mm_cvtsi128_si32(packed));
+ packed = _mm_srli_si128(packed, 4);
+ dst += stride;
+ storeu_uint32(dst, _mm_cvtsi128_si32(packed));
+ dst += stride;
+ src += 8;
+ }
+ }
+}
+
+void vp8_bilinear_predict4x4_sse2(uint8_t *src_ptr, int src_pixels_per_line,
+ int xoffset, int yoffset, uint8_t *dst_ptr,
+ int dst_pitch) {
+ DECLARE_ALIGNED(16, uint16_t, FData[4 * 5]);
+
+ assert((xoffset | yoffset) != 0);
+
+ horizontal_4x4(src_ptr, src_pixels_per_line, FData, xoffset);
+
+ vertical_4x4(FData, dst_ptr, dst_pitch, yoffset);
+}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c
deleted file mode 100644
index 2405342f02a..00000000000
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "vp8/common/x86/filter_x86.h"
-
-DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]) = {
- { 128, 128, 128, 128, 0, 0, 0, 0 }, { 112, 112, 112, 112, 16, 16, 16, 16 },
- { 96, 96, 96, 96, 32, 32, 32, 32 }, { 80, 80, 80, 80, 48, 48, 48, 48 },
- { 64, 64, 64, 64, 64, 64, 64, 64 }, { 48, 48, 48, 48, 80, 80, 80, 80 },
- { 32, 32, 32, 32, 96, 96, 96, 96 }, { 16, 16, 16, 16, 112, 112, 112, 112 }
-};
-
-DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_8[8][16]) = {
- { 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16 },
- { 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32 },
- { 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48 },
- { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 },
- { 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80 },
- { 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96 },
- { 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112 }
-};
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h
deleted file mode 100644
index 570ff866644..00000000000
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/filter_x86.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VPX_VP8_COMMON_X86_FILTER_X86_H_
-#define VPX_VP8_COMMON_X86_FILTER_X86_H_
-
-#include "vpx_ports/mem.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* x86 assembly specific copy of vp8/common/filter.c:vp8_bilinear_filters with
- * duplicated values */
-
-/* duplicated 4x */
-extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]);
-
-/* duplicated 8x */
-extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_8[8][16]);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // VPX_VP8_COMMON_X86_FILTER_X86_H_
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c
index 8aefb279970..897ed5b6527 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/idct_blk_sse2.c
@@ -42,43 +42,43 @@ void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst,
}
void vp8_dequant_idct_add_uv_block_sse2(short *q, short *dq,
- unsigned char *dstu,
- unsigned char *dstv, int stride,
+ unsigned char *dst_u,
+ unsigned char *dst_v, int stride,
char *eobs) {
if (((short *)(eobs))[0]) {
if (((short *)(eobs))[0] & 0xfefe) {
- vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride);
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_u, stride);
} else {
- vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride);
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_u, stride);
}
}
q += 32;
- dstu += stride * 4;
+ dst_u += stride * 4;
if (((short *)(eobs))[1]) {
if (((short *)(eobs))[1] & 0xfefe) {
- vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride);
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_u, stride);
} else {
- vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride);
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_u, stride);
}
}
q += 32;
if (((short *)(eobs))[2]) {
if (((short *)(eobs))[2] & 0xfefe) {
- vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride);
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_v, stride);
} else {
- vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride);
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_v, stride);
}
}
q += 32;
- dstv += stride * 4;
+ dst_v += stride * 4;
if (((short *)(eobs))[3]) {
if (((short *)(eobs))[3] & 0xfefe) {
- vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride);
+ vp8_idct_dequant_full_2x_sse2(q, dq, dst_v, stride);
} else {
- vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride);
+ vp8_idct_dequant_0_2x_sse2(q, dq, dst_v, stride);
}
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm
index 82d7bf91a69..0043e93b061 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm
@@ -13,7 +13,7 @@
SECTION .text
-;void vp8_short_inv_walsh4x4_sse2(short *input, short *output)
+;void vp8_short_inv_walsh4x4_sse2(short *input, short *mb_dqcoeff)
global sym(vp8_short_inv_walsh4x4_sse2) PRIVATE
sym(vp8_short_inv_walsh4x4_sse2):
push rbp
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm
index 1f3a2baca00..67bcd0cbd71 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm
@@ -10,8 +10,6 @@
%include "vpx_ports/x86_abi_support.asm"
-extern sym(vp8_bilinear_filters_x86_8)
-
%define BLOCK_HEIGHT_WIDTH 4
%define vp8_filter_weight 128
@@ -205,280 +203,6 @@ sym(vp8_filter_block1dc_v6_mmx):
ret
-;void bilinear_predict8x4_mmx
-;(
-; unsigned char *src_ptr,
-; int src_pixels_per_line,
-; int xoffset,
-; int yoffset,
-; unsigned char *dst_ptr,
-; int dst_pitch
-;)
-global sym(vp8_bilinear_predict8x4_mmx) PRIVATE
-sym(vp8_bilinear_predict8x4_mmx):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- GET_GOT rbx
- push rsi
- push rdi
- ; end prolog
-
- ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset];
- ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset];
-
- movsxd rax, dword ptr arg(2) ;xoffset
- mov rdi, arg(4) ;dst_ptr ;
-
- lea rcx, [GLOBAL(sym(vp8_bilinear_filters_x86_8))]
- shl rax, 5
-
- mov rsi, arg(0) ;src_ptr ;
- add rax, rcx
-
- movsxd rdx, dword ptr arg(5) ;dst_pitch
- movq mm1, [rax] ;
-
- movq mm2, [rax+16] ;
- movsxd rax, dword ptr arg(3) ;yoffset
-
- pxor mm0, mm0 ;
- shl rax, 5
-
- add rax, rcx
- lea rcx, [rdi+rdx*4] ;
-
- movsxd rdx, dword ptr arg(1) ;src_pixels_per_line ;
-
- ; get the first horizontal line done ;
- movq mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- movq mm4, mm3 ; make a copy of current line
-
- punpcklbw mm3, mm0 ; xx 00 01 02 03 04 05 06
- punpckhbw mm4, mm0 ;
-
- pmullw mm3, mm1 ;
- pmullw mm4, mm1 ;
-
- movq mm5, [rsi+1] ;
- movq mm6, mm5 ;
-
- punpcklbw mm5, mm0 ;
- punpckhbw mm6, mm0 ;
-
- pmullw mm5, mm2 ;
- pmullw mm6, mm2 ;
-
- paddw mm3, mm5 ;
- paddw mm4, mm6 ;
-
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw mm4, [GLOBAL(rd)] ;
- psraw mm4, VP8_FILTER_SHIFT ;
-
- movq mm7, mm3 ;
- packuswb mm7, mm4 ;
-
- add rsi, rdx ; next line
-.next_row_8x4:
- movq mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- movq mm4, mm3 ; make a copy of current line
-
- punpcklbw mm3, mm0 ; xx 00 01 02 03 04 05 06
- punpckhbw mm4, mm0 ;
-
- pmullw mm3, mm1 ;
- pmullw mm4, mm1 ;
-
- movq mm5, [rsi+1] ;
- movq mm6, mm5 ;
-
- punpcklbw mm5, mm0 ;
- punpckhbw mm6, mm0 ;
-
- pmullw mm5, mm2 ;
- pmullw mm6, mm2 ;
-
- paddw mm3, mm5 ;
- paddw mm4, mm6 ;
-
- movq mm5, mm7 ;
- movq mm6, mm7 ;
-
- punpcklbw mm5, mm0 ;
- punpckhbw mm6, mm0
-
- pmullw mm5, [rax] ;
- pmullw mm6, [rax] ;
-
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw mm4, [GLOBAL(rd)] ;
- psraw mm4, VP8_FILTER_SHIFT ;
-
- movq mm7, mm3 ;
- packuswb mm7, mm4 ;
-
-
- pmullw mm3, [rax+16] ;
- pmullw mm4, [rax+16] ;
-
- paddw mm3, mm5 ;
- paddw mm4, mm6 ;
-
-
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw mm4, [GLOBAL(rd)] ;
- psraw mm4, VP8_FILTER_SHIFT ;
-
- packuswb mm3, mm4
-
- movq [rdi], mm3 ; store the results in the destination
-
-%if ABI_IS_32BIT
- add rsi, rdx ; next line
- add rdi, dword ptr arg(5) ;dst_pitch ;
-%else
- movsxd r8, dword ptr arg(5) ;dst_pitch
- add rsi, rdx ; next line
- add rdi, r8
-%endif
- cmp rdi, rcx ;
- jne .next_row_8x4
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_GOT
- UNSHADOW_ARGS
- pop rbp
- ret
-
-
-;void bilinear_predict4x4_mmx
-;(
-; unsigned char *src_ptr,
-; int src_pixels_per_line,
-; int xoffset,
-; int yoffset,
-; unsigned char *dst_ptr,
-; int dst_pitch
-;)
-global sym(vp8_bilinear_predict4x4_mmx) PRIVATE
-sym(vp8_bilinear_predict4x4_mmx):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- GET_GOT rbx
- push rsi
- push rdi
- ; end prolog
-
- ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset];
- ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset];
-
- movsxd rax, dword ptr arg(2) ;xoffset
- mov rdi, arg(4) ;dst_ptr ;
-
- lea rcx, [GLOBAL(sym(vp8_bilinear_filters_x86_8))]
- shl rax, 5
-
- add rax, rcx ; HFilter
- mov rsi, arg(0) ;src_ptr ;
-
- movsxd rdx, dword ptr arg(5) ;ldst_pitch
- movq mm1, [rax] ;
-
- movq mm2, [rax+16] ;
- movsxd rax, dword ptr arg(3) ;yoffset
-
- pxor mm0, mm0 ;
- shl rax, 5
-
- add rax, rcx
- lea rcx, [rdi+rdx*4] ;
-
- movsxd rdx, dword ptr arg(1) ;src_pixels_per_line ;
-
- ; get the first horizontal line done ;
- movd mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- punpcklbw mm3, mm0 ; xx 00 01 02 03 04 05 06
-
- pmullw mm3, mm1 ;
- movd mm5, [rsi+1] ;
-
- punpcklbw mm5, mm0 ;
- pmullw mm5, mm2 ;
-
- paddw mm3, mm5 ;
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
-
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- movq mm7, mm3 ;
- packuswb mm7, mm0 ;
-
- add rsi, rdx ; next line
-.next_row_4x4:
- movd mm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- punpcklbw mm3, mm0 ; xx 00 01 02 03 04 05 06
-
- pmullw mm3, mm1 ;
- movd mm5, [rsi+1] ;
-
- punpcklbw mm5, mm0 ;
- pmullw mm5, mm2 ;
-
- paddw mm3, mm5 ;
-
- movq mm5, mm7 ;
- punpcklbw mm5, mm0 ;
-
- pmullw mm5, [rax] ;
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
-
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
- movq mm7, mm3 ;
-
- packuswb mm7, mm0 ;
-
- pmullw mm3, [rax+16] ;
- paddw mm3, mm5 ;
-
-
- paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- packuswb mm3, mm0
- movd [rdi], mm3 ; store the results in the destination
-
-%if ABI_IS_32BIT
- add rsi, rdx ; next line
- add rdi, dword ptr arg(5) ;dst_pitch ;
-%else
- movsxd r8, dword ptr arg(5) ;dst_pitch ;
- add rsi, rdx ; next line
- add rdi, r8
-%endif
-
- cmp rdi, rcx ;
- jne .next_row_4x4
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_GOT
- UNSHADOW_ARGS
- pop rbp
- ret
-
-
-
SECTION_RODATA
align 16
rd:
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm
index 6e70f6d2e80..51c015e3df0 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm
@@ -10,7 +10,6 @@
%include "vpx_ports/x86_abi_support.asm"
-extern sym(vp8_bilinear_filters_x86_8)
%define BLOCK_HEIGHT_WIDTH 4
%define VP8_FILTER_WEIGHT 128
@@ -958,419 +957,6 @@ sym(vp8_unpack_block1d16_h6_sse2):
ret
-;void vp8_bilinear_predict16x16_sse2
-;(
-; unsigned char *src_ptr,
-; int src_pixels_per_line,
-; int xoffset,
-; int yoffset,
-; unsigned char *dst_ptr,
-; int dst_pitch
-;)
-extern sym(vp8_bilinear_filters_x86_8)
-global sym(vp8_bilinear_predict16x16_sse2) PRIVATE
-sym(vp8_bilinear_predict16x16_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- GET_GOT rbx
- push rsi
- push rdi
- ; end prolog
-
- ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset]
- ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset]
-
- lea rcx, [GLOBAL(sym(vp8_bilinear_filters_x86_8))]
- movsxd rax, dword ptr arg(2) ;xoffset
-
- cmp rax, 0 ;skip first_pass filter if xoffset=0
- je .b16x16_sp_only
-
- shl rax, 5
- add rax, rcx ;HFilter
-
- mov rdi, arg(4) ;dst_ptr
- mov rsi, arg(0) ;src_ptr
- movsxd rdx, dword ptr arg(5) ;dst_pitch
-
- movdqa xmm1, [rax]
- movdqa xmm2, [rax+16]
-
- movsxd rax, dword ptr arg(3) ;yoffset
-
- cmp rax, 0 ;skip second_pass filter if yoffset=0
- je .b16x16_fp_only
-
- shl rax, 5
- add rax, rcx ;VFilter
-
- lea rcx, [rdi+rdx*8]
- lea rcx, [rcx+rdx*8]
- movsxd rdx, dword ptr arg(1) ;src_pixels_per_line
-
- pxor xmm0, xmm0
-
-%if ABI_IS_32BIT=0
- movsxd r8, dword ptr arg(5) ;dst_pitch
-%endif
- ; get the first horizontal line done
- movdqu xmm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- movdqa xmm4, xmm3 ; make a copy of current line
-
- punpcklbw xmm3, xmm0 ; xx 00 01 02 03 04 05 06
- punpckhbw xmm4, xmm0
-
- pmullw xmm3, xmm1
- pmullw xmm4, xmm1
-
- movdqu xmm5, [rsi+1]
- movdqa xmm6, xmm5
-
- punpcklbw xmm5, xmm0
- punpckhbw xmm6, xmm0
-
- pmullw xmm5, xmm2
- pmullw xmm6, xmm2
-
- paddw xmm3, xmm5
- paddw xmm4, xmm6
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw xmm4, [GLOBAL(rd)]
- psraw xmm4, VP8_FILTER_SHIFT
-
- movdqa xmm7, xmm3
- packuswb xmm7, xmm4
-
- add rsi, rdx ; next line
-.next_row:
- movdqu xmm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- movdqa xmm4, xmm3 ; make a copy of current line
-
- punpcklbw xmm3, xmm0 ; xx 00 01 02 03 04 05 06
- punpckhbw xmm4, xmm0
-
- pmullw xmm3, xmm1
- pmullw xmm4, xmm1
-
- movdqu xmm5, [rsi+1]
- movdqa xmm6, xmm5
-
- punpcklbw xmm5, xmm0
- punpckhbw xmm6, xmm0
-
- pmullw xmm5, xmm2
- pmullw xmm6, xmm2
-
- paddw xmm3, xmm5
- paddw xmm4, xmm6
-
- movdqa xmm5, xmm7
- movdqa xmm6, xmm7
-
- punpcklbw xmm5, xmm0
- punpckhbw xmm6, xmm0
-
- pmullw xmm5, [rax]
- pmullw xmm6, [rax]
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw xmm4, [GLOBAL(rd)]
- psraw xmm4, VP8_FILTER_SHIFT
-
- movdqa xmm7, xmm3
- packuswb xmm7, xmm4
-
- pmullw xmm3, [rax+16]
- pmullw xmm4, [rax+16]
-
- paddw xmm3, xmm5
- paddw xmm4, xmm6
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw xmm4, [GLOBAL(rd)]
- psraw xmm4, VP8_FILTER_SHIFT
-
- packuswb xmm3, xmm4
- movdqa [rdi], xmm3 ; store the results in the destination
-
- add rsi, rdx ; next line
-%if ABI_IS_32BIT
- add rdi, DWORD PTR arg(5) ;dst_pitch
-%else
- add rdi, r8
-%endif
-
- cmp rdi, rcx
- jne .next_row
-
- jmp .done
-
-.b16x16_sp_only:
- movsxd rax, dword ptr arg(3) ;yoffset
- shl rax, 5
- add rax, rcx ;VFilter
-
- mov rdi, arg(4) ;dst_ptr
- mov rsi, arg(0) ;src_ptr
- movsxd rdx, dword ptr arg(5) ;dst_pitch
-
- movdqa xmm1, [rax]
- movdqa xmm2, [rax+16]
-
- lea rcx, [rdi+rdx*8]
- lea rcx, [rcx+rdx*8]
- movsxd rax, dword ptr arg(1) ;src_pixels_per_line
-
- pxor xmm0, xmm0
-
- ; get the first horizontal line done
- movdqu xmm7, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
-
- add rsi, rax ; next line
-.next_row_spo:
- movdqu xmm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
-
- movdqa xmm5, xmm7
- movdqa xmm6, xmm7
-
- movdqa xmm4, xmm3 ; make a copy of current line
- movdqa xmm7, xmm3
-
- punpcklbw xmm5, xmm0
- punpckhbw xmm6, xmm0
- punpcklbw xmm3, xmm0 ; xx 00 01 02 03 04 05 06
- punpckhbw xmm4, xmm0
-
- pmullw xmm5, xmm1
- pmullw xmm6, xmm1
- pmullw xmm3, xmm2
- pmullw xmm4, xmm2
-
- paddw xmm3, xmm5
- paddw xmm4, xmm6
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw xmm4, [GLOBAL(rd)]
- psraw xmm4, VP8_FILTER_SHIFT
-
- packuswb xmm3, xmm4
- movdqa [rdi], xmm3 ; store the results in the destination
-
- add rsi, rax ; next line
- add rdi, rdx ;dst_pitch
- cmp rdi, rcx
- jne .next_row_spo
-
- jmp .done
-
-.b16x16_fp_only:
- lea rcx, [rdi+rdx*8]
- lea rcx, [rcx+rdx*8]
- movsxd rax, dword ptr arg(1) ;src_pixels_per_line
- pxor xmm0, xmm0
-
-.next_row_fpo:
- movdqu xmm3, [rsi] ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
- movdqa xmm4, xmm3 ; make a copy of current line
-
- punpcklbw xmm3, xmm0 ; xx 00 01 02 03 04 05 06
- punpckhbw xmm4, xmm0
-
- pmullw xmm3, xmm1
- pmullw xmm4, xmm1
-
- movdqu xmm5, [rsi+1]
- movdqa xmm6, xmm5
-
- punpcklbw xmm5, xmm0
- punpckhbw xmm6, xmm0
-
- pmullw xmm5, xmm2
- pmullw xmm6, xmm2
-
- paddw xmm3, xmm5
- paddw xmm4, xmm6
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- paddw xmm4, [GLOBAL(rd)]
- psraw xmm4, VP8_FILTER_SHIFT
-
- packuswb xmm3, xmm4
- movdqa [rdi], xmm3 ; store the results in the destination
-
- add rsi, rax ; next line
- add rdi, rdx ; dst_pitch
- cmp rdi, rcx
- jne .next_row_fpo
-
-.done:
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_GOT
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-
-;void vp8_bilinear_predict8x8_sse2
-;(
-; unsigned char *src_ptr,
-; int src_pixels_per_line,
-; int xoffset,
-; int yoffset,
-; unsigned char *dst_ptr,
-; int dst_pitch
-;)
-global sym(vp8_bilinear_predict8x8_sse2) PRIVATE
-sym(vp8_bilinear_predict8x8_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 6
- SAVE_XMM 7
- GET_GOT rbx
- push rsi
- push rdi
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 144 ; reserve 144 bytes
-
- ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset]
- ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset]
- lea rcx, [GLOBAL(sym(vp8_bilinear_filters_x86_8))]
-
- mov rsi, arg(0) ;src_ptr
- movsxd rdx, dword ptr arg(1) ;src_pixels_per_line
-
- ;Read 9-line unaligned data in and put them on stack. This gives a big
- ;performance boost.
- movdqu xmm0, [rsi]
- lea rax, [rdx + rdx*2]
- movdqu xmm1, [rsi+rdx]
- movdqu xmm2, [rsi+rdx*2]
- add rsi, rax
- movdqu xmm3, [rsi]
- movdqu xmm4, [rsi+rdx]
- movdqu xmm5, [rsi+rdx*2]
- add rsi, rax
- movdqu xmm6, [rsi]
- movdqu xmm7, [rsi+rdx]
-
- movdqa XMMWORD PTR [rsp], xmm0
-
- movdqu xmm0, [rsi+rdx*2]
-
- movdqa XMMWORD PTR [rsp+16], xmm1
- movdqa XMMWORD PTR [rsp+32], xmm2
- movdqa XMMWORD PTR [rsp+48], xmm3
- movdqa XMMWORD PTR [rsp+64], xmm4
- movdqa XMMWORD PTR [rsp+80], xmm5
- movdqa XMMWORD PTR [rsp+96], xmm6
- movdqa XMMWORD PTR [rsp+112], xmm7
- movdqa XMMWORD PTR [rsp+128], xmm0
-
- movsxd rax, dword ptr arg(2) ;xoffset
- shl rax, 5
- add rax, rcx ;HFilter
-
- mov rdi, arg(4) ;dst_ptr
- movsxd rdx, dword ptr arg(5) ;dst_pitch
-
- movdqa xmm1, [rax]
- movdqa xmm2, [rax+16]
-
- movsxd rax, dword ptr arg(3) ;yoffset
- shl rax, 5
- add rax, rcx ;VFilter
-
- lea rcx, [rdi+rdx*8]
-
- movdqa xmm5, [rax]
- movdqa xmm6, [rax+16]
-
- pxor xmm0, xmm0
-
- ; get the first horizontal line done
- movdqa xmm3, XMMWORD PTR [rsp]
- movdqa xmm4, xmm3 ; make a copy of current line
- psrldq xmm4, 1
-
- punpcklbw xmm3, xmm0 ; 00 01 02 03 04 05 06 07
- punpcklbw xmm4, xmm0 ; 01 02 03 04 05 06 07 08
-
- pmullw xmm3, xmm1
- pmullw xmm4, xmm2
-
- paddw xmm3, xmm4
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- movdqa xmm7, xmm3
- add rsp, 16 ; next line
-.next_row8x8:
- movdqa xmm3, XMMWORD PTR [rsp] ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
- movdqa xmm4, xmm3 ; make a copy of current line
- psrldq xmm4, 1
-
- punpcklbw xmm3, xmm0 ; 00 01 02 03 04 05 06 07
- punpcklbw xmm4, xmm0 ; 01 02 03 04 05 06 07 08
-
- pmullw xmm3, xmm1
- pmullw xmm4, xmm2
-
- paddw xmm3, xmm4
- pmullw xmm7, xmm5
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- movdqa xmm4, xmm3
-
- pmullw xmm3, xmm6
- paddw xmm3, xmm7
-
- movdqa xmm7, xmm4
-
- paddw xmm3, [GLOBAL(rd)] ; xmm3 += round value
- psraw xmm3, VP8_FILTER_SHIFT ; xmm3 /= 128
-
- packuswb xmm3, xmm0
- movq [rdi], xmm3 ; store the results in the destination
-
- add rsp, 16 ; next line
- add rdi, rdx
-
- cmp rdi, rcx
- jne .next_row8x8
-
- ;add rsp, 144
- pop rsp
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_GOT
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-
-
SECTION_RODATA
align 16
rd:
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c
index de836f19df4..7fb83c2d5e2 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -11,7 +11,6 @@
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx_ports/mem.h"
-#include "filter_x86.h"
extern const short vp8_six_tap_x86[8][6 * 8];
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decodeframe.c b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decodeframe.c
index 82b72d21edc..650d1d0408d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decodeframe.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decodeframe.c
@@ -1220,7 +1220,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd) &&
pc->multi_token_partition != ONE_PARTITION) {
unsigned int thread;
- vp8mt_decode_mb_rows(pbi, xd);
+ if (vp8mt_decode_mb_rows(pbi, xd)) {
+ vp8_decoder_remove_threads(pbi);
+ pbi->restart_threads = 1;
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, NULL);
+ }
vp8_yv12_extend_frame_borders(yv12_fb_new);
for (thread = 0; thread < pbi->decoding_thread_count; ++thread) {
corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decoderthreading.h b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decoderthreading.h
index 1402956259a..3d49bc83174 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decoderthreading.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/decoderthreading.h
@@ -16,7 +16,7 @@ extern "C" {
#endif
#if CONFIG_MULTITHREAD
-void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
+int vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
void vp8_decoder_remove_threads(VP8D_COMP *pbi);
void vp8_decoder_create_threads(VP8D_COMP *pbi);
void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_if.c b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_if.c
index f516eb0c78b..c6fb51d0cb5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_if.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_if.c
@@ -16,6 +16,7 @@
#include "onyxd_int.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/alloccommon.h"
+#include "vp8/common/common.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
@@ -321,21 +322,6 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
pbi->dec_fb_ref[GOLDEN_FRAME] = &cm->yv12_fb[cm->gld_fb_idx];
pbi->dec_fb_ref[ALTREF_FRAME] = &cm->yv12_fb[cm->alt_fb_idx];
- if (setjmp(pbi->common.error.jmp)) {
- /* We do not know if the missing frame(s) was supposed to update
- * any of the reference buffers, but we act conservative and
- * mark only the last buffer as corrupted.
- */
- cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
-
- if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) {
- cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
- }
- goto decode_exit;
- }
-
- pbi->common.error.setjmp = 1;
-
retcode = vp8_decode_frame(pbi);
if (retcode < 0) {
@@ -344,6 +330,12 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
}
pbi->common.error.error_code = VPX_CODEC_ERROR;
+ // Propagate the error info.
+ if (pbi->mb.error_info.error_code != 0) {
+ pbi->common.error.error_code = pbi->mb.error_info.error_code;
+ memcpy(pbi->common.error.detail, pbi->mb.error_info.detail,
+ sizeof(pbi->mb.error_info.detail));
+ }
goto decode_exit;
}
@@ -382,7 +374,6 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
pbi->last_time_stamp = time_stamp;
decode_exit:
- pbi->common.error.setjmp = 0;
vpx_clear_system_state();
return retcode;
}
@@ -445,7 +436,7 @@ int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf) {
#if CONFIG_MULTITHREAD
if (setjmp(fb->pbi[0]->common.error.jmp)) {
vp8_remove_decoder_instances(fb);
- memset(fb->pbi, 0, sizeof(fb->pbi));
+ vp8_zero(fb->pbi);
vpx_clear_system_state();
return VPX_CODEC_ERROR;
}
@@ -471,6 +462,6 @@ int vp8_remove_decoder_instances(struct frame_buffers *fb) {
return VPX_CODEC_OK;
}
-int vp8dx_get_quantizer(const VP8D_COMP *cpi) {
- return cpi->common.base_qindex;
+int vp8dx_get_quantizer(const VP8D_COMP *pbi) {
+ return pbi->common.base_qindex;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_int.h b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_int.h
index bdf990c6d8f..75286e43071 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_int.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/onyxd_int.h
@@ -118,11 +118,17 @@ typedef struct VP8D_COMP {
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
+#if CONFIG_MULTITHREAD
+ // Restart threads on next frame if set to 1.
+ // This is set when error happens in multithreaded decoding and all threads
+ // are shut down.
+ int restart_threads;
+#endif
} VP8D_COMP;
void vp8cx_init_de_quantizer(VP8D_COMP *pbi);
void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
-int vp8_decode_frame(VP8D_COMP *cpi);
+int vp8_decode_frame(VP8D_COMP *pbi);
int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf);
int vp8_remove_decoder_instances(struct frame_buffers *fb);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/threading.c b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/threading.c
index aadc8dc712f..561922de329 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/threading.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/threading.c
@@ -15,8 +15,8 @@
#endif
#include "onyxd_int.h"
#include "vpx_mem/vpx_mem.h"
+#include "vp8/common/common.h"
#include "vp8/common/threading.h"
-
#include "vp8/common/loopfilter.h"
#include "vp8/common/extend.h"
#include "vpx_ports/vpx_timer.h"
@@ -400,16 +400,32 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
- xd->pre.y_buffer =
- ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset;
- xd->pre.u_buffer =
- ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset;
- xd->pre.v_buffer =
- ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset;
-
/* propagate errors from reference frames */
xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
+ if (xd->corrupted) {
+ // Move current decoding marcoblock to the end of row for all rows
+ // assigned to this thread, such that other threads won't be waiting.
+ for (; mb_row < pc->mb_rows;
+ mb_row += (pbi->decoding_thread_count + 1)) {
+ current_mb_col = &pbi->mt_current_mb_col[mb_row];
+ vpx_atomic_store_release(current_mb_col, pc->mb_cols + nsync);
+ }
+ vpx_internal_error(&xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Corrupted reference frame");
+ }
+
+ if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) {
+ const MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;
+ xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
+ xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
+ xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
+ } else {
+ // ref_frame is INTRA_FRAME, pre buffer should not be used.
+ xd->pre.y_buffer = 0;
+ xd->pre.u_buffer = 0;
+ xd->pre.v_buffer = 0;
+ }
mt_decode_macroblock(pbi, xd, 0);
xd->left_available = 1;
@@ -557,8 +573,9 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
}
- /* signal end of frame decoding if this thread processed the last mb_row */
- if (last_mb_row == (pc->mb_rows - 1)) sem_post(&pbi->h_event_end_decoding);
+ /* signal end of decoding of current thread for current frame */
+ if (last_mb_row + (int)pbi->decoding_thread_count + 1 >= pc->mb_rows)
+ sem_post(&pbi->h_event_end_decoding);
}
static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
@@ -576,7 +593,13 @@ static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
} else {
MACROBLOCKD *xd = &mbrd->mbd;
xd->left_context = &mb_row_left_context;
-
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ // Signal the end of decoding for current thread.
+ sem_post(&pbi->h_event_end_decoding);
+ continue;
+ }
+ xd->error_info.setjmp = 1;
mt_decode_mb_rows(pbi, xd, ithread + 1);
}
}
@@ -738,22 +761,28 @@ void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
/* Allocate memory for above_row buffers. */
CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
- for (i = 0; i < pc->mb_rows; ++i)
+ for (i = 0; i < pc->mb_rows; ++i) {
CHECK_MEM_ERROR(pbi->mt_yabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(width + (VP8BORDERINPIXELS << 1))));
+ vp8_zero_array(pbi->mt_yabove_row[i], width + (VP8BORDERINPIXELS << 1));
+ }
CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
- for (i = 0; i < pc->mb_rows; ++i)
+ for (i = 0; i < pc->mb_rows; ++i) {
CHECK_MEM_ERROR(pbi->mt_uabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(uv_width + VP8BORDERINPIXELS)));
+ vp8_zero_array(pbi->mt_uabove_row[i], uv_width + VP8BORDERINPIXELS);
+ }
CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
- for (i = 0; i < pc->mb_rows; ++i)
+ for (i = 0; i < pc->mb_rows; ++i) {
CHECK_MEM_ERROR(pbi->mt_vabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(uv_width + VP8BORDERINPIXELS)));
+ vp8_zero_array(pbi->mt_vabove_row[i], uv_width + VP8BORDERINPIXELS);
+ }
/* Allocate memory for left_col buffers. */
CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
@@ -809,7 +838,7 @@ void vp8_decoder_remove_threads(VP8D_COMP *pbi) {
}
}
-void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
+int vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
VP8_COMMON *pc = &pbi->common;
unsigned int i;
int j;
@@ -855,7 +884,22 @@ void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
sem_post(&pbi->h_event_start_decoding[i]);
}
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ xd->corrupted = 1;
+ // Wait for other threads to finish. This prevents other threads decoding
+ // the current frame while the main thread starts decoding the next frame,
+ // which causes a data race.
+ for (i = 0; i < pbi->decoding_thread_count; ++i)
+ sem_wait(&pbi->h_event_end_decoding);
+ return -1;
+ }
+
+ xd->error_info.setjmp = 1;
mt_decode_mb_rows(pbi, xd, 0);
- sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
+ for (i = 0; i < pbi->decoding_thread_count + 1; ++i)
+ sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
+
+ return 0;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/treereader.h b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/treereader.h
index 1e27f29fd05..4bf938a741d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/decoder/treereader.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/decoder/treereader.h
@@ -30,7 +30,7 @@ typedef BOOL_DECODER vp8_reader;
static INLINE int vp8_treed_read(
vp8_reader *const r, /* !!! must return a 0 or 1 !!! */
vp8_tree t, const vp8_prob *const p) {
- register vp8_tree_index i = 0;
+ vp8_tree_index i = 0;
while ((i = t[i + vp8_read(r, p[i >> 1])]) > 0) {
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
index c42005df6c8..d066be1a7a1 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
@@ -26,9 +26,11 @@ void vp8_fast_quantize_b_neon(BLOCK *b, BLOCKD *d) {
zig_zag1 = vld1q_u16(inv_zig_zag + 8);
int16x8_t x0, x1, sz0, sz1, y0, y1;
uint16x8_t eob0, eob1;
+#ifndef __aarch64__
uint16x4_t eob_d16;
uint32x2_t eob_d32;
uint32x4_t eob_q32;
+#endif // __arch64__
/* sign of z: z >> 15 */
sz0 = vshrq_n_s16(z0, 15);
@@ -66,11 +68,17 @@ void vp8_fast_quantize_b_neon(BLOCK *b, BLOCKD *d) {
/* select the largest value */
eob0 = vmaxq_u16(eob0, eob1);
+#ifdef __aarch64__
+ *d->eob = (int8_t)vmaxvq_u16(eob0);
+#else
eob_d16 = vmax_u16(vget_low_u16(eob0), vget_high_u16(eob0));
eob_q32 = vmovl_u16(eob_d16);
eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32));
eob_d32 = vpmax_u32(eob_d32, eob_d32);
+ vst1_lane_s8((int8_t *)d->eob, vreinterpret_s8_u32(eob_d32), 0);
+#endif // __aarch64__
+
/* qcoeff = x */
vst1q_s16(d->qcoeff, x0);
vst1q_s16(d->qcoeff + 8, x1);
@@ -78,6 +86,4 @@ void vp8_fast_quantize_b_neon(BLOCK *b, BLOCKD *d) {
/* dqcoeff = x * dequant */
vst1q_s16(d->dqcoeff, vmulq_s16(dequant0, x0));
vst1q_s16(d->dqcoeff + 8, vmulq_s16(dequant1, x1));
-
- vst1_lane_s8((int8_t *)d->eob, vreinterpret_s8_u32(eob_d32), 0);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/bitstream.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/bitstream.c
index 8cacb645055..8dd04277556 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/bitstream.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/bitstream.c
@@ -41,13 +41,6 @@ const int vp8cx_base_skip_false_prob[128] = {
unsigned __int64 Sectionbits[500];
#endif
-#ifdef VP8_ENTROPY_STATS
-int intra_mode_stats[10][10][10];
-static unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
- [PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-extern unsigned int active_section;
-#endif
-
#ifdef MODE_STATS
int count_mb_seg[4] = { 0, 0, 0, 0 };
#endif
@@ -428,10 +421,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
vp8_convert_rfct_to_prob(cpi);
-#ifdef VP8_ENTROPY_STATS
- active_section = 1;
-#endif
-
if (pc->mb_no_coeff_skip) {
int total_mbs = pc->mb_rows * pc->mb_cols;
@@ -472,10 +461,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-#ifdef VP8_ENTROPY_STATS
- active_section = 9;
-#endif
-
if (cpi->mb.e_mbd.update_mb_segmentation_map) {
write_mb_features(w, mi, &cpi->mb.e_mbd);
}
@@ -486,9 +471,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
if (rf == INTRA_FRAME) {
vp8_write(w, 0, cpi->prob_intra_coded);
-#ifdef VP8_ENTROPY_STATS
- active_section = 6;
-#endif
write_ymode(w, mode, pc->fc.ymode_prob);
if (mode == B_PRED) {
@@ -522,28 +504,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
vp8_clamp_mv2(&best_mv, xd);
vp8_mv_ref_probs(mv_ref_p, ct);
-
-#ifdef VP8_ENTROPY_STATS
- accum_mv_refs(mode, ct);
-#endif
}
-#ifdef VP8_ENTROPY_STATS
- active_section = 3;
-#endif
-
write_mv_ref(w, mode, mv_ref_p);
switch (mode) /* new, split require MVs */
{
- case NEWMV:
-
-#ifdef VP8_ENTROPY_STATS
- active_section = 5;
-#endif
-
- write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
- break;
+ case NEWMV: write_mv(w, &mi->mv.as_mv, &best_mv, mvc); break;
case SPLITMV: {
int j = 0;
@@ -574,9 +541,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2[mv_contz]);
if (blockmode == NEW4X4) {
-#ifdef VP8_ENTROPY_STATS
- active_section = 11;
-#endif
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *)mvc);
}
} while (++j < cpi->mb.partition_info->count);
@@ -642,10 +606,6 @@ static void write_kfmodes(VP8_COMP *cpi) {
const B_PREDICTION_MODE L = left_block_mode(m, i);
const int bm = m->bmi[i].as_mode;
-#ifdef VP8_ENTROPY_STATS
- ++intra_mode_stats[A][L][bm];
-#endif
-
write_bmode(bc, bm, vp8_kf_bmode_prob[A][L]);
} while (++i < 16);
}
@@ -973,10 +933,6 @@ void vp8_update_coef_probs(VP8_COMP *cpi) {
vp8_write(w, u, upd);
#endif
-#ifdef VP8_ENTROPY_STATS
- ++tree_update_hist[i][j][k][t][u];
-#endif
-
if (u) {
/* send/use new probability */
@@ -990,16 +946,6 @@ void vp8_update_coef_probs(VP8_COMP *cpi) {
} while (++t < ENTROPY_NODES);
-/* Accum token counts for generation of default statistics */
-#ifdef VP8_ENTROPY_STATS
- t = 0;
-
- do {
- context_counters[i][j][k][t] += cpi->coef_counts[i][j][k][t];
- } while (++t < MAX_ENTROPY_TOKENS);
-
-#endif
-
} while (++k < PREV_COEF_CONTEXTS);
} while (++j < COEF_BANDS);
} while (++i < BLOCK_TYPES);
@@ -1286,15 +1232,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
if (pc->frame_type != KEY_FRAME) vp8_write_bit(bc, pc->refresh_last_frame);
-#ifdef VP8_ENTROPY_STATS
-
- if (pc->frame_type == INTER_FRAME)
- active_section = 0;
- else
- active_section = 7;
-
-#endif
-
vpx_clear_system_state();
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
@@ -1308,25 +1245,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
vp8_update_coef_probs(cpi);
#endif
-#ifdef VP8_ENTROPY_STATS
- active_section = 2;
-#endif
-
/* Write out the mb_no_coeff_skip flag */
vp8_write_bit(bc, pc->mb_no_coeff_skip);
if (pc->frame_type == KEY_FRAME) {
write_kfmodes(cpi);
-
-#ifdef VP8_ENTROPY_STATS
- active_section = 8;
-#endif
} else {
pack_inter_mode_mvs(cpi);
-
-#ifdef VP8_ENTROPY_STATS
- active_section = 1;
-#endif
}
vp8_stop_encode(bc);
@@ -1431,50 +1356,3 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
}
#endif
}
-
-#ifdef VP8_ENTROPY_STATS
-void print_tree_update_probs() {
- int i, j, k, l;
- FILE *f = fopen("context.c", "a");
- int Sum;
- fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
- fprintf(f,
- "const vp8_prob tree_update_probs[BLOCK_TYPES] [COEF_BANDS] "
- "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
-
- for (i = 0; i < BLOCK_TYPES; ++i) {
- fprintf(f, " { \n");
-
- for (j = 0; j < COEF_BANDS; ++j) {
- fprintf(f, " {\n");
-
- for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
- fprintf(f, " {");
-
- for (l = 0; l < ENTROPY_NODES; ++l) {
- Sum =
- tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];
-
- if (Sum > 0) {
- if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ",
- (tree_update_hist[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- } else
- fprintf(f, "%3ld, ", 128);
- }
-
- fprintf(f, "},\n");
- }
-
- fprintf(f, " },\n");
- }
-
- fprintf(f, " },\n");
- }
-
- fprintf(f, "};\n");
- fclose(f);
-}
-#endif
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.c
index 04f8db93311..819c2f22a0f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.c
@@ -15,10 +15,6 @@ unsigned __int64 Sectionbits[500];
#endif
-#ifdef VP8_ENTROPY_STATS
-unsigned int active_section = 0;
-#endif
-
const unsigned int vp8_prob_cost[256] = {
2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129,
1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858,
@@ -42,26 +38,26 @@ const unsigned int vp8_prob_cost[256] = {
12, 10, 9, 7, 6, 4, 3, 1, 1
};
-void vp8_start_encode(BOOL_CODER *br, unsigned char *source,
+void vp8_start_encode(BOOL_CODER *bc, unsigned char *source,
unsigned char *source_end) {
- br->lowvalue = 0;
- br->range = 255;
- br->count = -24;
- br->buffer = source;
- br->buffer_end = source_end;
- br->pos = 0;
+ bc->lowvalue = 0;
+ bc->range = 255;
+ bc->count = -24;
+ bc->buffer = source;
+ bc->buffer_end = source_end;
+ bc->pos = 0;
}
-void vp8_stop_encode(BOOL_CODER *br) {
+void vp8_stop_encode(BOOL_CODER *bc) {
int i;
- for (i = 0; i < 32; ++i) vp8_encode_bool(br, 0, 128);
+ for (i = 0; i < 32; ++i) vp8_encode_bool(bc, 0, 128);
}
-void vp8_encode_value(BOOL_CODER *br, int data, int bits) {
+void vp8_encode_value(BOOL_CODER *bc, int data, int bits) {
int bit;
for (bit = bits - 1; bit >= 0; bit--) {
- vp8_encode_bool(br, (1 & (data >> bit)), 0x80);
+ vp8_encode_bool(bc, (1 & (data >> bit)), 0x80);
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.h
index 2cf62def135..8ac0a2cc4a4 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/boolhuff.h
@@ -35,11 +35,11 @@ typedef struct {
struct vpx_internal_error_info *error;
} BOOL_CODER;
-extern void vp8_start_encode(BOOL_CODER *bc, unsigned char *buffer,
- unsigned char *buffer_end);
+void vp8_start_encode(BOOL_CODER *bc, unsigned char *source,
+ unsigned char *source_end);
-extern void vp8_encode_value(BOOL_CODER *br, int data, int bits);
-extern void vp8_stop_encode(BOOL_CODER *bc);
+void vp8_encode_value(BOOL_CODER *bc, int data, int bits);
+void vp8_stop_encode(BOOL_CODER *bc);
extern const unsigned int vp8_prob_cost[256];
DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
@@ -56,31 +56,20 @@ static int validate_buffer(const unsigned char *start, size_t len,
return 0;
}
-static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
+static void vp8_encode_bool(BOOL_CODER *bc, int bit, int probability) {
unsigned int split;
- int count = br->count;
- unsigned int range = br->range;
- unsigned int lowvalue = br->lowvalue;
+ int count = bc->count;
+ unsigned int range = bc->range;
+ unsigned int lowvalue = bc->lowvalue;
int shift;
-#ifdef VP8_ENTROPY_STATS
-#if defined(SECTIONBITS_OUTPUT)
-
- if (bit)
- Sectionbits[active_section] += vp8_prob_cost[255 - probability];
- else
- Sectionbits[active_section] += vp8_prob_cost[probability];
-
-#endif
-#endif
-
split = 1 + (((range - 1) * probability) >> 8);
range = split;
if (bit) {
lowvalue += split;
- range = br->range - split;
+ range = bc->range - split;
}
shift = vp8_norm[range];
@@ -92,18 +81,18 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000) {
- int x = br->pos - 1;
+ int x = bc->pos - 1;
- while (x >= 0 && br->buffer[x] == 0xff) {
- br->buffer[x] = (unsigned char)0;
+ while (x >= 0 && bc->buffer[x] == 0xff) {
+ bc->buffer[x] = (unsigned char)0;
x--;
}
- br->buffer[x] += 1;
+ bc->buffer[x] += 1;
}
- validate_buffer(br->buffer + br->pos, 1, br->buffer_end, br->error);
- br->buffer[br->pos++] = (lowvalue >> (24 - offset));
+ validate_buffer(bc->buffer + bc->pos, 1, bc->buffer_end, bc->error);
+ bc->buffer[bc->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
@@ -112,9 +101,9 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
}
lowvalue <<= shift;
- br->count = count;
- br->lowvalue = lowvalue;
- br->range = range;
+ bc->count = count;
+ bc->lowvalue = lowvalue;
+ bc->range = range;
}
#ifdef __cplusplus
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/denoising.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/denoising.c
index eb963b97e36..e54d1e9f4bd 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/denoising.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/denoising.c
@@ -213,13 +213,12 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
return FILTER_BLOCK;
}
-int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
- int mc_avg_uv_stride,
- unsigned char *running_avg_uv, int avg_uv_stride,
+int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg, int mc_avg_stride,
+ unsigned char *running_avg, int avg_stride,
unsigned char *sig, int sig_stride,
unsigned int motion_magnitude,
int increase_denoising) {
- unsigned char *running_avg_uv_start = running_avg_uv;
+ unsigned char *running_avg_start = running_avg;
unsigned char *sig_start = sig;
int sum_diff_thresh;
int r, c;
@@ -259,13 +258,13 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
int adjustment = 0;
int absdiff = 0;
- diff = mc_running_avg_uv[c] - sig[c];
+ diff = mc_running_avg[c] - sig[c];
absdiff = abs(diff);
// When |diff| <= |3 + shift_inc1|, use pixel value from
// last denoised raw.
if (absdiff <= 3 + shift_inc1) {
- running_avg_uv[c] = mc_running_avg_uv[c];
+ running_avg[c] = mc_running_avg[c];
sum_diff += diff;
} else {
if (absdiff >= 4 && absdiff <= 7) {
@@ -277,16 +276,16 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
}
if (diff > 0) {
if ((sig[c] + adjustment) > 255) {
- running_avg_uv[c] = 255;
+ running_avg[c] = 255;
} else {
- running_avg_uv[c] = sig[c] + adjustment;
+ running_avg[c] = sig[c] + adjustment;
}
sum_diff += adjustment;
} else {
if ((sig[c] - adjustment) < 0) {
- running_avg_uv[c] = 0;
+ running_avg[c] = 0;
} else {
- running_avg_uv[c] = sig[c] - adjustment;
+ running_avg[c] = sig[c] - adjustment;
}
sum_diff -= adjustment;
}
@@ -294,8 +293,8 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
}
/* Update pointers for next iteration. */
sig += sig_stride;
- mc_running_avg_uv += mc_avg_uv_stride;
- running_avg_uv += avg_uv_stride;
+ mc_running_avg += mc_avg_stride;
+ running_avg += avg_stride;
}
sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
@@ -314,27 +313,27 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
sig -= sig_stride * 8;
- mc_running_avg_uv -= mc_avg_uv_stride * 8;
- running_avg_uv -= avg_uv_stride * 8;
+ mc_running_avg -= mc_avg_stride * 8;
+ running_avg -= avg_stride * 8;
for (r = 0; r < 8; ++r) {
for (c = 0; c < 8; ++c) {
- int diff = mc_running_avg_uv[c] - sig[c];
+ int diff = mc_running_avg[c] - sig[c];
int adjustment = abs(diff);
if (adjustment > delta) adjustment = delta;
if (diff > 0) {
// Bring denoised signal down.
- if (running_avg_uv[c] - adjustment < 0) {
- running_avg_uv[c] = 0;
+ if (running_avg[c] - adjustment < 0) {
+ running_avg[c] = 0;
} else {
- running_avg_uv[c] = running_avg_uv[c] - adjustment;
+ running_avg[c] = running_avg[c] - adjustment;
}
sum_diff -= adjustment;
} else if (diff < 0) {
// Bring denoised signal up.
- if (running_avg_uv[c] + adjustment > 255) {
- running_avg_uv[c] = 255;
+ if (running_avg[c] + adjustment > 255) {
+ running_avg[c] = 255;
} else {
- running_avg_uv[c] = running_avg_uv[c] + adjustment;
+ running_avg[c] = running_avg[c] + adjustment;
}
sum_diff += adjustment;
}
@@ -342,8 +341,8 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
// TODO(marpan): Check here if abs(sum_diff) has gone below the
// threshold sum_diff_thresh, and if so, we can exit the row loop.
sig += sig_stride;
- mc_running_avg_uv += mc_avg_uv_stride;
- running_avg_uv += avg_uv_stride;
+ mc_running_avg += mc_avg_stride;
+ running_avg += avg_stride;
}
if (abs(sum_diff) > sum_diff_thresh) return COPY_BLOCK;
} else {
@@ -351,7 +350,7 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
}
}
- vp8_copy_mem8x8(running_avg_uv_start, avg_uv_stride, sig_start, sig_stride);
+ vp8_copy_mem8x8(running_avg_start, avg_stride, sig_start, sig_stride);
return FILTER_BLOCK;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/encodemv.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/encodemv.c
index ea93ccd7105..04adf105b9b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/encodemv.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/encodemv.c
@@ -16,10 +16,6 @@
#include <math.h>
-#ifdef VP8_ENTROPY_STATS
-extern unsigned int active_section;
-#endif
-
static void encode_mvcomponent(vp8_writer *const w, const int v,
const struct mv_context *mvc) {
const vp8_prob *p = mvc->prob;
@@ -309,9 +305,6 @@ void vp8_write_mvprobs(VP8_COMP *cpi) {
vp8_writer *const w = cpi->bc;
MV_CONTEXT *mvc = cpi->common.fc.mvc;
int flags[2] = { 0, 0 };
-#ifdef VP8_ENTROPY_STATS
- active_section = 4;
-#endif
write_component_probs(w, &mvc[0], &vp8_default_mv_context[0],
&vp8_mv_update_probs[0], cpi->mb.MVcount[0], 0,
&flags[0]);
@@ -323,8 +316,4 @@ void vp8_write_mvprobs(VP8_COMP *cpi) {
vp8_build_component_cost_table(
cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flags);
}
-
-#ifdef VP8_ENTROPY_STATS
- active_section = 5;
-#endif
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.c
index b4a49a3b1fb..1c3612fc300 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.c
@@ -21,11 +21,6 @@
#include "vp8/common/common.h"
#include "vpx_dsp/vpx_dsp_common.h"
-#ifdef VP8_ENTROPY_STATS
-static int mv_ref_ct[31][4][2];
-static int mv_mode_cts[4][2];
-#endif
-
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
/* MV costing is based on the distribution of vectors in the previous
* frame and as such will tend to over state the cost of vectors. In
@@ -1821,96 +1816,3 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
-
-#ifdef VP8_ENTROPY_STATS
-void print_mode_context(void) {
- FILE *f = fopen("modecont.c", "w");
- int i, j;
-
- fprintf(f, "#include \"entropy.h\"\n");
- fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
- fprintf(f, "{\n");
-
- for (j = 0; j < 6; ++j) {
- fprintf(f, " { /* %d */\n", j);
- fprintf(f, " ");
-
- for (i = 0; i < 4; ++i) {
- int overal_prob;
- int this_prob;
- int count;
-
- /* Overall probs */
- count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
-
- if (count)
- overal_prob = 256 * mv_mode_cts[i][0] / count;
- else
- overal_prob = 128;
-
- if (overal_prob == 0) overal_prob = 1;
-
- /* context probs */
- count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
-
- if (count)
- this_prob = 256 * mv_ref_ct[j][i][0] / count;
- else
- this_prob = 128;
-
- if (this_prob == 0) this_prob = 1;
-
- fprintf(f, "%5d, ", this_prob);
- }
-
- fprintf(f, " },\n");
- }
-
- fprintf(f, "};\n");
- fclose(f);
-}
-
-/* MV ref count VP8_ENTROPY_STATS stats code */
-#ifdef VP8_ENTROPY_STATS
-void init_mv_ref_counts() {
- memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
- memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
-}
-
-void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4]) {
- if (m == ZEROMV) {
- ++mv_ref_ct[ct[0]][0][0];
- ++mv_mode_cts[0][0];
- } else {
- ++mv_ref_ct[ct[0]][0][1];
- ++mv_mode_cts[0][1];
-
- if (m == NEARESTMV) {
- ++mv_ref_ct[ct[1]][1][0];
- ++mv_mode_cts[1][0];
- } else {
- ++mv_ref_ct[ct[1]][1][1];
- ++mv_mode_cts[1][1];
-
- if (m == NEARMV) {
- ++mv_ref_ct[ct[2]][2][0];
- ++mv_mode_cts[2][0];
- } else {
- ++mv_ref_ct[ct[2]][2][1];
- ++mv_mode_cts[2][1];
-
- if (m == NEWMV) {
- ++mv_ref_ct[ct[3]][3][0];
- ++mv_mode_cts[3][0];
- } else {
- ++mv_ref_ct[ct[3]][3][1];
- ++mv_mode_cts[3][1];
- }
- }
- }
- }
-}
-
-#endif /* END MV ref count VP8_ENTROPY_STATS stats code */
-
-#endif
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.h
index 490b0b87252..6c77995da4d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/mcomp.h
@@ -18,11 +18,6 @@
extern "C" {
#endif
-#ifdef VP8_ENTROPY_STATS
-extern void init_mv_ref_counts();
-extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
-#endif
-
/* The maximum number of steps in a step search given the largest allowed
* initial step
*/
@@ -34,15 +29,14 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
/* Maximum size of the first step in full pel units */
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
-extern void print_mode_context(void);
-extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
-extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
-extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
+void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
+void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
-extern int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
- int_mv *best_mv, int search_param, int error_per_bit,
- const vp8_variance_fn_ptr_t *vf, int *mvsadcost[2],
- int *mvcost[2], int_mv *center_mv);
+int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+ int_mv *best_mv, int search_param, int sad_per_bit,
+ const vp8_variance_fn_ptr_t *vfp, int *mvsadcost[2],
+ int *mvcost[2], int_mv *center_mv);
typedef int(fractional_mv_step_fp)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
@@ -51,10 +45,10 @@ typedef int(fractional_mv_step_fp)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int *mvcost[2], int *distortion,
unsigned int *sse);
-extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
-extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
-extern fractional_mv_step_fp vp8_find_best_half_pixel_step;
-extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
+fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
+fractional_mv_step_fp vp8_find_best_sub_pixel_step;
+fractional_mv_step_fp vp8_find_best_half_pixel_step;
+fractional_mv_step_fp vp8_skip_fractional_mv_step;
typedef int (*vp8_full_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int sad_per_bit,
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/modecosts.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/modecosts.h
index 422a79b3608..09ee2b5520b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/modecosts.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/modecosts.h
@@ -17,7 +17,7 @@ extern "C" {
struct VP8_COMP;
-void vp8_init_mode_costs(struct VP8_COMP *x);
+void vp8_init_mode_costs(struct VP8_COMP *c);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/onyx_if.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/onyx_if.c
index 2da940199cf..adc25024cfc 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/onyx_if.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/onyx_if.c
@@ -65,9 +65,7 @@ extern int vp8_update_coef_context(VP8_COMP *cpi);
extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post, int filt_lvl,
int low_var_thresh, int flag);
-extern void print_parms(VP8_CONFIG *ocf, char *filenam);
extern unsigned int vp8_get_processor_freq();
-extern void print_tree_update_probs();
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
@@ -101,10 +99,6 @@ extern int skip_true_count;
extern int skip_false_count;
#endif
-#ifdef VP8_ENTROPY_STATS
-extern int intra_mode_stats[10][10][10];
-#endif
-
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
@@ -1893,10 +1887,6 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
-#ifdef VP8_ENTROPY_STATS
- init_context_counters();
-#endif
-
/*Initialize the feed-forward activity masking.*/
cpi->activity_avg = 90 << 12;
@@ -2005,10 +1995,6 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->mb.rd_thresh_mult[i] = 128;
}
-#ifdef VP8_ENTROPY_STATS
- init_mv_ref_counts();
-#endif
-
#if CONFIG_MULTITHREAD
if (vp8cx_create_encoder_threads(cpi)) {
vp8_remove_compressor(&cpi);
@@ -2106,8 +2092,8 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
return cpi;
}
-void vp8_remove_compressor(VP8_COMP **ptr) {
- VP8_COMP *cpi = *ptr;
+void vp8_remove_compressor(VP8_COMP **comp) {
+ VP8_COMP *cpi = *comp;
if (!cpi) return;
@@ -2120,12 +2106,6 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
#endif
-#ifdef VP8_ENTROPY_STATS
- print_context_counters();
- print_tree_update_probs();
- print_mode_context();
-#endif
-
#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1) {
@@ -2252,40 +2232,6 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
}
#endif
-#ifdef VP8_ENTROPY_STATS
- {
- int i, j, k;
- FILE *fmode = fopen("modecontext.c", "w");
-
- fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
- fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
- fprintf(fmode,
- "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
-
- for (i = 0; i < 10; ++i) {
- fprintf(fmode, " { /* Above Mode : %d */\n", i);
-
- for (j = 0; j < 10; ++j) {
- fprintf(fmode, " {");
-
- for (k = 0; k < 10; ++k) {
- if (!intra_mode_stats[i][j][k])
- fprintf(fmode, " %5d, ", 1);
- else
- fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
- }
-
- fprintf(fmode, "}, /* left_mode %d */\n", j);
- }
-
- fprintf(fmode, " },\n");
- }
-
- fprintf(fmode, "};\n");
- fclose(fmode);
- }
-#endif
-
#if defined(SECTIONBITS_OUTPUT)
if (0) {
@@ -2326,7 +2272,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
vp8_remove_common(&cpi->common);
vpx_free(cpi);
- *ptr = 0;
+ *comp = 0;
#ifdef OUTPUT_YUV_SRC
fclose(yuv_file);
@@ -4867,14 +4813,6 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
cm = &cpi->common;
- if (setjmp(cpi->common.error.jmp)) {
- cpi->common.error.setjmp = 0;
- vpx_clear_system_state();
- return VPX_CODEC_CORRUPT_FRAME;
- }
-
- cpi->common.error.setjmp = 1;
-
vpx_usec_timer_start(&cmptimer);
cpi->source = NULL;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/pickinter.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/pickinter.c
index 1bb54fc2b14..6bb3cacc5ff 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/pickinter.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/pickinter.c
@@ -173,9 +173,8 @@ static int get_prediction_error(BLOCK *be, BLOCKD *b) {
static int pick_intra4x4block(MACROBLOCK *x, int ib,
B_PREDICTION_MODE *best_mode,
- const int *mode_costs,
-
- int *bestrate, int *bestdistortion) {
+ const int *mode_costs, int *bestrate,
+ int *bestdistortion) {
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
int dst_stride = x->e_mbd.dst.y_stride;
@@ -1303,9 +1302,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
update_mvcount(x, &best_ref_mv);
}
-void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
+void vp8_pick_intra_mode(MACROBLOCK *x, int *rate) {
int error4x4, error16x16 = INT_MAX;
- int rate, best_rate = 0, distortion, best_sse;
+ int rate_, best_rate = 0, distortion, best_sse;
MB_PREDICTION_MODE mode, best_mode = DC_PRED;
int this_rd;
unsigned int sse;
@@ -1323,23 +1322,23 @@ void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
xd->predictor, 16);
distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
16, &sse);
- rate = x->mbmode_cost[xd->frame_type][mode];
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ rate_ = x->mbmode_cost[xd->frame_type][mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate_, distortion);
if (error16x16 > this_rd) {
error16x16 = this_rd;
best_mode = mode;
best_sse = sse;
- best_rate = rate;
+ best_rate = rate_;
}
}
xd->mode_info_context->mbmi.mode = best_mode;
- error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse);
+ error4x4 = pick_intra4x4mby_modes(x, &rate_, &best_sse);
if (error4x4 < error16x16) {
xd->mode_info_context->mbmi.mode = B_PRED;
- best_rate = rate;
+ best_rate = rate_;
}
- *rate_ = best_rate;
+ *rate = best_rate;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/ratectrl.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/ratectrl.c
index fc833bccc94..ce07a6f197f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/ratectrl.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/ratectrl.c
@@ -1464,7 +1464,7 @@ int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) {
(cpi->oxcf.screen_content_mode == 2 ||
(cpi->drop_frames_allowed &&
(force_drop_overshoot ||
- (cpi->rate_correction_factor < (4.0f * MIN_BPB_FACTOR) &&
+ (cpi->rate_correction_factor < (8.0f * MIN_BPB_FACTOR) &&
cpi->frames_since_last_drop_overshoot > (int)cpi->framerate))))) {
// Note: the "projected_frame_size" from encode_frame() only gives estimate
// of mode/motion vector rate (in non-rd mode): so below we only require
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.c
index b4182c5cd5c..679d66bbf23 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.c
@@ -2358,11 +2358,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
rd_update_mvcount(x, &best_ref_mv);
}
-void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) {
+void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate) {
int error4x4, error16x16;
int rate4x4, rate16x16 = 0, rateuv;
int dist4x4, dist16x16, distuv;
- int rate;
+ int rate_;
int rate4x4_tokenonly = 0;
int rate16x16_tokenonly = 0;
int rateuv_tokenonly = 0;
@@ -2370,7 +2370,7 @@ void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) {
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
rd_pick_intra_mbuv_mode(x, &rateuv, &rateuv_tokenonly, &distuv);
- rate = rateuv;
+ rate_ = rateuv;
error16x16 = rd_pick_intra16x16mby_mode(x, &rate16x16, &rate16x16_tokenonly,
&dist16x16);
@@ -2380,10 +2380,10 @@ void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) {
if (error4x4 < error16x16) {
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
- rate += rate4x4;
+ rate_ += rate4x4;
} else {
- rate += rate16x16;
+ rate_ += rate16x16;
}
- *rate_ = rate;
+ *rate = rate_;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.h
index e22b58b8af2..cc3db8197c7 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/rdopt.h
@@ -63,12 +63,12 @@ static INLINE void insertsortsad(int arr[], int idx[], int len) {
}
}
-extern void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue);
-extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
- int recon_yoffset, int recon_uvoffset,
- int *returnrate, int *returndistortion,
- int *returnintra, int mb_row, int mb_col);
-extern void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate);
+void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue);
+void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
+ int recon_uvoffset, int *returnrate,
+ int *returndistortion, int *returnintra, int mb_row,
+ int mb_col);
+void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate);
static INLINE void get_plane_pointers(const YV12_BUFFER_CONFIG *fb,
unsigned char *plane[3],
@@ -110,9 +110,9 @@ static INLINE void get_reference_search_order(const VP8_COMP *cpi,
for (; i < 4; ++i) ref_frame_map[i] = -1;
}
-extern void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
- int_mv *mvp, int refframe, int *ref_frame_sign_bias,
- int *sr, int near_sadidx[]);
+void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
+ int_mv *mvp, int refframe, int *ref_frame_sign_bias, int *sr,
+ int near_sadidx[]);
void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x,
int recon_yoffset, int near_sadidx[]);
int VP8_UVSSE(MACROBLOCK *x);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.c b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.c
index ca5f0e3d892..c3d70266074 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.c
@@ -19,10 +19,6 @@
/* Global event counters used for accumulating statistics across several
compressions, then generating context.c = initial stats. */
-#ifdef VP8_ENTROPY_STATS
-_int64 context_counters[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
- [MAX_ENTROPY_TOKENS];
-#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
void vp8_fix_contexts(MACROBLOCKD *x);
@@ -383,72 +379,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) {
tokenize1st_order_b(x, t, plane_type, cpi);
}
-#ifdef VP8_ENTROPY_STATS
-
-void init_context_counters(void) {
- memset(context_counters, 0, sizeof(context_counters));
-}
-
-void print_context_counters() {
- int type, band, pt, t;
-
- FILE *const f = fopen("context.c", "w");
-
- fprintf(f, "#include \"entropy.h\"\n");
-
- fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n");
-
- fprintf(f,
- "int Contexts[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] "
- "[MAX_ENTROPY_TOKENS];\n\n");
-
- fprintf(f,
- "const int default_contexts[BLOCK_TYPES] [COEF_BANDS] "
- "[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
-
-#define Comma(X) (X ? "," : "")
-
- type = 0;
-
- do {
- fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
-
- band = 0;
-
- do {
- fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
-
- pt = 0;
-
- do {
- fprintf(f, "%s\n {", Comma(pt));
-
- t = 0;
-
- do {
- const _int64 x = context_counters[type][band][pt][t];
- const int y = (int)x;
-
- assert(x == (_int64)y); /* no overflow handling yet */
- fprintf(f, "%s %d", Comma(t), y);
-
- } while (++t < MAX_ENTROPY_TOKENS);
-
- fprintf(f, "}");
- } while (++pt < PREV_COEF_CONTEXTS);
-
- fprintf(f, "\n }");
-
- } while (++band < COEF_BANDS);
-
- fprintf(f, "\n }");
- } while (++type < BLOCK_TYPES);
-
- fprintf(f, "\n};\n");
- fclose(f);
-}
-#endif
-
static void stuff2nd_order_b(TOKENEXTRA **tp, ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l, VP8_COMP *cpi, MACROBLOCK *x) {
int pt; /* near block/prev token context index */
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.h
index 46425ac8f9b..47b5be17f1f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/tokenize.h
@@ -34,14 +34,6 @@ typedef struct {
int rd_cost_mby(MACROBLOCKD *);
-#ifdef VP8_ENTROPY_STATS
-void init_context_counters();
-void print_context_counters();
-
-extern _int64 context_counters[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
- [MAX_ENTROPY_TOKENS];
-#endif
-
extern const short *const vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/treewriter.h b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/treewriter.h
index 0d7b06e568d..c02683a58b7 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/encoder/treewriter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/encoder/treewriter.h
@@ -91,9 +91,9 @@ static INLINE int vp8_cost_token(vp8_tree t, const vp8_prob *const p,
/* Fill array of costs for all possible token values. */
-void vp8_cost_tokens(int *Costs, const vp8_prob *, vp8_tree);
+void vp8_cost_tokens(int *c, const vp8_prob *, vp8_tree);
-void vp8_cost_tokens2(int *Costs, const vp8_prob *, vp8_tree, int);
+void vp8_cost_tokens2(int *c, const vp8_prob *, vp8_tree, int);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_common.mk b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_common.mk
index 246fe6a6772..9f106a2c384 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_common.mk
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_common.mk
@@ -70,8 +70,6 @@ VP8_COMMON_SRCS-yes += common/vp8_entropymodedata.h
VP8_COMMON_SRCS-yes += common/treecoder.c
-VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/filter_x86.c
-VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/filter_x86.h
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp8_asm_stubs.c
VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/loopfilter_x86.c
VP8_COMMON_SRCS-$(CONFIG_POSTPROC) += common/mfqe.c
@@ -86,6 +84,7 @@ VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/copy_sse2.asm
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_blk_sse2.c
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idctllm_sse2.asm
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/recon_sse2.asm
+VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/bilinear_filter_sse2.c
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/subpixel_sse2.asm
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/loopfilter_sse2.asm
VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/iwalsh_sse2.asm
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_cx_iface.c b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_cx_iface.c
index b5f96a17e41..d01d2095f36 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_cx_iface.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_cx_iface.c
@@ -16,6 +16,7 @@
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_version.h"
#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_once.h"
#include "vp8/encoder/onyx_int.h"
#include "vpx/vp8cx.h"
@@ -796,9 +797,11 @@ static vpx_codec_err_t set_reference_and_update(vpx_codec_alg_priv_t *ctx,
static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
const vpx_image_t *img, vpx_codec_pts_t pts,
unsigned long duration,
- vpx_enc_frame_flags_t flags,
+ vpx_enc_frame_flags_t enc_flags,
unsigned long deadline) {
- vpx_codec_err_t res = VPX_CODEC_OK;
+ volatile vpx_codec_err_t res = VPX_CODEC_OK;
+ // Make a copy as volatile to avoid -Wclobbered with longjmp.
+ volatile vpx_enc_frame_flags_t flags = enc_flags;
if (!ctx->cfg.rc_target_bitrate) {
#if CONFIG_MULTI_RES_ENCODING
@@ -840,6 +843,12 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
}
}
+ if (setjmp(ctx->cpi->common.error.jmp)) {
+ ctx->cpi->common.error.setjmp = 0;
+ vpx_clear_system_state();
+ return VPX_CODEC_CORRUPT_FRAME;
+ }
+
/* Initialize the encoder instance on the first frame*/
if (!res && ctx->cpi) {
unsigned int lib_flags;
@@ -886,6 +895,8 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
cx_data_end = ctx->cx_data + cx_data_sz;
lib_flags = 0;
+ ctx->cpi->common.error.setjmp = 1;
+
while (cx_data_sz >= ctx->cx_data_sz / 2) {
comp_data_state = vp8_get_compressed_data(
ctx->cpi, &lib_flags, &size, cx_data, cx_data_end, &dst_time_stamp,
@@ -1190,7 +1201,7 @@ static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = {
static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = {
{ 0,
{
- 0, /* g_usage */
+ 0, /* g_usage (unused) */
0, /* g_threads */
0, /* g_profile */
diff --git a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_dx_iface.c b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_dx_iface.c
index a2008b90355..7db77195b24 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp8/vp8_dx_iface.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp8/vp8_dx_iface.c
@@ -45,6 +45,12 @@ struct vpx_codec_alg_priv {
vpx_codec_dec_cfg_t cfg;
vp8_stream_info_t si;
int decoder_init;
+#if CONFIG_MULTITHREAD
+ // Restart threads on next frame if set to 1.
+ // This is set when error happens in multithreaded decoding and all threads
+ // are shut down.
+ int restart_threads;
+#endif
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
@@ -268,7 +274,7 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
volatile vpx_codec_err_t res;
- unsigned int resolution_change = 0;
+ volatile unsigned int resolution_change = 0;
unsigned int w, h;
if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) {
@@ -298,6 +304,27 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
if ((ctx->si.h != h) || (ctx->si.w != w)) resolution_change = 1;
+#if CONFIG_MULTITHREAD
+ if (!res && ctx->restart_threads) {
+ struct frame_buffers *fb = &ctx->yv12_frame_buffers;
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *const pc = &pbi->common;
+ if (setjmp(pbi->common.error.jmp)) {
+ vp8_remove_decoder_instances(fb);
+ vp8_zero(fb->pbi);
+ vpx_clear_system_state();
+ return VPX_CODEC_ERROR;
+ }
+ pbi->common.error.setjmp = 1;
+ pbi->max_threads = ctx->cfg.threads;
+ vp8_decoder_create_threads(pbi);
+ if (vpx_atomic_load_acquire(&pbi->b_multithreaded_rd)) {
+ vp8mt_alloc_temp_buffers(pbi, pc->Width, pc->mb_rows);
+ }
+ ctx->restart_threads = 0;
+ pbi->common.error.setjmp = 0;
+ }
+#endif
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init) {
VP8D_CONFIG oxcf;
@@ -335,8 +362,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
if (!res) {
VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *const pc = &pbi->common;
if (resolution_change) {
- VP8_COMMON *const pc = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
#if CONFIG_MULTITHREAD
int i;
@@ -428,9 +455,35 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
pbi->common.fb_idx_ref_cnt[0] = 0;
}
+ if (setjmp(pbi->common.error.jmp)) {
+ /* We do not know if the missing frame(s) was supposed to update
+ * any of the reference buffers, but we act conservative and
+ * mark only the last buffer as corrupted.
+ */
+ pc->yv12_fb[pc->lst_fb_idx].corrupted = 1;
+
+ if (pc->fb_idx_ref_cnt[pc->new_fb_idx] > 0) {
+ pc->fb_idx_ref_cnt[pc->new_fb_idx]--;
+ }
+ pc->error.setjmp = 0;
+#if CONFIG_MULTITHREAD
+ if (pbi->restart_threads) {
+ ctx->si.w = 0;
+ ctx->si.h = 0;
+ ctx->restart_threads = 1;
+ }
+#endif
+ res = update_error_state(ctx, &pbi->common.error);
+ return res;
+ }
+
+ pbi->common.error.setjmp = 1;
+
/* update the pbi fragment data */
pbi->fragments = ctx->fragments;
-
+#if CONFIG_MULTITHREAD
+ pbi->restart_threads = 0;
+#endif
ctx->user_priv = user_priv;
if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline)) {
res = update_error_state(ctx, &pbi->common.error);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_entropymv.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_entropymv.h
index dcc8e299899..ee9d37973ff 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_entropymv.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_entropymv.h
@@ -25,7 +25,7 @@ struct VP9Common;
void vp9_init_mv_probs(struct VP9Common *cm);
-void vp9_adapt_mv_probs(struct VP9Common *cm, int usehp);
+void vp9_adapt_mv_probs(struct VP9Common *cm, int allow_hp);
static INLINE int use_mv_hp(const MV *ref) {
const int kMvRefThresh = 64; // threshold for use of high-precision 1/8 mv
@@ -127,7 +127,7 @@ typedef struct {
nmv_component_counts comps[2];
} nmv_context_counts;
-void vp9_inc_mv(const MV *mv, nmv_context_counts *mvctx);
+void vp9_inc_mv(const MV *mv, nmv_context_counts *counts);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.c b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.c
index 6c43af8ce80..adbda6c825b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.c
@@ -63,6 +63,20 @@ DECLARE_ALIGNED(256, static const InterpKernel,
{ 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 1, 38, 64, 32, -1, -3 }
};
-const InterpKernel *vp9_filter_kernels[4] = {
- sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters
+// 4-tap filter
+DECLARE_ALIGNED(256, static const InterpKernel,
+ sub_pel_filters_4[SUBPEL_SHIFTS]) = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 0, -4, 126, 8, -2, 0, 0 },
+ { 0, 0, -6, 120, 18, -4, 0, 0 }, { 0, 0, -8, 114, 28, -6, 0, 0 },
+ { 0, 0, -10, 108, 36, -6, 0, 0 }, { 0, 0, -12, 102, 46, -8, 0, 0 },
+ { 0, 0, -12, 94, 56, -10, 0, 0 }, { 0, 0, -12, 84, 66, -10, 0, 0 },
+ { 0, 0, -12, 76, 76, -12, 0, 0 }, { 0, 0, -10, 66, 84, -12, 0, 0 },
+ { 0, 0, -10, 56, 94, -12, 0, 0 }, { 0, 0, -8, 46, 102, -12, 0, 0 },
+ { 0, 0, -6, 36, 108, -10, 0, 0 }, { 0, 0, -6, 28, 114, -8, 0, 0 },
+ { 0, 0, -4, 18, 120, -6, 0, 0 }, { 0, 0, -2, 8, 126, -4, 0, 0 }
+};
+
+const InterpKernel *vp9_filter_kernels[5] = {
+ sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters,
+ sub_pel_filters_4
};
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.h
index b379665b1c1..0382c88e7c0 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_filter.h
@@ -25,6 +25,7 @@ extern "C" {
#define EIGHTTAP_SHARP 2
#define SWITCHABLE_FILTERS 3 /* Number of switchable filters */
#define BILINEAR 3
+#define FOURTAP 4
// The codec can operate in four possible inter prediction filter mode:
// 8-tap, 8-tap-smooth, 8-tap-sharp, and switching between the three.
#define SWITCHABLE_FILTER_CONTEXTS (SWITCHABLE_FILTERS + 1)
@@ -32,7 +33,7 @@ extern "C" {
typedef uint8_t INTERP_FILTER;
-extern const InterpKernel *vp9_filter_kernels[4];
+extern const InterpKernel *vp9_filter_kernels[5];
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.c b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.c
index da9180b71a5..95d6029f3b5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.c
@@ -880,12 +880,12 @@ void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
- MODE_INFO **mi, const int mode_info_stride,
+ MODE_INFO **mi8x8, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
- MODE_INFO **mip = mi;
- MODE_INFO **mip2 = mi;
+ MODE_INFO **mip = mi8x8;
+ MODE_INFO **mip2 = mi8x8;
// These are offsets to the next mi in the 64x64 block. It is what gets
// added to the mi ptr as we go through each loop. It helps us to avoid
@@ -1087,13 +1087,19 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
const int row_step_stride = cm->mi_stride * row_step;
struct buf_2d *const dst = &plane->dst;
uint8_t *const dst0 = dst->buf;
- unsigned int mask_16x16[MI_BLOCK_SIZE] = { 0 };
- unsigned int mask_8x8[MI_BLOCK_SIZE] = { 0 };
- unsigned int mask_4x4[MI_BLOCK_SIZE] = { 0 };
- unsigned int mask_4x4_int[MI_BLOCK_SIZE] = { 0 };
+ unsigned int mask_16x16[MI_BLOCK_SIZE];
+ unsigned int mask_8x8[MI_BLOCK_SIZE];
+ unsigned int mask_4x4[MI_BLOCK_SIZE];
+ unsigned int mask_4x4_int[MI_BLOCK_SIZE];
uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE];
int r, c;
+ vp9_zero(mask_16x16);
+ vp9_zero(mask_8x8);
+ vp9_zero(mask_4x4);
+ vp9_zero(mask_4x4_int);
+ vp9_zero(lfl);
+
for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
unsigned int mask_16x16_c = 0;
unsigned int mask_8x8_c = 0;
@@ -1330,6 +1336,8 @@ void vp9_filter_block_plane_ss11(VP9_COMMON *const cm,
uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
uint16_t mask_4x4_int = lfm->int_4x4_uv;
+ vp9_zero(lfl_uv);
+
assert(plane->subsampling_x == 1 && plane->subsampling_y == 1);
// Vertical pass: do 2 rows at one time
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.h
index daf3b91315e..39648a72c32 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_loopfilter.h
@@ -97,7 +97,7 @@ struct VP9LfSyncData;
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
void vp9_setup_mask(struct VP9Common *const cm, const int mi_row,
- const int mi_col, MODE_INFO **mi_8x8,
+ const int mi_col, MODE_INFO **mi8x8,
const int mode_info_stride, LOOP_FILTER_MASK *lfm);
void vp9_filter_block_plane_ss00(struct VP9Common *const cm,
@@ -120,7 +120,7 @@ void vp9_loop_filter_init(struct VP9Common *cm);
void vp9_loop_filter_frame_init(struct VP9Common *cm, int default_filt_lvl);
void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP9Common *cm,
- struct macroblockd *mbd, int filter_level,
+ struct macroblockd *xd, int frame_filter_level,
int y_only, int partial_frame);
// Get the superblock lfm for a given mi_row, mi_col.
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_onyxc_int.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_onyxc_int.h
index 45d3b0f82f3..662b8ef5e12 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_onyxc_int.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_onyxc_int.h
@@ -37,10 +37,9 @@ extern "C" {
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
-// 1 scratch frame for the new frame, 3 for scaled references on the encoder.
-// TODO(jkoleszar): These 3 extra references could probably come from the
-// normal reference pool.
-#define FRAME_BUFFERS (REF_FRAMES + 4)
+// 1 scratch frame for the new frame, REFS_PER_FRAME for scaled references on
+// the encoder.
+#define FRAME_BUFFERS (REF_FRAMES + 1 + REFS_PER_FRAME)
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
@@ -259,6 +258,8 @@ typedef struct VP9Common {
PARTITION_CONTEXT *above_seg_context;
ENTROPY_CONTEXT *above_context;
int above_context_alloc_cols;
+
+ int lf_row;
} VP9_COMMON;
static INLINE YV12_BUFFER_CONFIG *get_buf_frame(VP9_COMMON *cm, int index) {
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_postproc.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_postproc.h
index 0aafa72ca8a..67efc1b4e4b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_postproc.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_postproc.h
@@ -38,7 +38,7 @@ struct VP9Common;
#define MFQE_PRECISION 4
int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest,
- vp9_ppflags_t *flags, int unscaled_width);
+ vp9_ppflags_t *ppflags, int unscaled_width);
void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q,
uint8_t *limits);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_reconinter.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_reconinter.h
index 2c6d6695aba..992e30c344b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_reconinter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_reconinter.h
@@ -61,15 +61,15 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
void vp9_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
- int dst_stride, const MV *mv_q3,
+ int dst_stride, const MV *src_mv,
const struct scale_factors *sf, int w, int h,
- int do_avg, const InterpKernel *kernel,
+ int ref, const InterpKernel *kernel,
enum mv_precision precision, int x, int y);
#if CONFIG_VP9_HIGHBITDEPTH
void vp9_highbd_build_inter_predictor(
const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
- const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
+ const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
const InterpKernel *kernel, enum mv_precision precision, int x, int y,
int bd);
#endif
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_rtcd_defs.pl b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_rtcd_defs.pl
index 6d7f9526098..d7ad2b693bc 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_rtcd_defs.pl
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_rtcd_defs.pl
@@ -62,7 +62,7 @@ add_proto qw/void vp9_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, i
add_proto qw/void vp9_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int stride, int tx_type";
-add_proto qw/void vp9_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+add_proto qw/void vp9_iht16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int stride, int tx_type";
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
# Note that there are more specializations appended when
@@ -100,7 +100,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
add_proto qw/void vp9_highbd_iht8x8_64_add/, "const tran_low_t *input, uint16_t *dest, int stride, int tx_type, int bd";
- add_proto qw/void vp9_highbd_iht16x16_256_add/, "const tran_low_t *input, uint16_t *output, int pitch, int tx_type, int bd";
+ add_proto qw/void vp9_highbd_iht16x16_256_add/, "const tran_low_t *input, uint16_t *dest, int stride, int tx_type, int bd";
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
specialize qw/vp9_highbd_iht4x4_16_add neon sse4_1/;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_scale.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_scale.h
index 53c6eef7256..aaafdf86719 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_scale.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_scale.h
@@ -42,7 +42,7 @@ MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
#if CONFIG_VP9_HIGHBITDEPTH
void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h,
- int use_high);
+ int use_highbd);
#else
void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
int other_h, int this_w, int this_h);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c
index d4b076645fb..36530fae677 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.c
@@ -229,6 +229,28 @@ void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm,
workers, num_workers, lf_sync);
}
+void vp9_lpf_mt_init(VP9LfSync *lf_sync, VP9_COMMON *cm, int frame_filter_level,
+ int num_workers) {
+ const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
+
+ if (!frame_filter_level) return;
+
+ if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
+ num_workers > lf_sync->num_workers) {
+ vp9_loop_filter_dealloc(lf_sync);
+ vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+ }
+
+ // Initialize cur_sb_col to -1 for all SB rows.
+ memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
+
+ lf_sync->corrupted = 0;
+
+ memset(lf_sync->num_tiles_done, 0,
+ sizeof(*lf_sync->num_tiles_done) * sb_rows);
+ cm->lf_row = 0;
+}
+
// Set up nsync by width.
static INLINE int get_sync_range(int width) {
// nsync numbers are picked by testing. For example, for 4k
@@ -266,6 +288,25 @@ void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
pthread_cond_init(&lf_sync->cond[i], NULL);
}
}
+ pthread_mutex_init(&lf_sync->lf_mutex, NULL);
+
+ CHECK_MEM_ERROR(cm, lf_sync->recon_done_mutex,
+ vpx_malloc(sizeof(*lf_sync->recon_done_mutex) * rows));
+ if (lf_sync->recon_done_mutex) {
+ int i;
+ for (i = 0; i < rows; ++i) {
+ pthread_mutex_init(&lf_sync->recon_done_mutex[i], NULL);
+ }
+ }
+
+ CHECK_MEM_ERROR(cm, lf_sync->recon_done_cond,
+ vpx_malloc(sizeof(*lf_sync->recon_done_cond) * rows));
+ if (lf_sync->recon_done_cond) {
+ int i;
+ for (i = 0; i < rows; ++i) {
+ pthread_cond_init(&lf_sync->recon_done_cond[i], NULL);
+ }
+ }
}
#endif // CONFIG_MULTITHREAD
@@ -276,6 +317,11 @@ void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
+ CHECK_MEM_ERROR(cm, lf_sync->num_tiles_done,
+ vpx_malloc(sizeof(*lf_sync->num_tiles_done) *
+ mi_cols_aligned_to_sb(cm->mi_rows) >>
+ MI_BLOCK_SIZE_LOG2));
+
// Set up nsync.
lf_sync->sync_range = get_sync_range(width);
}
@@ -298,15 +344,126 @@ void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) {
}
vpx_free(lf_sync->cond);
}
+ if (lf_sync->recon_done_mutex != NULL) {
+ int i;
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]);
+ }
+ vpx_free(lf_sync->recon_done_mutex);
+ }
+
+ pthread_mutex_destroy(&lf_sync->lf_mutex);
+ if (lf_sync->recon_done_cond != NULL) {
+ int i;
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_cond_destroy(&lf_sync->recon_done_cond[i]);
+ }
+ vpx_free(lf_sync->recon_done_cond);
+ }
#endif // CONFIG_MULTITHREAD
+
vpx_free(lf_sync->lfdata);
vpx_free(lf_sync->cur_sb_col);
+ vpx_free(lf_sync->num_tiles_done);
// clear the structure as the source of this call may be a resize in which
// case this call will be followed by an _alloc() which may fail.
vp9_zero(*lf_sync);
}
}
+static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) {
+ int return_val = -1;
+ int cur_row;
+ const int max_rows = cm->mi_rows;
+
+#if CONFIG_MULTITHREAD
+ const int tile_cols = 1 << cm->log2_tile_cols;
+
+ pthread_mutex_lock(&lf_sync->lf_mutex);
+ if (cm->lf_row < max_rows) {
+ cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2;
+ return_val = cm->lf_row;
+ cm->lf_row += MI_BLOCK_SIZE;
+ if (cm->lf_row < max_rows) {
+ /* If this is not the last row, make sure the next row is also decoded.
+ * This is because the intra predict has to happen before loop filter */
+ cur_row += 1;
+ }
+ }
+ pthread_mutex_unlock(&lf_sync->lf_mutex);
+
+ if (return_val == -1) return return_val;
+
+ pthread_mutex_lock(&lf_sync->recon_done_mutex[cur_row]);
+ if (lf_sync->num_tiles_done[cur_row] < tile_cols) {
+ pthread_cond_wait(&lf_sync->recon_done_cond[cur_row],
+ &lf_sync->recon_done_mutex[cur_row]);
+ }
+ pthread_mutex_unlock(&lf_sync->recon_done_mutex[cur_row]);
+ pthread_mutex_lock(&lf_sync->lf_mutex);
+ if (lf_sync->corrupted) {
+ return_val = -1;
+ }
+ pthread_mutex_unlock(&lf_sync->lf_mutex);
+#else
+ (void)lf_sync;
+ if (cm->lf_row < max_rows) {
+ cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2;
+ return_val = cm->lf_row;
+ cm->lf_row += MI_BLOCK_SIZE;
+ if (cm->lf_row < max_rows) {
+ /* If this is not the last row, make sure the next row is also decoded.
+ * This is because the intra predict has to happen before loop filter */
+ cur_row += 1;
+ }
+ }
+#endif // CONFIG_MULTITHREAD
+
+ return return_val;
+}
+
+void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync) {
+ int mi_row;
+ VP9_COMMON *cm = lf_data->cm;
+
+ while ((mi_row = get_next_row(cm, lf_sync)) != -1 && mi_row < cm->mi_rows) {
+ lf_data->start = mi_row;
+ lf_data->stop = mi_row + MI_BLOCK_SIZE;
+
+ thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+ lf_data->start, lf_data->stop, lf_data->y_only,
+ lf_sync);
+ }
+}
+
+void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row,
+ int corrupted) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&lf_sync->lf_mutex);
+ lf_sync->corrupted |= corrupted;
+ pthread_mutex_unlock(&lf_sync->lf_mutex);
+ pthread_mutex_lock(&lf_sync->recon_done_mutex[row]);
+ lf_sync->num_tiles_done[row] += 1;
+ if (num_tiles == lf_sync->num_tiles_done[row]) {
+ if (is_last_row) {
+ /* The last 2 rows wait on the last row to be done.
+ * So, we have to broadcast the signal in this case.
+ */
+ pthread_cond_broadcast(&lf_sync->recon_done_cond[row]);
+ } else {
+ pthread_cond_signal(&lf_sync->recon_done_cond[row]);
+ }
+ }
+ pthread_mutex_unlock(&lf_sync->recon_done_mutex[row]);
+#else
+ (void)lf_sync;
+ (void)num_tiles;
+ (void)row;
+ (void)is_last_row;
+ (void)corrupted;
+#endif // CONFIG_MULTITHREAD
+}
+
// Accumulate frame counts.
void vp9_accumulate_frame_counts(FRAME_COUNTS *accum,
const FRAME_COUNTS *counts, int is_dec) {
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.h b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.h
index f92df5bd62d..b97e9ee134d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/common/vp9_thread_common.h
@@ -37,6 +37,14 @@ typedef struct VP9LfSyncData {
// Row-based parallel loopfilter data
LFWorkerData *lfdata;
int num_workers;
+
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t lf_mutex;
+ pthread_mutex_t *recon_done_mutex;
+ pthread_cond_t *recon_done_cond;
+#endif
+ int *num_tiles_done;
+ int corrupted;
} VP9LfSync;
// Allocate memory for loopfilter row synchronization.
@@ -53,6 +61,17 @@ void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP9Common *cm,
int partial_frame, VPxWorker *workers,
int num_workers, VP9LfSync *lf_sync);
+// Multi-threaded loopfilter initialisations
+void vp9_lpf_mt_init(VP9LfSync *lf_sync, struct VP9Common *cm,
+ int frame_filter_level, int num_workers);
+
+void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync);
+
+void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row,
+ int corrupted);
+
+void vp9_set_last_decoded_row(struct VP9Common *cm, int tile_col, int mi_row);
+
void vp9_accumulate_frame_counts(struct FRAME_COUNTS *accum,
const struct FRAME_COUNTS *counts, int is_dec);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c
index 48c49e2f5f3..bc0fc6197e6 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decodeframe.c
@@ -1451,6 +1451,25 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, const uint8_t *data,
return vpx_reader_find_end(&tile_data->bit_reader);
}
+static void set_rows_after_error(VP9LfSync *lf_sync, int start_row, int mi_rows,
+ int num_tiles_left, int total_num_tiles) {
+ do {
+ int mi_row;
+ const int aligned_rows = mi_cols_aligned_to_sb(mi_rows);
+ const int sb_rows = (aligned_rows >> MI_BLOCK_SIZE_LOG2);
+ const int corrupted = 1;
+ for (mi_row = start_row; mi_row < mi_rows; mi_row += MI_BLOCK_SIZE) {
+ const int is_last_row = (sb_rows - 1 == mi_row >> MI_BLOCK_SIZE_LOG2);
+ vp9_set_row(lf_sync, total_num_tiles, mi_row >> MI_BLOCK_SIZE_LOG2,
+ is_last_row, corrupted);
+ }
+ /* If there are multiple tiles, the second tile should start marking row
+ * progress from row 0.
+ */
+ start_row = 0;
+ } while (num_tiles_left--);
+}
+
// On entry 'tile_data->data_end' points to the end of the input frame, on exit
// it is updated to reflect the bitreader position of the final tile column if
// present in the tile buffer group or NULL otherwise.
@@ -1461,6 +1480,12 @@ static int tile_worker_hook(void *arg1, void *arg2) {
TileInfo *volatile tile = &tile_data->xd.tile;
const int final_col = (1 << pbi->common.log2_tile_cols) - 1;
const uint8_t *volatile bit_reader_end = NULL;
+ VP9_COMMON *cm = &pbi->common;
+
+ LFWorkerData *lf_data = tile_data->lf_data;
+ VP9LfSync *lf_sync = tile_data->lf_sync;
+
+ volatile int mi_row = 0;
volatile int n = tile_data->buf_start;
tile_data->error_info.setjmp = 1;
@@ -1468,14 +1493,26 @@ static int tile_worker_hook(void *arg1, void *arg2) {
tile_data->error_info.setjmp = 0;
tile_data->xd.corrupted = 1;
tile_data->data_end = NULL;
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ const int num_tiles_left = tile_data->buf_end - n;
+ const int mi_row_start = mi_row;
+ set_rows_after_error(lf_sync, mi_row_start, cm->mi_rows, num_tiles_left,
+ 1 << cm->log2_tile_cols);
+ }
return 0;
}
tile_data->xd.corrupted = 0;
do {
- int mi_row, mi_col;
+ int mi_col;
const TileBuffer *const buf = pbi->tile_buffers + n;
+
+ /* Initialize to 0 is safe since we do not deal with streams that have
+ * more than one row of tiles. (So tile->mi_row_start will be 0)
+ */
+ assert(cm->log2_tile_rows == 0);
+ mi_row = 0;
vp9_zero(tile_data->dqcoeff);
vp9_tile_init(tile, &pbi->common, 0, buf->col);
setup_token_decoder(buf->data, tile_data->data_end, buf->size,
@@ -1493,6 +1530,14 @@ static int tile_worker_hook(void *arg1, void *arg2) {
mi_col += MI_BLOCK_SIZE) {
decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4);
}
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = (aligned_rows >> MI_BLOCK_SIZE_LOG2);
+ const int is_last_row = (sb_rows - 1 == mi_row >> MI_BLOCK_SIZE_LOG2);
+ vp9_set_row(lf_sync, 1 << cm->log2_tile_cols,
+ mi_row >> MI_BLOCK_SIZE_LOG2, is_last_row,
+ tile_data->xd.corrupted);
+ }
}
if (buf->col == final_col) {
@@ -1500,6 +1545,21 @@ static int tile_worker_hook(void *arg1, void *arg2) {
}
} while (!tile_data->xd.corrupted && ++n <= tile_data->buf_end);
+ if (pbi->lpf_mt_opt && n < tile_data->buf_end && cm->lf.filter_level &&
+ !cm->skip_loop_filter) {
+ /* This was not incremented in the tile loop, so increment before tiles left
+ * calculation
+ */
+ ++n;
+ set_rows_after_error(lf_sync, 0, cm->mi_rows, tile_data->buf_end - n,
+ 1 << cm->log2_tile_cols);
+ }
+
+ if (pbi->lpf_mt_opt && !tile_data->xd.corrupted && cm->lf.filter_level &&
+ !cm->skip_loop_filter) {
+ vp9_loopfilter_rows(lf_data, lf_sync);
+ }
+
tile_data->data_end = bit_reader_end;
return !tile_data->xd.corrupted;
}
@@ -1516,6 +1576,8 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data,
VP9_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
const uint8_t *bit_reader_end = NULL;
+ VP9LfSync *lf_row_sync = &pbi->lf_row_sync;
+ YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
@@ -1542,12 +1604,26 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data,
}
}
+ // Initialize LPF
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ vp9_lpf_mt_init(lf_row_sync, cm, cm->lf.filter_level,
+ pbi->num_tile_workers);
+ }
+
// Reset tile decoding hook
for (n = 0; n < num_workers; ++n) {
VPxWorker *const worker = &pbi->tile_workers[n];
TileWorkerData *const tile_data =
&pbi->tile_worker_data[n + pbi->total_tiles];
winterface->sync(worker);
+
+ if (pbi->lpf_mt_opt && cm->lf.filter_level && !cm->skip_loop_filter) {
+ tile_data->lf_sync = lf_row_sync;
+ tile_data->lf_data = &tile_data->lf_sync->lfdata[n];
+ vp9_loop_filter_data_reset(tile_data->lf_data, new_fb, cm, pbi->mb.plane);
+ tile_data->lf_data->y_only = 0;
+ }
+
tile_data->xd = pbi->mb;
tile_data->xd.counts =
cm->frame_parallel_decoding_mode ? NULL : &tile_data->counts;
@@ -1908,6 +1984,28 @@ static size_t read_uncompressed_header(VP9Decoder *pbi,
setup_segmentation_dequant(cm);
setup_tile_info(cm, rb);
+ if (pbi->row_mt == 1) {
+ int num_sbs = 1;
+
+ if (pbi->row_mt_worker_data == NULL) {
+ CHECK_MEM_ERROR(cm, pbi->row_mt_worker_data,
+ vpx_calloc(1, sizeof(*pbi->row_mt_worker_data)));
+ }
+
+ if (pbi->max_threads > 1) {
+ const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int sb_cols = aligned_cols >> MI_BLOCK_SIZE_LOG2;
+ const int aligned_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+ const int sb_rows = aligned_rows >> MI_BLOCK_SIZE_LOG2;
+
+ num_sbs = sb_cols * sb_rows;
+ }
+
+ if (num_sbs > pbi->row_mt_worker_data->num_sbs) {
+ vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
+ vp9_dec_alloc_row_mt_mem(pbi->row_mt_worker_data, cm, num_sbs);
+ }
+ }
sz = vpx_rb_read_literal(rb, 16);
if (sz == 0)
@@ -2069,17 +2167,19 @@ void vp9_decode_frame(VP9Decoder *pbi, const uint8_t *data,
if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
// Multi-threaded tile decoder
*p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
- if (!xd->corrupted) {
- if (!cm->skip_loop_filter) {
- // If multiple threads are used to decode tiles, then we use those
- // threads to do parallel loopfiltering.
- vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
- 0, 0, pbi->tile_workers, pbi->num_tile_workers,
- &pbi->lf_row_sync);
+ if (!pbi->lpf_mt_opt) {
+ if (!xd->corrupted) {
+ if (!cm->skip_loop_filter) {
+ // If multiple threads are used to decode tiles, then we use those
+ // threads to do parallel loopfiltering.
+ vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
+ cm->lf.filter_level, 0, 0, pbi->tile_workers,
+ pbi->num_tile_workers, &pbi->lf_row_sync);
+ }
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data is corrupted.");
}
- } else {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Decode failed. Frame data is corrupted.");
}
} else {
*p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c
index 5e41274cc89..1e2a4429347 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.c
@@ -55,6 +55,43 @@ static void vp9_dec_setup_mi(VP9_COMMON *cm) {
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
+void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
+ VP9_COMMON *cm, int num_sbs) {
+ int plane;
+ const size_t dqcoeff_size = (num_sbs << DQCOEFFS_PER_SB_LOG2) *
+ sizeof(*row_mt_worker_data->dqcoeff[0]);
+ row_mt_worker_data->num_sbs = num_sbs;
+ for (plane = 0; plane < 3; ++plane) {
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->dqcoeff[plane],
+ vpx_memalign(16, dqcoeff_size));
+ memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->eob[plane],
+ vpx_calloc(num_sbs << EOBS_PER_SB_LOG2,
+ sizeof(*row_mt_worker_data->eob[plane])));
+ }
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->partition,
+ vpx_calloc(num_sbs * PARTITIONS_PER_SB,
+ sizeof(*row_mt_worker_data->partition)));
+ CHECK_MEM_ERROR(cm, row_mt_worker_data->recon_map,
+ vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));
+}
+
+void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
+ if (row_mt_worker_data != NULL) {
+ int plane;
+ for (plane = 0; plane < 3; ++plane) {
+ vpx_free(row_mt_worker_data->eob[plane]);
+ row_mt_worker_data->eob[plane] = NULL;
+ vpx_free(row_mt_worker_data->dqcoeff[plane]);
+ row_mt_worker_data->dqcoeff[plane] = NULL;
+ }
+ vpx_free(row_mt_worker_data->partition);
+ row_mt_worker_data->partition = NULL;
+ vpx_free(row_mt_worker_data->recon_map);
+ row_mt_worker_data->recon_map = NULL;
+ }
+}
+
static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
@@ -140,6 +177,10 @@ void vp9_decoder_remove(VP9Decoder *pbi) {
vp9_loop_filter_dealloc(&pbi->lf_row_sync);
}
+ if (pbi->row_mt == 1) {
+ vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
+ vpx_free(pbi->row_mt_worker_data);
+ }
vp9_remove_common(&pbi->common);
vpx_free(pbi);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.h b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.h
index 1c488961a8d..9a582fffbb8 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/decoder/vp9_decoder.h
@@ -26,6 +26,10 @@
extern "C" {
#endif
+#define EOBS_PER_SB_LOG2 8
+#define DQCOEFFS_PER_SB_LOG2 12
+#define PARTITIONS_PER_SB 85
+
typedef struct TileBuffer {
const uint8_t *data;
size_t size;
@@ -37,12 +41,22 @@ typedef struct TileWorkerData {
int buf_start, buf_end; // pbi->tile_buffers to decode, inclusive
vpx_reader bit_reader;
FRAME_COUNTS counts;
+ LFWorkerData *lf_data;
+ VP9LfSync *lf_sync;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]);
struct vpx_internal_error_info error_info;
} TileWorkerData;
+typedef struct RowMTWorkerData {
+ int num_sbs;
+ int *eob[MAX_MB_PLANE];
+ PARTITION_TYPE *partition;
+ tran_low_t *dqcoeff[MAX_MB_PLANE];
+ int8_t *recon_map;
+} RowMTWorkerData;
+
typedef struct VP9Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
@@ -74,10 +88,12 @@ typedef struct VP9Decoder {
int hold_ref_buf; // hold the reference buffer.
int row_mt;
+ int lpf_mt_opt;
+ RowMTWorkerData *row_mt_worker_data;
} VP9Decoder;
int vp9_receive_compressed_data(struct VP9Decoder *pbi, size_t size,
- const uint8_t **dest);
+ const uint8_t **psource);
int vp9_get_raw_frame(struct VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
vp9_ppflags_t *flags);
@@ -111,6 +127,10 @@ struct VP9Decoder *vp9_decoder_create(BufferPool *const pool);
void vp9_decoder_remove(struct VP9Decoder *pbi);
+void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
+ VP9_COMMON *cm, int num_sbs);
+void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data);
+
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
if (idx >= 0 && frame_bufs[idx].ref_count > 0) {
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
index 513718e7cb1..f8dd0a6f7a9 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
@@ -23,13 +23,13 @@ void vp9_fdct8x8_quant_neon(const int16_t *input, int stride,
int skip_block, const int16_t *round_ptr,
const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
tran_low_t temp_buffer[64];
(void)coeff_ptr;
vpx_fdct8x8_neon(input, temp_buffer, stride);
vp9_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, round_ptr, quant_ptr,
- qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, scan_ptr,
- iscan_ptr);
+ qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, scan,
+ iscan);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
index 97a09bdff6f..8b62b450cef 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -97,6 +97,9 @@ void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
store_s16q_to_tran_low(qcoeff_ptr + i, v_qcoeff);
store_s16q_to_tran_low(dqcoeff_ptr + i, v_dqcoeff);
}
+#ifdef __aarch64__
+ *eob_ptr = vmaxvq_s16(v_eobmax_76543210);
+#else
{
const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
vget_high_s16(v_eobmax_76543210));
@@ -111,6 +114,7 @@ void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
*eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
}
+#endif // __aarch64__
}
static INLINE int32x4_t extract_sign_bit(int32x4_t a) {
@@ -122,7 +126,7 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
const int16_t *quant_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan_ptr) {
+ const int16_t *scan, const int16_t *iscan) {
const int16x8_t one = vdupq_n_s16(1);
const int16x8_t neg_one = vdupq_n_s16(-1);
@@ -134,8 +138,8 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
const int16x8_t dequant_thresh = vshrq_n_s16(vld1q_s16(dequant_ptr), 2);
// Process dc and the first seven ac coeffs.
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
const int16x8_t coeff_abs = vabsq_s16(coeff);
@@ -169,12 +173,12 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
dqcoeff = vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
- eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), iscan);
+ eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan);
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
- iscan_ptr += 8;
+ iscan += 8;
coeff_ptr += 8;
qcoeff_ptr += 8;
dqcoeff_ptr += 8;
@@ -188,8 +192,8 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
// Process the rest of the ac coeffs.
for (i = 8; i < 32 * 32; i += 8) {
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
const int16x8_t coeff_abs = vabsq_s16(coeff);
@@ -215,17 +219,20 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
eob_max =
- vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), iscan));
+ vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan));
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
- iscan_ptr += 8;
+ iscan += 8;
coeff_ptr += 8;
qcoeff_ptr += 8;
dqcoeff_ptr += 8;
}
+#ifdef __aarch64__
+ *eob_ptr = vmaxvq_u16(eob_max);
+#else
{
const uint16x4_t eob_max_0 =
vmax_u16(vget_low_u16(eob_max), vget_high_u16(eob_max));
@@ -233,5 +240,6 @@ void vp9_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t count,
const uint16x4_t eob_max_2 = vpmax_u16(eob_max_1, eob_max_1);
vst1_lane_u16(eob_ptr, eob_max_2, 0);
}
+#endif // __aarch64__
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c
index 3720b0876d8..4f88b8fff6f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/ppc/vp9_quantize_vsx.c
@@ -42,8 +42,8 @@ void vp9_quantize_fp_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
int16x8_t qcoeff0, qcoeff1, dqcoeff0, dqcoeff1, eob;
bool16x8_t zero_coeff0, zero_coeff1;
@@ -52,10 +52,10 @@ void vp9_quantize_fp_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr);
int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr);
- int16x8_t scan0 = vec_vsx_ld(0, iscan_ptr);
- int16x8_t scan1 = vec_vsx_ld(16, iscan_ptr);
+ int16x8_t scan0 = vec_vsx_ld(0, iscan);
+ int16x8_t scan1 = vec_vsx_ld(16, iscan);
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
@@ -103,9 +103,9 @@ void vp9_quantize_fp_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
coeff0 = vec_vsx_ld(off0, coeff_ptr);
coeff1 = vec_vsx_ld(off1, coeff_ptr);
coeff2 = vec_vsx_ld(off2, coeff_ptr);
- scan0 = vec_vsx_ld(off0, iscan_ptr);
- scan1 = vec_vsx_ld(off1, iscan_ptr);
- scan2 = vec_vsx_ld(off2, iscan_ptr);
+ scan0 = vec_vsx_ld(off0, iscan);
+ scan1 = vec_vsx_ld(off1, iscan);
+ scan2 = vec_vsx_ld(off2, iscan);
qcoeff0 = vec_mulhi(vec_vaddshs(vec_abs(coeff0), round), quant);
zero_coeff0 = vec_cmpeq(qcoeff0, vec_zeros_s16);
@@ -169,8 +169,7 @@ void vp9_quantize_fp_32x32_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ const int16_t *scan, const int16_t *iscan) {
// In stage 1, we quantize 16 coeffs (DC + 15 AC)
// In stage 2, we loop 42 times and quantize 24 coeffs per iteration
// (32 * 32 - 16) / 24 = 42
@@ -188,13 +187,13 @@ void vp9_quantize_fp_32x32_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
int16x8_t coeff0 = vec_vsx_ld(0, coeff_ptr);
int16x8_t coeff1 = vec_vsx_ld(16, coeff_ptr);
- int16x8_t scan0 = vec_vsx_ld(0, iscan_ptr);
- int16x8_t scan1 = vec_vsx_ld(16, iscan_ptr);
+ int16x8_t scan0 = vec_vsx_ld(0, iscan);
+ int16x8_t scan1 = vec_vsx_ld(16, iscan);
int16x8_t thres = vec_sra(dequant, vec_splats((uint16_t)2));
int16x8_t abs_coeff0 = vec_abs(coeff0);
int16x8_t abs_coeff1 = vec_abs(coeff1);
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
(void)n_coeffs;
assert(!skip_block);
@@ -238,9 +237,9 @@ void vp9_quantize_fp_32x32_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
coeff0 = vec_vsx_ld(off0, coeff_ptr);
coeff1 = vec_vsx_ld(off1, coeff_ptr);
coeff2 = vec_vsx_ld(off2, coeff_ptr);
- scan0 = vec_vsx_ld(off0, iscan_ptr);
- scan1 = vec_vsx_ld(off1, iscan_ptr);
- scan2 = vec_vsx_ld(off2, iscan_ptr);
+ scan0 = vec_vsx_ld(off0, iscan);
+ scan1 = vec_vsx_ld(off1, iscan);
+ scan2 = vec_vsx_ld(off2, iscan);
abs_coeff0 = vec_abs(coeff0);
abs_coeff1 = vec_abs(coeff1);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_block.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_block.h
index 06130584f0f..563fdbbdecd 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_block.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_block.h
@@ -211,6 +211,8 @@ struct macroblock {
#if CONFIG_ML_VAR_PARTITION
DECLARE_ALIGNED(16, uint8_t, est_pred[64 * 64]);
#endif // CONFIG_ML_VAR_PARTITION
+
+ struct scale_factors *me_sf;
};
#ifdef __cplusplus
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_denoiser.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_denoiser.c
index 8c039b2cb9d..b70890e68a8 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_denoiser.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_denoiser.c
@@ -360,6 +360,7 @@ void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
int is_skin = 0;
int increase_denoising = 0;
int consec_zeromv = 0;
+ int last_is_reference = cpi->ref_frame_flags & VP9_LAST_FLAG;
mv_col = ctx->best_sse_mv.as_mv.col;
mv_row = ctx->best_sse_mv.as_mv.row;
motion_magnitude = mv_row * mv_row + mv_col * mv_col;
@@ -403,7 +404,12 @@ void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
}
if (!is_skin && denoiser->denoising_level == kDenHigh) increase_denoising = 1;
- if (denoiser->denoising_level >= kDenLow && !ctx->sb_skip_denoising)
+ // Copy block if LAST_FRAME is not a reference.
+ // Last doesn't always exist when SVC layers are dynamically changed, e.g. top
+ // spatial layer doesn't have last reference when it's brought up for the
+ // first time on the fly.
+ if (last_is_reference && denoiser->denoising_level >= kDenLow &&
+ !ctx->sb_skip_denoising)
decision = perform_motion_compensation(
&cpi->common, denoiser, mb, bs, increase_denoising, mi_row, mi_col, ctx,
motion_magnitude, is_skin, &zeromv_filter, consec_zeromv,
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
index ad30951afa3..98343f0d243 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodeframe.c
@@ -930,7 +930,9 @@ static int scale_partitioning_svc(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
PARTITION_TYPE partition_high;
if (mi_row_high >= cm->mi_rows || mi_col_high >= cm->mi_cols) return 0;
- if (mi_row >= (cm->mi_rows >> 1) || mi_col >= (cm->mi_cols >> 1)) return 0;
+ if (mi_row >= svc->mi_rows[svc->spatial_layer_id - 1] ||
+ mi_col >= svc->mi_cols[svc->spatial_layer_id - 1])
+ return 0;
// Find corresponding (mi_col/mi_row) block down-scaled by 2x2.
start_pos = mi_row * (svc->mi_stride[svc->spatial_layer_id - 1]) + mi_col;
@@ -1378,6 +1380,20 @@ static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
x->sb_use_mv_part = 1;
x->sb_mvcol_part = mi->mv[0].as_mv.col;
x->sb_mvrow_part = mi->mv[0].as_mv.row;
+ if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
+ cpi->svc.spatial_layer_id == 0 &&
+ cpi->rc.high_num_blocks_with_motion && !x->zero_temp_sad_source &&
+ cm->width > 640 && cm->height > 480) {
+ // Disable split below 16x16 block size when scroll motion is detected.
+ // TODO(marpan/jianj): Improve this condition: issue is that search
+ // range is hard-coded/limited in vp9_int_pro_motion_estimation() so
+ // scroll motion may not be detected here.
+ if ((abs(x->sb_mvrow_part) >= 48 && abs(x->sb_mvcol_part) <= 8) ||
+ y_sad < 100000) {
+ compute_minmax_variance = 0;
+ thresholds[2] = INT64_MAX;
+ }
+ }
}
y_sad_last = y_sad;
@@ -3183,7 +3199,7 @@ static int ml_pruning_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
#define FEATURES 4
// ML-based partition search breakout.
-static int ml_predict_breakout(const VP9_COMP *const cpi, BLOCK_SIZE bsize,
+static int ml_predict_breakout(VP9_COMP *const cpi, BLOCK_SIZE bsize,
const MACROBLOCK *const x,
const RD_COST *const rd_cost) {
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
@@ -3214,14 +3230,29 @@ static int ml_predict_breakout(const VP9_COMP *const cpi, BLOCK_SIZE bsize,
if (!linear_weights) return 0;
{ // Generate feature values.
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int ac_q =
+ vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
+#else
const int ac_q = vp9_ac_quant(qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
const int num_pels_log2 = num_pels_log2_lookup[bsize];
int feature_index = 0;
unsigned int var, sse;
float rate_f, dist_f;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ var =
+ vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, x->e_mbd.bd);
+ } else {
+ var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ vp9_64_zeros, 0, &sse);
+ }
+#else
var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, &sse);
+#endif
var = var >> num_pels_log2;
vpx_clear_system_state();
@@ -3288,7 +3319,12 @@ static void ml_prune_rect_partition(VP9_COMP *const cpi, MACROBLOCK *const x,
{
const int64_t none_rdcost = pc_tree->none.rdcost;
const VP9_COMMON *const cm = &cpi->common;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int dc_q =
+ vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
+#else
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
int feature_index = 0;
unsigned int block_var = 0;
unsigned int sub_block_var[4] = { 0 };
@@ -3404,31 +3440,38 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
const NN_CONFIG *nn_config = NULL;
- DECLARE_ALIGNED(16, uint8_t, pred_buf[64 * 64]);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64 * 2]);
+ uint8_t *const pred_buf = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+ ? (CONVERT_TO_BYTEPTR(pred_buffer))
+ : pred_buffer;
+#else
+ DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64]);
+ uint8_t *const pred_buf = pred_buffer;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ const int speed = cpi->oxcf.speed;
int i;
- float thresh_low = -1.0f;
- float thresh_high = 0.0f;
+ float thresh = 0.0f;
switch (bsize) {
case BLOCK_64X64:
nn_config = &vp9_var_rd_part_nnconfig_64;
- thresh_low = -3.0f;
- thresh_high = 3.0f;
+ thresh = speed > 0 ? 3.5f : 3.0f;
break;
case BLOCK_32X32:
nn_config = &vp9_var_rd_part_nnconfig_32;
- thresh_low = -3.0;
- thresh_high = 3.0f;
+ thresh = speed > 0 ? 3.5f : 3.0f;
break;
case BLOCK_16X16:
nn_config = &vp9_var_rd_part_nnconfig_16;
- thresh_low = -4.0;
- thresh_high = 4.0f;
+ thresh = speed > 0 ? 3.5f : 4.0f;
break;
case BLOCK_8X8:
nn_config = &vp9_var_rd_part_nnconfig_8;
- thresh_low = -2.0;
- thresh_high = 2.0f;
+ if (cm->width >= 720 && cm->height >= 720)
+ thresh = speed > 0 ? 2.5f : 2.0f;
+ else
+ thresh = speed > 0 ? 3.5f : 2.0f;
break;
default: assert(0 && "Unexpected block size."); return;
}
@@ -3476,7 +3519,12 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
{
float features[FEATURES] = { 0.0f };
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int dc_q =
+ vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (xd->bd - 8);
+#else
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
+#endif // CONFIG_VP9_HIGHBITDEPTH
int feature_idx = 0;
float score;
@@ -3520,8 +3568,8 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
// partition is better than the non-split partition. So if the score is
// high enough, we skip the none-split partition search; if the score is
// low enough, we skip the split partition search.
- if (score > thresh_high) *none = 0;
- if (score < thresh_low) *split = 0;
+ if (score > thresh) *none = 0;
+ if (score < -thresh) *split = 0;
}
}
#undef FEATURES
@@ -3529,7 +3577,8 @@ static void ml_predict_var_rd_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col,
int orig_rdmult) {
- TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
+ const int gf_group_index = cpi->twopass.gf_group.index;
+ TplDepFrame *tpl_frame = &cpi->tpl_stats[gf_group_index];
TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
int tpl_stride = tpl_frame->stride;
int64_t intra_cost = 0;
@@ -3544,9 +3593,9 @@ int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col,
if (tpl_frame->is_valid == 0) return orig_rdmult;
- if (cpi->common.show_frame) return orig_rdmult;
+ if (cpi->twopass.gf_group.layer_depth[gf_group_index] > 1) return orig_rdmult;
- if (cpi->twopass.gf_group.index >= MAX_LAG_BUFFERS) return orig_rdmult;
+ if (gf_group_index >= MAX_ARF_GOP_SIZE) return orig_rdmult;
for (row = mi_row; row < mi_row + mi_high; ++row) {
for (col = mi_col; col < mi_col + mi_wide; ++col) {
@@ -3759,14 +3808,10 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
pc_tree->partitioning = PARTITION_NONE;
if (cpi->sf.ml_var_partition_pruning) {
- int do_ml_var_partition_pruning =
+ const int do_ml_var_partition_pruning =
!frame_is_intra_only(cm) && partition_none_allowed && do_split &&
mi_row + num_8x8_blocks_high_lookup[bsize] <= cm->mi_rows &&
mi_col + num_8x8_blocks_wide_lookup[bsize] <= cm->mi_cols;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- do_ml_var_partition_pruning = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
if (do_ml_var_partition_pruning) {
ml_predict_var_rd_paritioning(cpi, x, bsize, mi_row, mi_col,
&partition_none_allowed, &do_split);
@@ -3814,13 +3859,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
}
if ((do_split || do_rect) && !x->e_mbd.lossless && ctx->skippable) {
- int use_ml_based_breakout =
+ const int use_ml_based_breakout =
cpi->sf.use_ml_partition_search_breakout &&
cm->base_qindex >= 100;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- use_ml_based_breakout = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
if (use_ml_based_breakout) {
if (ml_predict_breakout(cpi, bsize, x, &this_rdc)) {
do_split = 0;
@@ -4019,13 +4060,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
}
{
- int do_ml_rect_partition_pruning =
+ const int do_ml_rect_partition_pruning =
!frame_is_intra_only(cm) && !force_horz_split && !force_vert_split &&
(partition_horz_allowed || partition_vert_allowed) && bsize > BLOCK_8X8;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- do_ml_rect_partition_pruning = 0;
-#endif
if (do_ml_rect_partition_pruning) {
ml_prune_rect_partition(cpi, x, bsize, pc_tree, &partition_horz_allowed,
&partition_vert_allowed, best_rdc.rdcost, mi_row,
@@ -4505,15 +4542,9 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
int mi_col) {
VP9_COMMON *const cm = &cpi->common;
const NN_CONFIG *nn_config = NULL;
- float thresh_low = -0.2f;
- float thresh_high = 0.0f;
switch (bsize) {
- case BLOCK_64X64:
- nn_config = &vp9_var_part_nnconfig_64;
- thresh_low = -0.3f;
- thresh_high = -0.1f;
- break;
+ case BLOCK_64X64: nn_config = &vp9_var_part_nnconfig_64; break;
case BLOCK_32X32: nn_config = &vp9_var_part_nnconfig_32; break;
case BLOCK_16X16: nn_config = &vp9_var_part_nnconfig_16; break;
case BLOCK_8X8: break;
@@ -4525,6 +4556,7 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
vpx_clear_system_state();
{
+ const float thresh = cpi->oxcf.speed <= 5 ? 1.25f : 0.0f;
float features[FEATURES] = { 0.0f };
const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
int feature_idx = 0;
@@ -4565,8 +4597,8 @@ static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
assert(feature_idx == FEATURES);
nn_predict(features, nn_config, score);
- if (score[0] > thresh_high) return 3;
- if (score[0] < thresh_low) return 0;
+ if (score[0] > thresh) return PARTITION_SPLIT;
+ if (score[0] < -thresh) return PARTITION_NONE;
return -1;
}
}
@@ -4644,8 +4676,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
if (partition_none_allowed && do_split) {
const int ml_predicted_partition =
ml_predict_var_paritioning(cpi, x, bsize, mi_row, mi_col);
- if (ml_predicted_partition == 0) do_split = 0;
- if (ml_predicted_partition == 3) partition_none_allowed = 0;
+ if (ml_predicted_partition == PARTITION_NONE) do_split = 0;
+ if (ml_predicted_partition == PARTITION_SPLIT) partition_none_allowed = 0;
}
}
#endif // CONFIG_ML_VAR_PARTITION
@@ -5628,7 +5660,6 @@ static void encode_frame_internal(VP9_COMP *cpi) {
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
-
vp9_zero(*td->counts);
vp9_zero(cpi->td.rd_counts);
@@ -5693,7 +5724,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
source_var_based_partition_search_method(cpi);
- } else if (gf_group_index && gf_group_index < MAX_LAG_BUFFERS &&
+ } else if (gf_group_index && gf_group_index < MAX_ARF_GOP_SIZE &&
cpi->sf.enable_tpl_model) {
TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodemv.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodemv.h
index 8bbf857872d..2f1be4b233f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodemv.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encodemv.h
@@ -27,7 +27,7 @@ void vp9_encode_mv(VP9_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
unsigned int *const max_mv_magnitude);
void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context *mvctx, int usehp);
+ const nmv_context *ctx, int usehp);
void vp9_update_mv_count(ThreadData *td);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.c
index edb4cb288c8..33cfd9f75fe 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.c
@@ -52,6 +52,9 @@
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_mbgraph.h"
+#if CONFIG_NON_GREEDY_MV
+#include "vp9/encoder/vp9_mcomp.h"
+#endif
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_noise_estimate.h"
#include "vp9/encoder/vp9_picklpf.h"
@@ -2359,10 +2362,21 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
vp9_set_speed_features_framesize_dependent(cpi);
if (cpi->sf.enable_tpl_model) {
- for (frame = 0; frame < MAX_LAG_BUFFERS; ++frame) {
- int mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
- int mi_rows = mi_cols_aligned_to_sb(cm->mi_rows);
-
+ const int mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+ const int mi_rows = mi_cols_aligned_to_sb(cm->mi_rows);
+#if CONFIG_NON_GREEDY_MV
+ CHECK_MEM_ERROR(
+ cm, cpi->feature_score_loc_arr,
+ vpx_calloc(mi_rows * mi_cols, sizeof(*cpi->feature_score_loc_arr)));
+ CHECK_MEM_ERROR(
+ cm, cpi->feature_score_loc_sort,
+ vpx_calloc(mi_rows * mi_cols, sizeof(*cpi->feature_score_loc_sort)));
+ CHECK_MEM_ERROR(
+ cm, cpi->feature_score_loc_heap,
+ vpx_calloc(mi_rows * mi_cols, sizeof(*cpi->feature_score_loc_heap)));
+#endif
+ // TODO(jingning): Reduce the actual memory use for tpl model build up.
+ for (frame = 0; frame < MAX_ARF_GOP_SIZE; ++frame) {
CHECK_MEM_ERROR(cm, cpi->tpl_stats[frame].tpl_stats_ptr,
vpx_calloc(mi_rows * mi_cols,
sizeof(*cpi->tpl_stats[frame].tpl_stats_ptr)));
@@ -2373,6 +2387,11 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
cpi->tpl_stats[frame].mi_rows = cm->mi_rows;
cpi->tpl_stats[frame].mi_cols = cm->mi_cols;
}
+
+ for (frame = 0; frame < REF_FRAMES; ++frame) {
+ cpi->enc_frame_buf[frame].mem_valid = 0;
+ cpi->enc_frame_buf[frame].released = 1;
+ }
}
// Allocate memory to store variances for a frame.
@@ -2449,6 +2468,17 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
vp9_loop_filter_init(cm);
+ // Set up the unit scaling factor used during motion search.
+#if CONFIG_VP9_HIGHBITDEPTH
+ vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
+ cm->width, cm->height,
+ cm->use_highbitdepth);
+#else
+ vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
+ cm->width, cm->height);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ cpi->td.mb.me_sf = &cpi->me_sf;
+
cm->error.setjmp = 0;
return cpi;
@@ -2561,7 +2591,12 @@ void vp9_remove_compressor(VP9_COMP *cpi) {
vp9_denoiser_free(&(cpi->denoiser));
#endif
- for (frame = 0; frame < MAX_LAG_BUFFERS; ++frame) {
+#if CONFIG_NON_GREEDY_MV
+ vpx_free(cpi->feature_score_loc_arr);
+ vpx_free(cpi->feature_score_loc_sort);
+ vpx_free(cpi->feature_score_loc_heap);
+#endif
+ for (frame = 0; frame < MAX_ARF_GOP_SIZE; ++frame) {
vpx_free(cpi->tpl_stats[frame].tpl_stats_ptr);
cpi->tpl_stats[frame].is_valid = 0;
}
@@ -3217,8 +3252,8 @@ void vp9_scale_references(VP9_COMP *cpi) {
if (cpi->oxcf.pass == 0 && !cpi->use_svc) {
// Check for release of scaled reference.
buf_idx = cpi->scaled_ref_idx[ref_frame - 1];
- buf = (buf_idx != INVALID_IDX) ? &pool->frame_bufs[buf_idx] : NULL;
- if (buf != NULL) {
+ if (buf_idx != INVALID_IDX) {
+ buf = &pool->frame_bufs[buf_idx];
--buf->ref_count;
cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
}
@@ -3249,22 +3284,21 @@ static void release_scaled_references(VP9_COMP *cpi) {
refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
const int idx = cpi->scaled_ref_idx[i - 1];
- RefCntBuffer *const buf =
- idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
- const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
- if (buf != NULL &&
- (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
- buf->buf.y_crop_height == ref->y_crop_height))) {
- --buf->ref_count;
- cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
+ if (idx != INVALID_IDX) {
+ RefCntBuffer *const buf = &cm->buffer_pool->frame_bufs[idx];
+ const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
+ if (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
+ buf->buf.y_crop_height == ref->y_crop_height)) {
+ --buf->ref_count;
+ cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
+ }
}
}
} else {
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
const int idx = cpi->scaled_ref_idx[i];
- RefCntBuffer *const buf =
- idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
- if (buf != NULL) {
+ if (idx != INVALID_IDX) {
+ RefCntBuffer *const buf = &cm->buffer_pool->frame_bufs[idx];
--buf->ref_count;
cpi->scaled_ref_idx[i] = INVALID_IDX;
}
@@ -3457,6 +3491,11 @@ static void set_size_dependent_vars(VP9_COMP *cpi, int *q, int *bottom_index,
// Decide q and q bounds.
*q = vp9_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+ if (cpi->oxcf.rc_mode == VPX_CBR && cpi->rc.force_max_q) {
+ *q = cpi->rc.worst_quality;
+ cpi->rc.force_max_q = 0;
+ }
+
if (!frame_is_intra_only(cm)) {
vp9_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
}
@@ -3661,14 +3700,16 @@ static INLINE void set_raw_source_frame(VP9_COMP *cpi) {
static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
uint8_t *dest) {
VP9_COMMON *const cm = &cpi->common;
+ SVC *const svc = &cpi->svc;
int q = 0, bottom_index = 0, top_index = 0;
+ int no_drop_scene_change = 0;
const INTERP_FILTER filter_scaler =
(is_one_pass_cbr_svc(cpi))
- ? cpi->svc.downsample_filter_type[cpi->svc.spatial_layer_id]
+ ? svc->downsample_filter_type[svc->spatial_layer_id]
: EIGHTTAP;
const int phase_scaler =
(is_one_pass_cbr_svc(cpi))
- ? cpi->svc.downsample_filter_phase[cpi->svc.spatial_layer_id]
+ ? svc->downsample_filter_phase[svc->spatial_layer_id]
: 0;
if (cm->show_existing_frame) {
@@ -3676,6 +3717,8 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
return 1;
}
+ svc->time_stamp_prev[svc->spatial_layer_id] = svc->time_stamp_superframe;
+
// Flag to check if its valid to compute the source sad (used for
// scene detection and for superblock content state in CBR mode).
// The flag may get reset below based on SVC or resizing state.
@@ -3688,25 +3731,25 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
if (is_one_pass_cbr_svc(cpi) &&
cpi->un_scaled_source->y_width == cm->width << 2 &&
cpi->un_scaled_source->y_height == cm->height << 2 &&
- cpi->svc.scaled_temp.y_width == cm->width << 1 &&
- cpi->svc.scaled_temp.y_height == cm->height << 1) {
+ svc->scaled_temp.y_width == cm->width << 1 &&
+ svc->scaled_temp.y_height == cm->height << 1) {
// For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take
// advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2
// result will be saved in scaled_temp and might be used later.
- const INTERP_FILTER filter_scaler2 = cpi->svc.downsample_filter_type[1];
- const int phase_scaler2 = cpi->svc.downsample_filter_phase[1];
+ const INTERP_FILTER filter_scaler2 = svc->downsample_filter_type[1];
+ const int phase_scaler2 = svc->downsample_filter_phase[1];
cpi->Source = vp9_svc_twostage_scale(
- cm, cpi->un_scaled_source, &cpi->scaled_source, &cpi->svc.scaled_temp,
+ cm, cpi->un_scaled_source, &cpi->scaled_source, &svc->scaled_temp,
filter_scaler, phase_scaler, filter_scaler2, phase_scaler2);
- cpi->svc.scaled_one_half = 1;
+ svc->scaled_one_half = 1;
} else if (is_one_pass_cbr_svc(cpi) &&
cpi->un_scaled_source->y_width == cm->width << 1 &&
cpi->un_scaled_source->y_height == cm->height << 1 &&
- cpi->svc.scaled_one_half) {
+ svc->scaled_one_half) {
// If the spatial layer is 1/2x1/2 and the scaling is already done in the
// two-stage scaling, use the result directly.
- cpi->Source = &cpi->svc.scaled_temp;
- cpi->svc.scaled_one_half = 0;
+ cpi->Source = &svc->scaled_temp;
+ svc->scaled_one_half = 0;
} else {
cpi->Source = vp9_scale_if_required(
cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0),
@@ -3714,8 +3757,8 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
}
#ifdef OUTPUT_YUV_SVC_SRC
// Write out at most 3 spatial layers.
- if (is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id < 3) {
- vpx_write_yuv_frame(yuv_svc_src[cpi->svc.spatial_layer_id], cpi->Source);
+ if (is_one_pass_cbr_svc(cpi) && svc->spatial_layer_id < 3) {
+ vpx_write_yuv_frame(yuv_svc_src[svc->spatial_layer_id], cpi->Source);
}
#endif
// Unfiltered raw source used in metrics calculation if the source
@@ -3735,9 +3778,9 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
}
if ((cpi->use_svc &&
- (cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1 ||
- cpi->svc.temporal_layer_id < cpi->svc.number_temporal_layers - 1 ||
- cpi->svc.current_superframe < 1)) ||
+ (svc->spatial_layer_id < svc->number_spatial_layers - 1 ||
+ svc->temporal_layer_id < svc->number_temporal_layers - 1 ||
+ svc->current_superframe < 1)) ||
cpi->resize_pending || cpi->resize_state || cpi->external_resize ||
cpi->resize_state != ORIG) {
cpi->compute_source_sad_onepass = 0;
@@ -3786,18 +3829,33 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
(cpi->oxcf.speed >= 5 && cpi->oxcf.speed < 8)))
vp9_scene_detection_onepass(cpi);
- if (cpi->svc.spatial_layer_id == 0)
- cpi->svc.high_source_sad_superframe = cpi->rc.high_source_sad;
+ if (svc->spatial_layer_id == svc->first_spatial_layer_to_encode) {
+ svc->high_source_sad_superframe = cpi->rc.high_source_sad;
+ // On scene change reset temporal layer pattern to TL0.
+ // TODO(marpan/jianj): Fix this to handle case where base
+ // spatial layers are skipped, in which case we should insert
+ // and reset to spatial layer 0 on scene change.
+ // Only do this reset for bypass/flexible mode.
+ if (svc->high_source_sad_superframe && svc->temporal_layer_id > 0 &&
+ svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_BYPASS) {
+ // rc->high_source_sad will get reset so copy it to restore it.
+ int tmp_high_source_sad = cpi->rc.high_source_sad;
+ vp9_svc_reset_temporal_layers(cpi, cm->frame_type == KEY_FRAME);
+ cpi->rc.high_source_sad = tmp_high_source_sad;
+ }
+ }
// For 1 pass CBR, check if we are dropping this frame.
// Never drop on key frame, if base layer is key for svc,
// on scene change, or if superframe has layer sync.
+ if ((cpi->rc.high_source_sad || svc->high_source_sad_superframe) &&
+ !(cpi->rc.use_post_encode_drop && svc->last_layer_dropped[0]))
+ no_drop_scene_change = 1;
if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
- !frame_is_intra_only(cm) && !cpi->rc.high_source_sad &&
- !cpi->svc.high_source_sad_superframe &&
- !cpi->svc.superframe_has_layer_sync &&
+ !frame_is_intra_only(cm) && !no_drop_scene_change &&
+ !svc->superframe_has_layer_sync &&
(!cpi->use_svc ||
- !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)) {
+ !svc->layer_context[svc->temporal_layer_id].is_key_frame)) {
if (vp9_rc_drop_frame(cpi)) return 0;
}
@@ -3805,7 +3863,7 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
// when svc->force_zero_mode_spatial_ref = 1. Under those conditions we can
// avoid this frame-level upsampling (for non intra_only frames).
if (frame_is_intra_only(cm) == 0 &&
- !(is_one_pass_cbr_svc(cpi) && cpi->svc.force_zero_mode_spatial_ref)) {
+ !(is_one_pass_cbr_svc(cpi) && svc->force_zero_mode_spatial_ref)) {
vp9_scale_references(cpi);
}
@@ -3815,12 +3873,12 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
if (cpi->sf.copy_partition_flag) alloc_copy_partition_data(cpi);
if (cpi->sf.svc_use_lowres_part &&
- cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2) {
- if (cpi->svc.prev_partition_svc == NULL) {
+ svc->spatial_layer_id == svc->number_spatial_layers - 2) {
+ if (svc->prev_partition_svc == NULL) {
CHECK_MEM_ERROR(
- cm, cpi->svc.prev_partition_svc,
+ cm, svc->prev_partition_svc,
(BLOCK_SIZE *)vpx_calloc(cm->mi_stride * cm->mi_rows,
- sizeof(*cpi->svc.prev_partition_svc)));
+ sizeof(*svc->prev_partition_svc)));
}
}
@@ -3832,6 +3890,12 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
cpi->use_skin_detection = 1;
}
+ // Enable post encode frame dropping for CBR on non key frame, when
+ // ext_use_post_encode_drop is specified by user.
+ cpi->rc.use_post_encode_drop = cpi->rc.ext_use_post_encode_drop &&
+ cpi->oxcf.rc_mode == VPX_CBR &&
+ cm->frame_type != KEY_FRAME;
+
vp9_set_quantizer(cm, q);
vp9_set_variance_partition_thresholds(cpi, q, 0);
@@ -3842,16 +3906,24 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
if (cpi->use_svc) {
// On non-zero spatial layer, check for disabling inter-layer
// prediction.
- if (cpi->svc.spatial_layer_id > 0) vp9_svc_constrain_inter_layer_pred(cpi);
+ if (svc->spatial_layer_id > 0) vp9_svc_constrain_inter_layer_pred(cpi);
vp9_svc_assert_constraints_pattern(cpi);
}
+ if (cpi->rc.last_post_encode_dropped_scene_change) {
+ cpi->rc.high_source_sad = 1;
+ svc->high_source_sad_superframe = 1;
+ // For now disable use_source_sad since Last_Source will not be the previous
+ // encoded but the dropped one.
+ cpi->sf.use_source_sad = 0;
+ cpi->rc.last_post_encode_dropped_scene_change = 0;
+ }
// Check if this high_source_sad (scene/slide change) frame should be
// encoded at high/max QP, and if so, set the q and adjust some rate
// control parameters.
if (cpi->sf.overshoot_detection_cbr_rt == FAST_DETECTION_MAXQ &&
(cpi->rc.high_source_sad ||
- (cpi->use_svc && cpi->svc.high_source_sad_superframe))) {
+ (cpi->use_svc && svc->high_source_sad_superframe))) {
if (vp9_encodedframe_overshoot(cpi, -1, &q)) {
vp9_set_quantizer(cm, q);
vp9_set_variance_partition_thresholds(cpi, q, 0);
@@ -3886,7 +3958,7 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
// For SVC: all spatial layers are checked for re-encoding.
if (cpi->sf.overshoot_detection_cbr_rt == RE_ENCODE_MAXQ &&
(cpi->rc.high_source_sad ||
- (cpi->use_svc && cpi->svc.high_source_sad_superframe))) {
+ (cpi->use_svc && svc->high_source_sad_superframe))) {
int frame_size = 0;
// Get an estimate of the encoded frame size.
save_coding_context(cpi);
@@ -3960,9 +4032,9 @@ static void encode_with_recode_loop(VP9_COMP *cpi, size_t *size,
set_size_independent_vars(cpi);
- enable_acl = cpi->sf.allow_acl
- ? (cm->frame_type == KEY_FRAME) || (cm->show_frame == 0)
- : 0;
+ enable_acl = cpi->sf.allow_acl ? (cm->frame_type == KEY_FRAME) ||
+ (cpi->twopass.gf_group.index == 1)
+ : 0;
do {
vpx_clear_system_state();
@@ -4622,8 +4694,13 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
TX_SIZE t;
// SVC: skip encoding of enhancement layer if the layer target bandwidth = 0.
+ // If in constrained layer drop mode (svc.framedrop_mode != LAYER_DROP) and
+ // base spatial layer was dropped, no need to set svc.skip_enhancement_layer,
+ // as whole superframe will be dropped.
if (cpi->use_svc && cpi->svc.spatial_layer_id > 0 &&
- cpi->oxcf.target_bandwidth == 0) {
+ cpi->oxcf.target_bandwidth == 0 &&
+ !(cpi->svc.framedrop_mode != LAYER_DROP &&
+ cpi->svc.drop_spatial_layer[0])) {
cpi->svc.skip_enhancement_layer = 1;
vp9_rc_postencode_update_drop_frame(cpi);
cpi->ext_refresh_frame_flags_pending = 0;
@@ -4720,19 +4797,6 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
cm->ref_frame_map[cpi->alt_fb_idx]);
}
- cpi->last_frame_dropped = 0;
- cpi->svc.last_layer_dropped[cpi->svc.spatial_layer_id] = 0;
- // Keep track of the frame buffer index updated/refreshed for the
- // current encoded TL0 superframe.
- if (cpi->svc.temporal_layer_id == 0) {
- if (cpi->refresh_last_frame)
- cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->lst_fb_idx;
- else if (cpi->refresh_golden_frame)
- cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->gld_fb_idx;
- else if (cpi->refresh_alt_ref_frame)
- cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->alt_fb_idx;
- }
-
// Disable segmentation if it decrease rate/distortion ratio
if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ)
vp9_try_disable_lookahead_aq(cpi, size, dest);
@@ -4779,9 +4843,34 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
// Pick the loop filter level for the frame.
loopfilter_frame(cpi, cm);
+ if (cpi->rc.use_post_encode_drop) save_coding_context(cpi);
+
// build the bitstream
vp9_pack_bitstream(cpi, dest, size);
+ if (cpi->rc.use_post_encode_drop && cm->base_qindex < cpi->rc.worst_quality &&
+ cpi->svc.spatial_layer_id == 0 &&
+ post_encode_drop_screen_content(cpi, size)) {
+ restore_coding_context(cpi);
+ return;
+ }
+
+ cpi->last_frame_dropped = 0;
+ cpi->svc.last_layer_dropped[cpi->svc.spatial_layer_id] = 0;
+ if (cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1)
+ cpi->svc.num_encoded_top_layer++;
+
+ // Keep track of the frame buffer index updated/refreshed for the
+ // current encoded TL0 superframe.
+ if (cpi->svc.temporal_layer_id == 0) {
+ if (cpi->refresh_last_frame)
+ cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->lst_fb_idx;
+ else if (cpi->refresh_golden_frame)
+ cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->gld_fb_idx;
+ else if (cpi->refresh_alt_ref_frame)
+ cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->alt_fb_idx;
+ }
+
if (cm->seg.update_map) update_reference_segmentation_map(cpi);
if (frame_is_intra_only(cm) == 0) {
@@ -4910,6 +4999,8 @@ static void init_ref_frame_bufs(VP9_COMMON *cm) {
cm->new_fb_idx = INVALID_IDX;
for (i = 0; i < REF_FRAMES; ++i) {
cm->ref_frame_map[i] = INVALID_IDX;
+ }
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
pool->frame_bufs[i].ref_count = 0;
}
}
@@ -5335,6 +5426,7 @@ static void update_level_info(VP9_COMP *cpi, size_t *size, int arf_src_index) {
typedef struct GF_PICTURE {
YV12_BUFFER_CONFIG *frame;
int ref_frame[3];
+ FRAME_UPDATE_TYPE update_type;
} GF_PICTURE;
void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
@@ -5345,16 +5437,22 @@ void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
int gld_index = -1;
int alt_index = -1;
int lst_index = -1;
+ int arf_index_stack[MAX_ARF_LAYERS];
+ int arf_stack_size = 0;
int extend_frame_count = 0;
int pframe_qindex = cpi->tpl_stats[2].base_qindex;
+ int frame_gop_offset = 0;
RefCntBuffer *frame_bufs = cm->buffer_pool->frame_bufs;
- int recon_frame_index[REFS_PER_FRAME + 1] = { -1, -1, -1, -1 };
+ int8_t recon_frame_index[REFS_PER_FRAME + MAX_ARF_LAYERS];
+
+ memset(recon_frame_index, -1, sizeof(recon_frame_index));
+ stack_init(arf_index_stack, MAX_ARF_LAYERS);
// TODO(jingning): To be used later for gf frame type parsing.
(void)gf_group;
- for (i = 0; i < FRAME_BUFFERS && frame_idx < REFS_PER_FRAME + 1; ++i) {
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
if (frame_bufs[i].ref_count == 0) {
alloc_frame_mvs(cm, i);
if (vpx_realloc_frame_buffer(&frame_bufs[i].buf, cm->width, cm->height,
@@ -5369,6 +5467,8 @@ void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
recon_frame_index[frame_idx] = i;
++frame_idx;
+
+ if (frame_idx >= REFS_PER_FRAME + cpi->oxcf.enable_auto_arf) break;
}
}
@@ -5382,21 +5482,24 @@ void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
// Initialize Golden reference frame.
gf_picture[0].frame = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
for (i = 0; i < 3; ++i) gf_picture[0].ref_frame[i] = -1;
+ gf_picture[0].update_type = gf_group->update_type[0];
gld_index = 0;
++*tpl_group_frames;
- // Initialize ARF frame
+ // Initialize base layer ARF frame
gf_picture[1].frame = cpi->Source;
gf_picture[1].ref_frame[0] = gld_index;
gf_picture[1].ref_frame[1] = lst_index;
gf_picture[1].ref_frame[2] = alt_index;
+ gf_picture[1].update_type = gf_group->update_type[1];
alt_index = 1;
++*tpl_group_frames;
// Initialize P frames
- for (frame_idx = 2; frame_idx < MAX_LAG_BUFFERS; ++frame_idx) {
- struct lookahead_entry *buf =
- vp9_lookahead_peek(cpi->lookahead, frame_idx - 2);
+ for (frame_idx = 2; frame_idx < MAX_ARF_GOP_SIZE; ++frame_idx) {
+ struct lookahead_entry *buf;
+ frame_gop_offset = gf_group->frame_gop_index[frame_idx];
+ buf = vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
if (buf == NULL) break;
@@ -5404,25 +5507,44 @@ void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
gf_picture[frame_idx].ref_frame[0] = gld_index;
gf_picture[frame_idx].ref_frame[1] = lst_index;
gf_picture[frame_idx].ref_frame[2] = alt_index;
+ gf_picture[frame_idx].update_type = gf_group->update_type[frame_idx];
+
+ switch (gf_group->update_type[frame_idx]) {
+ case ARF_UPDATE:
+ stack_push(arf_index_stack, alt_index, arf_stack_size);
+ ++arf_stack_size;
+ alt_index = frame_idx;
+ break;
+ case LF_UPDATE: lst_index = frame_idx; break;
+ case OVERLAY_UPDATE:
+ gld_index = frame_idx;
+ alt_index = stack_pop(arf_index_stack, arf_stack_size);
+ --arf_stack_size;
+ break;
+ case USE_BUF_FRAME:
+ lst_index = alt_index;
+ alt_index = stack_pop(arf_index_stack, arf_stack_size);
+ --arf_stack_size;
+ break;
+ default: break;
+ }
++*tpl_group_frames;
- lst_index = frame_idx;
// The length of group of pictures is baseline_gf_interval, plus the
// beginning golden frame from last GOP, plus the last overlay frame in
// the same GOP.
- if (frame_idx == cpi->rc.baseline_gf_interval + 1) break;
+ if (frame_idx == gf_group->gf_group_size) break;
}
- gld_index = frame_idx;
- lst_index = VPXMAX(0, frame_idx - 1);
alt_index = -1;
++frame_idx;
+ ++frame_gop_offset;
// Extend two frames outside the current gf group.
for (; frame_idx < MAX_LAG_BUFFERS && extend_frame_count < 2; ++frame_idx) {
struct lookahead_entry *buf =
- vp9_lookahead_peek(cpi->lookahead, frame_idx - 2);
+ vp9_lookahead_peek(cpi->lookahead, frame_gop_offset - 1);
if (buf == NULL) break;
@@ -5432,16 +5554,25 @@ void init_gop_frames(VP9_COMP *cpi, GF_PICTURE *gf_picture,
gf_picture[frame_idx].ref_frame[0] = gld_index;
gf_picture[frame_idx].ref_frame[1] = lst_index;
gf_picture[frame_idx].ref_frame[2] = alt_index;
+ gf_picture[frame_idx].update_type = LF_UPDATE;
lst_index = frame_idx;
++*tpl_group_frames;
++extend_frame_count;
+ ++frame_gop_offset;
}
}
void init_tpl_stats(VP9_COMP *cpi) {
int frame_idx;
- for (frame_idx = 0; frame_idx < MAX_LAG_BUFFERS; ++frame_idx) {
+ for (frame_idx = 0; frame_idx < MAX_ARF_GOP_SIZE; ++frame_idx) {
TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
+#if CONFIG_NON_GREEDY_MV
+ int rf_idx;
+ for (rf_idx = 0; rf_idx < 3; ++rf_idx) {
+ tpl_frame->mv_dist_sum[rf_idx] = 0;
+ tpl_frame->mv_cost_sum[rf_idx] = 0;
+ }
+#endif
memset(tpl_frame->tpl_stats_ptr, 0,
tpl_frame->height * tpl_frame->width *
sizeof(*tpl_frame->tpl_stats_ptr));
@@ -5451,20 +5582,22 @@ void init_tpl_stats(VP9_COMP *cpi) {
#if CONFIG_NON_GREEDY_MV
static void prepare_nb_full_mvs(const TplDepFrame *tpl_frame, int mi_row,
- int mi_col, int_mv *nb_full_mvs) {
+ int mi_col, int rf_idx, BLOCK_SIZE bsize,
+ int_mv *nb_full_mvs) {
+ const int mi_unit = num_8x8_blocks_wide_lookup[bsize];
const int dirs[NB_MVS_NUM][2] = { { -1, 0 }, { 0, -1 }, { 1, 0 }, { 0, 1 } };
int i;
for (i = 0; i < NB_MVS_NUM; ++i) {
- int r = dirs[i][0];
- int c = dirs[i][1];
+ int r = dirs[i][0] * mi_unit;
+ int c = dirs[i][1] * mi_unit;
if (mi_row + r >= 0 && mi_row + r < tpl_frame->mi_rows && mi_col + c >= 0 &&
mi_col + c < tpl_frame->mi_cols) {
const TplDepStats *tpl_ptr =
&tpl_frame
->tpl_stats_ptr[(mi_row + r) * tpl_frame->stride + mi_col + c];
- if (tpl_ptr->ready) {
- nb_full_mvs[i].as_mv.row = tpl_ptr->mv.as_mv.row >> 3;
- nb_full_mvs[i].as_mv.col = tpl_ptr->mv.as_mv.col >> 3;
+ if (tpl_ptr->ready[rf_idx]) {
+ nb_full_mvs[i].as_mv.row = tpl_ptr->mv_arr[rf_idx].as_mv.row >> 3;
+ nb_full_mvs[i].as_mv.col = tpl_ptr->mv_arr[rf_idx].as_mv.col >> 3;
} else {
nb_full_mvs[i].as_int = INVALID_MV;
}
@@ -5503,7 +5636,7 @@ uint32_t motion_compensated_prediction(VP9_COMP *cpi, ThreadData *td,
#if CONFIG_NON_GREEDY_MV
// lambda is used to adjust the importance of motion vector consitency.
// TODO(angiebird): Figure out lambda's proper value.
- double lambda = 10000;
+ double lambda = cpi->tpl_stats[frame_idx].lambda;
int_mv nb_full_mvs[NB_MVS_NUM];
#endif
@@ -5527,7 +5660,8 @@ uint32_t motion_compensated_prediction(VP9_COMP *cpi, ThreadData *td,
#if CONFIG_NON_GREEDY_MV
(void)search_method;
(void)sadpb;
- prepare_nb_full_mvs(&cpi->tpl_stats[frame_idx], mi_row, mi_col, nb_full_mvs);
+ prepare_nb_full_mvs(&cpi->tpl_stats[frame_idx], mi_row, mi_col, rf_idx, bsize,
+ nb_full_mvs);
vp9_full_pixel_diamond_new(cpi, x, &best_ref_mv1_full, step_param, lambda,
MAX_MVSEARCH_STEPS - 1 - step_param, 1,
&cpi->fn_ptr[bsize], nb_full_mvs, tpl_stats,
@@ -5544,12 +5678,13 @@ uint32_t motion_compensated_prediction(VP9_COMP *cpi, ThreadData *td,
/* restore UMV window */
x->mv_limits = tmp_mv_limits;
+ // TODO(yunqing): may use higher tap interp filter than 2 taps.
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(
x, mv, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], 0, mv_sf->subpel_search_level,
- cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
- 0);
+ cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, 0,
+ USE_2_TAPS);
return bestsme;
}
@@ -5594,42 +5729,21 @@ int round_floor(int ref_pos, int bsize_pix) {
}
void tpl_model_store(TplDepStats *tpl_stats, int mi_row, int mi_col,
- BLOCK_SIZE bsize, int stride,
- const TplDepStats *src_stats) {
+ BLOCK_SIZE bsize, int stride) {
const int mi_height = num_8x8_blocks_high_lookup[bsize];
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const TplDepStats *src_stats = &tpl_stats[mi_row * stride + mi_col];
int idx, idy;
- int64_t intra_cost = src_stats->intra_cost / (mi_height * mi_width);
- int64_t inter_cost = src_stats->inter_cost / (mi_height * mi_width);
-
- TplDepStats *tpl_ptr;
-
- intra_cost = VPXMAX(1, intra_cost);
- inter_cost = VPXMAX(1, inter_cost);
-
for (idy = 0; idy < mi_height; ++idy) {
- tpl_ptr = &tpl_stats[(mi_row + idy) * stride + mi_col];
for (idx = 0; idx < mi_width; ++idx) {
-#if CONFIG_NON_GREEDY_MV
- int rf_idx;
- for (rf_idx = 0; rf_idx < 3; ++rf_idx) {
- tpl_ptr->mv_dist[rf_idx] = src_stats->mv_dist[rf_idx];
- tpl_ptr->mv_cost[rf_idx] = src_stats->mv_cost[rf_idx];
- tpl_ptr->inter_cost_arr[rf_idx] = src_stats->inter_cost;
- tpl_ptr->recon_error_arr[rf_idx] = src_stats->recon_error_arr[rf_idx];
- tpl_ptr->sse_arr[rf_idx] = src_stats->sse_arr[rf_idx];
- tpl_ptr->mv_arr[rf_idx].as_int = src_stats->mv_arr[rf_idx].as_int;
- }
- tpl_ptr->feature_score = src_stats->feature_score;
- tpl_ptr->ready = 1;
-#endif
- tpl_ptr->intra_cost = intra_cost;
- tpl_ptr->inter_cost = inter_cost;
+ TplDepStats *tpl_ptr = &tpl_stats[(mi_row + idy) * stride + mi_col + idx];
+ const int64_t mc_flow = tpl_ptr->mc_flow;
+ const int64_t mc_ref_cost = tpl_ptr->mc_ref_cost;
+ *tpl_ptr = *src_stats;
+ tpl_ptr->mc_flow = mc_flow;
+ tpl_ptr->mc_ref_cost = mc_ref_cost;
tpl_ptr->mc_dep_cost = tpl_ptr->intra_cost + tpl_ptr->mc_flow;
- tpl_ptr->ref_frame_index = src_stats->ref_frame_index;
- tpl_ptr->mv.as_int = src_stats->mv.as_int;
- ++tpl_ptr;
}
}
}
@@ -5717,9 +5831,21 @@ void get_quantize_error(MACROBLOCK *x, int plane, tran_low_t *coeff,
int pix_num = 1 << num_pels_log2_lookup[txsize_to_bsize[tx_size]];
const int shift = tx_size == TX_32X32 ? 0 : 2;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ vp9_highbd_quantize_fp_32x32(coeff, pix_num, x->skip_block, p->round_fp,
+ p->quant_fp, qcoeff, dqcoeff, pd->dequant,
+ &eob, scan_order->scan, scan_order->iscan);
+ } else {
+ vp9_quantize_fp_32x32(coeff, pix_num, x->skip_block, p->round_fp,
+ p->quant_fp, qcoeff, dqcoeff, pd->dequant, &eob,
+ scan_order->scan, scan_order->iscan);
+ }
+#else
vp9_quantize_fp_32x32(coeff, pix_num, x->skip_block, p->round_fp, p->quant_fp,
qcoeff, dqcoeff, pd->dequant, &eob, scan_order->scan,
scan_order->iscan);
+#endif // CONFIG_VP9_HIGHBITDEPTH
*recon_error = vp9_block_error(coeff, dqcoeff, pix_num, sse) >> shift;
*recon_error = VPXMAX(*recon_error, 1);
@@ -5728,6 +5854,19 @@ void get_quantize_error(MACROBLOCK *x, int plane, tran_low_t *coeff,
*sse = VPXMAX(*sse, 1);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
+ TX_SIZE tx_size) {
+ // TODO(sdeng): Implement SIMD based high bit-depth Hadamard transforms.
+ switch (tx_size) {
+ case TX_8X8: vpx_highbd_hadamard_8x8(src_diff, bw, coeff); break;
+ case TX_16X16: vpx_highbd_hadamard_16x16(src_diff, bw, coeff); break;
+ case TX_32X32: vpx_highbd_hadamard_32x32(src_diff, bw, coeff); break;
+ default: assert(0);
+ }
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
void wht_fwd_txfm(int16_t *src_diff, int bw, tran_low_t *coeff,
TX_SIZE tx_size) {
switch (tx_size) {
@@ -5763,14 +5902,23 @@ double get_feature_score(uint8_t *buf, ptrdiff_t stride, int rows, int cols) {
}
#endif
+static void set_mv_limits(const VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
+ int mi_col) {
+ x->mv_limits.row_min = -((mi_row * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND));
+ x->mv_limits.row_max =
+ (cm->mi_rows - 1 - mi_row) * MI_SIZE + (17 - 2 * VP9_INTERP_EXTEND);
+ x->mv_limits.col_min = -((mi_col * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND));
+ x->mv_limits.col_max =
+ ((cm->mi_cols - 1 - mi_col) * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND);
+}
+
void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
struct scale_factors *sf, GF_PICTURE *gf_picture,
- int frame_idx, int16_t *src_diff, tran_low_t *coeff,
- tran_low_t *qcoeff, tran_low_t *dqcoeff, int mi_row,
- int mi_col, BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int frame_idx, TplDepFrame *tpl_frame, int16_t *src_diff,
+ tran_low_t *coeff, tran_low_t *qcoeff, tran_low_t *dqcoeff,
+ int mi_row, int mi_col, BLOCK_SIZE bsize, TX_SIZE tx_size,
YV12_BUFFER_CONFIG *ref_frame[], uint8_t *predictor,
- int64_t *recon_error, int64_t *sse,
- TplDepStats *tpl_stats) {
+ int64_t *recon_error, int64_t *sse) {
VP9_COMMON *cm = &cpi->common;
ThreadData *td = &cpi->td;
@@ -5789,8 +5937,10 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
PREDICTION_MODE mode;
int mb_y_offset = mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
MODE_INFO mi_above, mi_left;
-
- memset(tpl_stats, 0, sizeof(*tpl_stats));
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ TplDepStats *tpl_stats =
+ &tpl_frame->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];
xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
xd->mb_to_bottom_edge = ((cm->mi_rows - 1 - mi_row) * MI_SIZE) * 8;
@@ -5816,11 +5966,24 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
vp9_predict_intra_block(xd, b_width_log2_lookup[bsize], tx_size, mode, src,
src_stride, dst, dst_stride, 0, 0, 0);
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ vpx_highbd_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
+ dst_stride, xd->bd);
+ highbd_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
+ // TODO(sdeng): Implement SIMD based high bit-depth satd.
+ intra_cost = vpx_satd_c(coeff, pix_num);
+ } else {
+ vpx_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst,
+ dst_stride);
+ wht_fwd_txfm(src_diff, bw, coeff, tx_size);
+ intra_cost = vpx_satd(coeff, pix_num);
+ }
+#else
vpx_subtract_block(bh, bw, src_diff, bw, src, src_stride, dst, dst_stride);
-
wht_fwd_txfm(src_diff, bw, coeff, tx_size);
-
intra_cost = vpx_satd(coeff, pix_num);
+#endif // CONFIG_VP9_HIGHBITDEPTH
if (intra_cost < best_intra_cost) best_intra_cost = intra_cost;
}
@@ -5828,31 +5991,14 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
// Motion compensated prediction
best_mv.as_int = 0;
- (void)mb_y_offset;
- // Motion estimation column boundary
- x->mv_limits.col_min = -((mi_col * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND));
- x->mv_limits.col_max =
- ((cm->mi_cols - 1 - mi_col) * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND);
-
-#if CONFIG_NON_GREEDY_MV
- tpl_stats->feature_score = get_feature_score(
- xd->cur_buf->y_buffer + mb_y_offset, xd->cur_buf->y_stride, bw, bh);
-#endif
+ set_mv_limits(cm, x, mi_row, mi_col);
for (rf_idx = 0; rf_idx < 3; ++rf_idx) {
int_mv mv;
- if (ref_frame[rf_idx] == NULL) {
-#if CONFIG_NON_GREEDY_MV
- tpl_stats->inter_cost_arr[rf_idx] = -1;
-#endif
- continue;
- }
+ if (ref_frame[rf_idx] == NULL) continue;
#if CONFIG_NON_GREEDY_MV
- motion_compensated_prediction(
- cpi, td, frame_idx, xd->cur_buf->y_buffer + mb_y_offset,
- ref_frame[rf_idx]->y_buffer + mb_y_offset, xd->cur_buf->y_stride, bsize,
- mi_row, mi_col, tpl_stats, rf_idx);
+ (void)td;
mv.as_int = tpl_stats->mv_arr[rf_idx].as_int;
#else
motion_compensated_prediction(
@@ -5861,8 +6007,6 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
mi_row, mi_col, &mv.as_mv);
#endif
- // TODO(jingning): Not yet support high bit-depth in the next three
- // steps.
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
vp9_highbd_build_inter_predictor(
@@ -5873,6 +6017,8 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
vpx_highbd_subtract_block(
bh, bw, src_diff, bw, xd->cur_buf->y_buffer + mb_y_offset,
xd->cur_buf->y_stride, &predictor[0], bw, xd->bd);
+ highbd_wht_fwd_txfm(src_diff, bw, coeff, tx_size);
+ inter_cost = vpx_satd_c(coeff, pix_num);
} else {
vp9_build_inter_predictor(
ref_frame[rf_idx]->y_buffer + mb_y_offset,
@@ -5881,6 +6027,8 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
vpx_subtract_block(bh, bw, src_diff, bw,
xd->cur_buf->y_buffer + mb_y_offset,
xd->cur_buf->y_stride, &predictor[0], bw);
+ wht_fwd_txfm(src_diff, bw, coeff, tx_size);
+ inter_cost = vpx_satd(coeff, pix_num);
}
#else
vp9_build_inter_predictor(ref_frame[rf_idx]->y_buffer + mb_y_offset,
@@ -5890,10 +6038,9 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
vpx_subtract_block(bh, bw, src_diff, bw,
xd->cur_buf->y_buffer + mb_y_offset,
xd->cur_buf->y_stride, &predictor[0], bw);
-#endif
wht_fwd_txfm(src_diff, bw, coeff, tx_size);
-
inter_cost = vpx_satd(coeff, pix_num);
+#endif
#if CONFIG_NON_GREEDY_MV
tpl_stats->inter_cost_arr[rf_idx] = inter_cost;
@@ -5917,13 +6064,136 @@ void mode_estimation(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
}
best_intra_cost = VPXMAX(best_intra_cost, 1);
best_inter_cost = VPXMIN(best_intra_cost, best_inter_cost);
- tpl_stats->inter_cost = best_inter_cost << TPL_DEP_COST_SCALE_LOG2;
- tpl_stats->intra_cost = best_intra_cost << TPL_DEP_COST_SCALE_LOG2;
- tpl_stats->mc_dep_cost = tpl_stats->intra_cost + tpl_stats->mc_flow;
+ tpl_stats->inter_cost = VPXMAX(
+ 1, (best_inter_cost << TPL_DEP_COST_SCALE_LOG2) / (mi_height * mi_width));
+ tpl_stats->intra_cost = VPXMAX(
+ 1, (best_intra_cost << TPL_DEP_COST_SCALE_LOG2) / (mi_height * mi_width));
tpl_stats->ref_frame_index = gf_picture[frame_idx].ref_frame[best_rf_idx];
tpl_stats->mv.as_int = best_mv.as_int;
}
+#if CONFIG_NON_GREEDY_MV
+static int compare_feature_score(const void *a, const void *b) {
+ const FEATURE_SCORE_LOC *aa = *(FEATURE_SCORE_LOC *const *)a;
+ const FEATURE_SCORE_LOC *bb = *(FEATURE_SCORE_LOC *const *)b;
+ if (aa->feature_score < bb->feature_score) {
+ return 1;
+ } else if (aa->feature_score > bb->feature_score) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void do_motion_search(VP9_COMP *cpi, ThreadData *td, int frame_idx,
+ YV12_BUFFER_CONFIG **ref_frame, BLOCK_SIZE bsize,
+ int mi_row, int mi_col) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCK *x = &td->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
+ TplDepStats *tpl_stats =
+ &tpl_frame->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];
+ const int mb_y_offset =
+ mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
+ int rf_idx;
+
+ set_mv_limits(cm, x, mi_row, mi_col);
+
+ for (rf_idx = 0; rf_idx < 3; ++rf_idx) {
+ if (ref_frame[rf_idx] == NULL) {
+ tpl_stats->ready[rf_idx] = 0;
+ continue;
+ } else {
+ tpl_stats->ready[rf_idx] = 1;
+ }
+ motion_compensated_prediction(
+ cpi, td, frame_idx, xd->cur_buf->y_buffer + mb_y_offset,
+ ref_frame[rf_idx]->y_buffer + mb_y_offset, xd->cur_buf->y_stride, bsize,
+ mi_row, mi_col, tpl_stats, rf_idx);
+ }
+}
+
+#define CHANGE_MV_SEARCH_ORDER 1
+#define USE_PQSORT 1
+#define RE_COMPUTE_MV_INCONSISTENCY 1
+
+#if CHANGE_MV_SEARCH_ORDER
+#if USE_PQSORT
+static void max_heap_pop(FEATURE_SCORE_LOC **heap, int *size,
+ FEATURE_SCORE_LOC **output) {
+ if (*size > 0) {
+ *output = heap[0];
+ --*size;
+ if (*size > 0) {
+ int p, l, r;
+ heap[0] = heap[*size];
+ p = 0;
+ l = 2 * p + 1;
+ r = 2 * p + 2;
+ while (l < *size) {
+ FEATURE_SCORE_LOC *tmp;
+ int c = l;
+ if (r < *size && heap[r]->feature_score > heap[l]->feature_score) {
+ c = r;
+ }
+ if (heap[p]->feature_score >= heap[c]->feature_score) {
+ break;
+ }
+ tmp = heap[p];
+ heap[p] = heap[c];
+ heap[c] = tmp;
+ p = c;
+ l = 2 * p + 1;
+ r = 2 * p + 2;
+ }
+ }
+ } else {
+ assert(0);
+ }
+}
+
+static void max_heap_push(FEATURE_SCORE_LOC **heap, int *size,
+ FEATURE_SCORE_LOC *input) {
+ int c, p;
+ FEATURE_SCORE_LOC *tmp;
+ heap[*size] = input;
+ ++*size;
+ c = *size - 1;
+ p = c >> 1;
+ while (c > 0 && heap[c]->feature_score > heap[p]->feature_score) {
+ tmp = heap[p];
+ heap[p] = heap[c];
+ heap[c] = tmp;
+ c = p;
+ p >>= 1;
+ }
+}
+
+static void add_nb_blocks_to_heap(VP9_COMP *cpi, const TplDepFrame *tpl_frame,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int *heap_size) {
+ const int mi_unit = num_8x8_blocks_wide_lookup[bsize];
+ const int dirs[NB_MVS_NUM][2] = { { -1, 0 }, { 0, -1 }, { 1, 0 }, { 0, 1 } };
+ int i;
+ for (i = 0; i < NB_MVS_NUM; ++i) {
+ int r = dirs[i][0] * mi_unit;
+ int c = dirs[i][1] * mi_unit;
+ if (mi_row + r >= 0 && mi_row + r < tpl_frame->mi_rows && mi_col + c >= 0 &&
+ mi_col + c < tpl_frame->mi_cols) {
+ FEATURE_SCORE_LOC *fs_loc =
+ &cpi->feature_score_loc_arr[(mi_row + r) * tpl_frame->stride +
+ (mi_col + c)];
+ if (fs_loc->visited == 0) {
+ max_heap_push(cpi->feature_score_loc_heap, heap_size, fs_loc);
+ }
+ }
+ }
+}
+#endif // USE_PQSORT
+#endif // CHANGE_MV_SEARCH_ORDER
+#endif // CONFIG_NON_GREEDY_MV
+
void mc_flow_dispenser(VP9_COMP *cpi, GF_PICTURE *gf_picture, int frame_idx,
BLOCK_SIZE bsize) {
TplDepFrame *tpl_frame = &cpi->tpl_stats[frame_idx];
@@ -5954,6 +6224,17 @@ void mc_flow_dispenser(VP9_COMP *cpi, GF_PICTURE *gf_picture, int frame_idx,
const int mi_height = num_8x8_blocks_high_lookup[bsize];
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
int64_t recon_error, sse;
+#if CONFIG_NON_GREEDY_MV
+ int rf_idx;
+ int fs_loc_sort_size;
+#if CHANGE_MV_SEARCH_ORDER
+#if USE_PQSORT
+ int fs_loc_heap_size;
+#else
+ int i;
+#endif // USE_PQSORT
+#endif // CHANGE_MV_SEARCH_ORDER
+#endif // CONFIG_NON_GREEDY_MV
// Setup scaling factor
#if CONFIG_VP9_HIGHBITDEPTH
@@ -5984,9 +6265,7 @@ void mc_flow_dispenser(VP9_COMP *cpi, GF_PICTURE *gf_picture, int frame_idx,
xd->cur_buf = this_frame;
// Get rd multiplier set up.
- rdmult =
- (int)vp9_compute_rd_mult_based_on_qindex(cpi, tpl_frame->base_qindex);
- if (rdmult < 1) rdmult = 1;
+ rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, tpl_frame->base_qindex);
set_error_per_bit(&cpi->td.mb, rdmult);
vp9_initialize_me_consts(cpi, &cpi->td.mb, tpl_frame->base_qindex);
@@ -5995,23 +6274,98 @@ void mc_flow_dispenser(VP9_COMP *cpi, GF_PICTURE *gf_picture, int frame_idx,
cm->base_qindex = tpl_frame->base_qindex;
vp9_frame_init_quantizer(cpi);
+#if CONFIG_NON_GREEDY_MV
+ tpl_frame->lambda = 250;
+ fs_loc_sort_size = 0;
+
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
- // Motion estimation row boundary
- x->mv_limits.row_min = -((mi_row * MI_SIZE) + (17 - 2 * VP9_INTERP_EXTEND));
- x->mv_limits.row_max =
- (cm->mi_rows - 1 - mi_row) * MI_SIZE + (17 - 2 * VP9_INTERP_EXTEND);
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
- TplDepStats tpl_stats;
- mode_estimation(cpi, x, xd, &sf, gf_picture, frame_idx, src_diff, coeff,
- qcoeff, dqcoeff, mi_row, mi_col, bsize, tx_size,
- ref_frame, predictor, &recon_error, &sse, &tpl_stats);
+ const int mb_y_offset =
+ mi_row * MI_SIZE * xd->cur_buf->y_stride + mi_col * MI_SIZE;
+ const int bw = 4 << b_width_log2_lookup[bsize];
+ const int bh = 4 << b_height_log2_lookup[bsize];
+ TplDepStats *tpl_stats =
+ &tpl_frame->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];
+ FEATURE_SCORE_LOC *fs_loc =
+ &cpi->feature_score_loc_arr[mi_row * tpl_frame->stride + mi_col];
+ tpl_stats->feature_score = get_feature_score(
+ xd->cur_buf->y_buffer + mb_y_offset, xd->cur_buf->y_stride, bw, bh);
+ fs_loc->visited = 0;
+ fs_loc->feature_score = tpl_stats->feature_score;
+ fs_loc->mi_row = mi_row;
+ fs_loc->mi_col = mi_col;
+ cpi->feature_score_loc_sort[fs_loc_sort_size] = fs_loc;
+ ++fs_loc_sort_size;
+ }
+ }
+
+ qsort(cpi->feature_score_loc_sort, fs_loc_sort_size,
+ sizeof(*cpi->feature_score_loc_sort), compare_feature_score);
+#if CHANGE_MV_SEARCH_ORDER
+#if !USE_PQSORT
+ for (i = 0; i < fs_loc_sort_size; ++i) {
+ FEATURE_SCORE_LOC *fs_loc = cpi->feature_score_loc_sort[i];
+ do_motion_search(cpi, td, frame_idx, ref_frame, bsize, fs_loc->mi_row,
+ fs_loc->mi_col);
+ }
+#else // !USE_PQSORT
+ fs_loc_heap_size = 0;
+ max_heap_push(cpi->feature_score_loc_heap, &fs_loc_heap_size,
+ cpi->feature_score_loc_sort[0]);
+
+ while (fs_loc_heap_size > 0) {
+ FEATURE_SCORE_LOC *fs_loc;
+ max_heap_pop(cpi->feature_score_loc_heap, &fs_loc_heap_size, &fs_loc);
+
+ fs_loc->visited = 1;
+
+ do_motion_search(cpi, td, frame_idx, ref_frame, bsize, fs_loc->mi_row,
+ fs_loc->mi_col);
+
+ add_nb_blocks_to_heap(cpi, tpl_frame, bsize, fs_loc->mi_row, fs_loc->mi_col,
+ &fs_loc_heap_size);
+ }
+#endif // !USE_PQSORT
+#else // CHANGE_MV_SEARCH_ORDER
+ for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
+ do_motion_search(cpi, td, frame_idx, ref_frame, bsize, mi_row, mi_col);
+ }
+ }
+#endif // CHANGE_MV_SEARCH_ORDER
+#endif // CONFIG_NON_GREEDY_MV
+ for (mi_row = 0; mi_row < cm->mi_rows; mi_row += mi_height) {
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += mi_width) {
+ mode_estimation(cpi, x, xd, &sf, gf_picture, frame_idx, tpl_frame,
+ src_diff, coeff, qcoeff, dqcoeff, mi_row, mi_col, bsize,
+ tx_size, ref_frame, predictor, &recon_error, &sse);
// Motion flow dependency dispenser.
tpl_model_store(tpl_frame->tpl_stats_ptr, mi_row, mi_col, bsize,
- tpl_frame->stride, &tpl_stats);
+ tpl_frame->stride);
tpl_model_update(cpi->tpl_stats, tpl_frame->tpl_stats_ptr, mi_row, mi_col,
bsize);
+#if CONFIG_NON_GREEDY_MV
+ {
+ TplDepStats *this_tpl_stats =
+ &tpl_frame->tpl_stats_ptr[mi_row * tpl_frame->stride + mi_col];
+ for (rf_idx = 0; rf_idx < 3; ++rf_idx) {
+#if RE_COMPUTE_MV_INCONSISTENCY
+ MV full_mv;
+ int_mv nb_full_mvs[NB_MVS_NUM];
+ prepare_nb_full_mvs(tpl_frame, mi_row, mi_col, rf_idx, bsize,
+ nb_full_mvs);
+ full_mv.row = this_tpl_stats->mv_arr[rf_idx].as_mv.row >> 3;
+ full_mv.col = this_tpl_stats->mv_arr[rf_idx].as_mv.col >> 3;
+ this_tpl_stats->mv_cost[rf_idx] =
+ av1_nb_mvs_inconsistency(&full_mv, nb_full_mvs);
+#endif // RE_COMPUTE_MV_INCONSISTENCY
+ tpl_frame->mv_dist_sum[rf_idx] += this_tpl_stats->mv_dist[rf_idx];
+ tpl_frame->mv_cost_sum[rf_idx] += this_tpl_stats->mv_cost[rf_idx];
+ }
+ }
+#endif // CONFIG_NON_GREEDY_MV
}
}
}
@@ -6088,7 +6442,7 @@ static void dump_tpl_stats(const VP9_COMP *cpi, int tpl_group_frames,
#endif // CONFIG_NON_GREEDY_MV
static void setup_tpl_stats(VP9_COMP *cpi) {
- GF_PICTURE gf_picture[MAX_LAG_BUFFERS];
+ GF_PICTURE gf_picture[MAX_ARF_GOP_SIZE];
const GF_GROUP *gf_group = &cpi->twopass.gf_group;
int tpl_group_frames = 0;
int frame_idx;
@@ -6100,6 +6454,7 @@ static void setup_tpl_stats(VP9_COMP *cpi) {
// Backward propagation from tpl_group_frames to 1.
for (frame_idx = tpl_group_frames - 1; frame_idx > 0; --frame_idx) {
+ if (gf_picture[frame_idx].update_type == USE_BUF_FRAME) continue;
mc_flow_dispenser(cpi, gf_picture, frame_idx, bsize);
}
#if CONFIG_NON_GREEDY_MV
@@ -6121,6 +6476,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
struct lookahead_entry *last_source = NULL;
struct lookahead_entry *source = NULL;
int arf_src_index;
+ const int gf_group_index = cpi->twopass.gf_group.index;
int i;
if (is_one_pass_cbr_svc(cpi)) {
@@ -6168,7 +6524,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
}
// Clear arf index stack before group of pictures processing starts.
- if (cpi->twopass.gf_group.index == 1) {
+ if (gf_group_index == 1) {
stack_init(cpi->twopass.gf_group.arf_index_stack, MAX_LAG_BUFFERS * 2);
cpi->twopass.gf_group.stack_size = 0;
}
@@ -6316,10 +6672,12 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
level_rc_framerate(cpi, arf_src_index);
if (cpi->oxcf.pass != 0 || cpi->use_svc || frame_is_intra_only(cm) == 1) {
- for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
+ for (i = 0; i < REFS_PER_FRAME; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
}
- if (arf_src_index && cpi->sf.enable_tpl_model) {
+ if (gf_group_index == 1 &&
+ cpi->twopass.gf_group.update_type[gf_group_index] == ARF_UPDATE &&
+ cpi->sf.enable_tpl_model) {
vp9_estimate_qp_gop(cpi);
setup_tpl_stats(cpi);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.h
index 75f177fcc16..02814599d03 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_encoder.h
@@ -291,7 +291,7 @@ typedef struct TplDepStats {
int_mv mv;
#if CONFIG_NON_GREEDY_MV
- int ready;
+ int ready[3];
double mv_dist[3];
double mv_cost[3];
int64_t inter_cost_arr[3];
@@ -311,6 +311,11 @@ typedef struct TplDepFrame {
int mi_rows;
int mi_cols;
int base_qindex;
+#if CONFIG_NON_GREEDY_MV
+ double lambda;
+ double mv_dist_sum[3];
+ double mv_cost_sum[3];
+#endif
} TplDepFrame;
#define TPL_DEP_COST_SCALE_LOG2 4
@@ -490,6 +495,23 @@ typedef struct ARNRFilterData {
struct scale_factors sf;
} ARNRFilterData;
+typedef struct EncFrameBuf {
+ int mem_valid;
+ int released;
+ YV12_BUFFER_CONFIG frame;
+} EncFrameBuf;
+
+// Maximum operating frame buffer size needed for a GOP using ARF reference.
+#define MAX_ARF_GOP_SIZE (2 * MAX_LAG_BUFFERS)
+#if CONFIG_NON_GREEDY_MV
+typedef struct FEATURE_SCORE_LOC {
+ int visited;
+ double feature_score;
+ int mi_row;
+ int mi_col;
+} FEATURE_SCORE_LOC;
+#endif
+
typedef struct VP9_COMP {
QUANTS quants;
ThreadData td;
@@ -513,8 +535,14 @@ typedef struct VP9_COMP {
#endif
YV12_BUFFER_CONFIG *raw_source_frame;
- TplDepFrame tpl_stats[MAX_LAG_BUFFERS];
- YV12_BUFFER_CONFIG *tpl_recon_frames[REFS_PER_FRAME + 1];
+ TplDepFrame tpl_stats[MAX_ARF_GOP_SIZE];
+ YV12_BUFFER_CONFIG *tpl_recon_frames[REF_FRAMES];
+ EncFrameBuf enc_frame_buf[REF_FRAMES];
+#if CONFIG_NON_GREEDY_MV
+ FEATURE_SCORE_LOC *feature_score_loc_arr;
+ FEATURE_SCORE_LOC **feature_score_loc_sort;
+ FEATURE_SCORE_LOC **feature_score_loc_heap;
+#endif
TileDataEnc *tile_data;
int allocated_tiles; // Keep track of memory allocated for tiles.
@@ -522,13 +550,12 @@ typedef struct VP9_COMP {
// For a still frame, this flag is set to 1 to skip partition search.
int partition_search_skippable_frame;
- int scaled_ref_idx[MAX_REF_FRAMES];
+ int scaled_ref_idx[REFS_PER_FRAME];
int lst_fb_idx;
int gld_fb_idx;
int alt_fb_idx;
int ref_fb_idx[REF_FRAMES];
- int last_show_frame_buf_idx; // last show frame buffer index
int refresh_last_frame;
int refresh_golden_frame;
@@ -600,6 +627,7 @@ typedef struct VP9_COMP {
ActiveMap active_map;
fractional_mv_step_fp *find_fractional_mv_step;
+ struct scale_factors me_sf;
vp9_diamond_search_fn_t diamond_search_sad;
vp9_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
uint64_t time_receive_data;
@@ -783,7 +811,7 @@ void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf);
// frame is made and not just a copy of the pointer..
int vp9_receive_raw_frame(VP9_COMP *cpi, vpx_enc_frame_flags_t frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time_stamp);
+ int64_t end_time);
int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
size_t *size, uint8_t *dest, int64_t *time_stamp,
@@ -804,9 +832,11 @@ int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag,
int vp9_update_entropy(VP9_COMP *cpi, int update);
-int vp9_set_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols);
+int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols);
-int vp9_get_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols);
+int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols);
int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode,
VPX_SCALING vert_mode);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.c
index 58c3a435d9f..e29e86576d2 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.c
@@ -49,9 +49,6 @@
#define MIN_DECAY_FACTOR 0.01
#define NEW_MV_MODE_PENALTY 32
#define DARK_THRESH 64
-#define DEFAULT_GRP_WEIGHT 1.0
-#define RC_FACTOR_MIN 0.75
-#define RC_FACTOR_MAX 1.75
#define SECTION_NOISE_DEF 250.0
#define LOW_I_THRESH 24000
@@ -1828,10 +1825,12 @@ static int detect_flash(const TWO_PASS *twopass, int offset) {
// brief break in prediction (such as a flash) but subsequent frames
// are reasonably well predicted by an earlier (pre flash) frame.
// The recovery after a flash is indicated by a high pcnt_second_ref
- // compared to pcnt_inter.
+ // useage or a second ref coded error notabley lower than the last
+ // frame coded error.
return next_frame != NULL &&
- next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
- next_frame->pcnt_second_ref >= 0.5;
+ ((next_frame->sr_coded_error < next_frame->coded_error) ||
+ ((next_frame->pcnt_second_ref > next_frame->pcnt_inter) &&
+ (next_frame->pcnt_second_ref >= 0.5)));
}
// Update the motion related elements to the GF arf boost calculation.
@@ -2113,21 +2112,23 @@ static void find_arf_order(VP9_COMP *cpi, GF_GROUP *gf_group,
TWO_PASS *twopass = &cpi->twopass;
const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
FIRSTPASS_STATS fpf_frame;
- const int mid = (start + end) >> 1;
- const int min_frame_interval = 3;
+ const int mid = (start + end + 1) >> 1;
+ const int min_frame_interval = 2;
int idx;
// Process regular P frames
if ((end - start < min_frame_interval) ||
- (depth > cpi->oxcf.enable_auto_arf)) {
- int idx;
- for (idx = start; idx < end; ++idx) {
+ (depth > gf_group->allowed_max_layer_depth)) {
+ for (idx = start; idx <= end; ++idx) {
gf_group->update_type[*index_counter] = LF_UPDATE;
gf_group->arf_src_offset[*index_counter] = 0;
+ gf_group->frame_gop_index[*index_counter] = idx;
gf_group->rf_level[*index_counter] = INTER_NORMAL;
gf_group->layer_depth[*index_counter] = depth;
+ gf_group->gfu_boost[*index_counter] = NORMAL_BOOST;
++(*index_counter);
}
+ gf_group->max_layer_depth = VPXMAX(gf_group->max_layer_depth, depth);
return;
}
@@ -2137,22 +2138,25 @@ static void find_arf_order(VP9_COMP *cpi, GF_GROUP *gf_group,
gf_group->layer_depth[*index_counter] = depth;
gf_group->update_type[*index_counter] = ARF_UPDATE;
gf_group->arf_src_offset[*index_counter] = mid - start;
+ gf_group->frame_gop_index[*index_counter] = mid;
gf_group->rf_level[*index_counter] = GF_ARF_LOW;
for (idx = 0; idx <= mid; ++idx)
if (EOF == input_stats(twopass, &fpf_frame)) break;
- gf_group->gfu_boost[*index_counter] = VPXMAX(
- MIN_ARF_GF_BOOST, calc_arf_boost(cpi, end - mid, mid - start) >> depth);
+ gf_group->gfu_boost[*index_counter] =
+ VPXMAX(MIN_ARF_GF_BOOST,
+ calc_arf_boost(cpi, end - mid + 1, mid - start) >> depth);
reset_fpf_position(twopass, start_pos);
++(*index_counter);
- find_arf_order(cpi, gf_group, index_counter, depth + 1, start, mid);
+ find_arf_order(cpi, gf_group, index_counter, depth + 1, start, mid - 1);
gf_group->update_type[*index_counter] = USE_BUF_FRAME;
gf_group->arf_src_offset[*index_counter] = 0;
+ gf_group->frame_gop_index[*index_counter] = mid;
gf_group->rf_level[*index_counter] = INTER_NORMAL;
gf_group->layer_depth[*index_counter] = depth;
++(*index_counter);
@@ -2167,6 +2171,7 @@ static INLINE void set_gf_overlay_frame_type(GF_GROUP *gf_group,
gf_group->update_type[frame_index] = OVERLAY_UPDATE;
gf_group->rf_level[frame_index] = INTER_NORMAL;
gf_group->layer_depth[frame_index] = MAX_ARF_LAYERS - 1;
+ gf_group->gfu_boost[frame_index] = NORMAL_BOOST;
} else {
gf_group->update_type[frame_index] = GF_UPDATE;
gf_group->rf_level[frame_index] = GF_ARF_STD;
@@ -2174,19 +2179,20 @@ static INLINE void set_gf_overlay_frame_type(GF_GROUP *gf_group,
}
}
-static int define_gf_group_structure(VP9_COMP *cpi) {
+static void define_gf_group_structure(VP9_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc;
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group;
- int i;
int frame_index = 0;
- int key_frame;
- int normal_frames;
-
- key_frame = cpi->common.frame_type == KEY_FRAME;
+ int key_frame = cpi->common.frame_type == KEY_FRAME;
+ int layer_depth = 1;
+ int gop_frames =
+ rc->baseline_gf_interval - (key_frame || rc->source_alt_ref_pending);
gf_group->frame_start = cpi->common.current_video_frame;
- gf_group->frame_end = gf_group->frame_start + rc->baseline_gf_interval - 1;
+ gf_group->frame_end = gf_group->frame_start + rc->baseline_gf_interval;
+ gf_group->max_layer_depth = 0;
+ gf_group->allowed_max_layer_depth = 0;
// For key frames the frame target rate is already set and it
// is also the golden frame.
@@ -2200,55 +2206,24 @@ static int define_gf_group_structure(VP9_COMP *cpi) {
if (rc->source_alt_ref_pending) {
gf_group->update_type[frame_index] = ARF_UPDATE;
gf_group->rf_level[frame_index] = GF_ARF_STD;
- gf_group->layer_depth[frame_index] = 1;
+ gf_group->layer_depth[frame_index] = layer_depth;
gf_group->arf_src_offset[frame_index] =
(unsigned char)(rc->baseline_gf_interval - 1);
+ gf_group->frame_gop_index[frame_index] = rc->baseline_gf_interval;
+ gf_group->max_layer_depth = 1;
++frame_index;
+ ++layer_depth;
+ gf_group->allowed_max_layer_depth = cpi->oxcf.enable_auto_arf;
}
- if (rc->source_alt_ref_pending && cpi->multi_layer_arf) {
- find_arf_order(cpi, gf_group, &frame_index, 2, 0,
- rc->baseline_gf_interval - 1);
-
- set_gf_overlay_frame_type(gf_group, frame_index,
- rc->source_alt_ref_pending);
-
- gf_group->arf_src_offset[frame_index] = 0;
-
- return frame_index;
- }
-
- normal_frames =
- rc->baseline_gf_interval - (key_frame || rc->source_alt_ref_pending);
-
- for (i = 0; i < normal_frames; ++i) {
- if (twopass->stats_in >= twopass->stats_in_end) break;
-
- gf_group->update_type[frame_index] = LF_UPDATE;
- gf_group->rf_level[frame_index] = INTER_NORMAL;
- gf_group->arf_src_offset[frame_index] = 0;
- gf_group->layer_depth[frame_index] = MAX_ARF_LAYERS - 1;
-
- ++frame_index;
- }
-
- // Note:
- // We need to configure the frame at the end of the sequence + 1 that will be
- // the start frame for the next group. Otherwise prior to the call to
- // vp9_rc_get_second_pass_params() the data will be undefined.
+ find_arf_order(cpi, gf_group, &frame_index, layer_depth, 1, gop_frames);
set_gf_overlay_frame_type(gf_group, frame_index, rc->source_alt_ref_pending);
-
- if (rc->source_alt_ref_pending) {
- gf_group->update_type[frame_index] = OVERLAY_UPDATE;
- gf_group->rf_level[frame_index] = INTER_NORMAL;
- } else {
- gf_group->update_type[frame_index] = GF_UPDATE;
- gf_group->rf_level[frame_index] = GF_ARF_STD;
- }
gf_group->arf_src_offset[frame_index] = 0;
+ gf_group->frame_gop_index[frame_index] = rc->baseline_gf_interval;
- return frame_index;
+ // Set the frame ops number.
+ gf_group->gf_group_size = frame_index;
}
static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
@@ -2273,7 +2248,7 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
double this_frame_score = 1.0;
// Define the GF structure and specify
- int gop_frames = define_gf_group_structure(cpi);
+ int gop_frames = gf_group->gf_group_size;
key_frame = cpi->common.frame_type == KEY_FRAME;
@@ -2326,8 +2301,9 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits,
for (idx = 2; idx < MAX_ARF_LAYERS; ++idx) {
if (arf_depth_boost[idx] == 0) break;
- arf_depth_bits[idx] = calculate_boost_bits(
- rc->baseline_gf_interval, arf_depth_boost[idx], total_group_bits);
+ arf_depth_bits[idx] =
+ calculate_boost_bits(rc->baseline_gf_interval - total_arfs,
+ arf_depth_boost[idx], total_group_bits);
total_group_bits -= arf_depth_bits[idx];
total_arfs += arf_depth_count[idx];
@@ -2570,17 +2546,17 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
&next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
&abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+ // Monitor for static sections.
+ if ((rc->frames_since_key + i - 1) > 1) {
+ zero_motion_accumulator = VPXMIN(
+ zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
+ }
+
// Accumulate the effect of prediction quality decay.
if (!flash_detected) {
last_loop_decay_rate = loop_decay_rate;
loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
- // Monitor for static sections.
- if ((rc->frames_since_key + i - 1) > 1) {
- zero_motion_accumulator = VPXMIN(
- zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
- }
-
// Break clause to detect very still sections after motion. For example,
// a static image after a fade or other transition.
if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
@@ -2705,6 +2681,9 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
// Adjust KF group bits and error remaining.
twopass->kf_group_error_left -= gf_group_err;
+ // Decide GOP structure.
+ define_gf_group_structure(cpi);
+
// Allocate bits to each of the frames in the GF group.
allocate_gf_group_bits(cpi, gf_group_bits, gf_arf_bits);
@@ -2748,17 +2727,11 @@ static int slide_transition(const FIRSTPASS_STATS *this_frame,
(this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
}
-// Threshold for use of the lagging second reference frame. High second ref
-// usage may point to a transient event like a flash or occlusion rather than
-// a real scene cut.
-#define SECOND_REF_USEAGE_THRESH 0.1
// Minimum % intra coding observed in first pass (1.0 = 100%)
#define MIN_INTRA_LEVEL 0.25
-// Minimum ratio between the % of intra coding and inter coding in the first
-// pass after discounting neutral blocks (discounting neutral blocks in this
-// way helps catch scene cuts in clips with very flat areas or letter box
-// format clips with image padding.
-#define INTRA_VS_INTER_THRESH 2.0
+// Threshold for use of the lagging second reference frame. Scene cuts do not
+// usually have a high second ref useage.
+#define SECOND_REF_USEAGE_THRESH 0.125
// Hard threshold where the first pass chooses intra for almost all blocks.
// In such a case even if the frame is not a scene cut coding a key frame
// may be a good option.
@@ -2766,12 +2739,6 @@ static int slide_transition(const FIRSTPASS_STATS *this_frame,
// Maximum threshold for the relative ratio of intra error score vs best
// inter error score.
#define KF_II_ERR_THRESHOLD 2.5
-// In real scene cuts there is almost always a sharp change in the intra
-// or inter error score.
-#define ERR_CHANGE_THRESHOLD 0.4
-// For real scene cuts we expect an improvment in the intra inter error
-// ratio in the next frame.
-#define II_IMPROVEMENT_THRESHOLD 3.5
#define KF_II_MAX 128.0
#define II_FACTOR 12.5
// Test for very low intra complexity which could cause false key frames
@@ -2783,30 +2750,21 @@ static int test_candidate_kf(TWO_PASS *twopass,
const FIRSTPASS_STATS *next_frame) {
int is_viable_kf = 0;
double pcnt_intra = 1.0 - this_frame->pcnt_inter;
- double modified_pcnt_inter =
- this_frame->pcnt_inter - this_frame->pcnt_neutral;
// Does the frame satisfy the primary criteria of a key frame?
// See above for an explanation of the test criteria.
// If so, then examine how well it predicts subsequent frames.
- if ((this_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) &&
- (next_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) &&
+ if (!detect_flash(twopass, -1) && !detect_flash(twopass, 0) &&
+ (this_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) &&
((this_frame->pcnt_inter < VERY_LOW_INTER_THRESH) ||
(slide_transition(this_frame, last_frame, next_frame)) ||
- ((pcnt_intra > MIN_INTRA_LEVEL) &&
- (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
+ (((this_frame->coded_error > (next_frame->coded_error * 1.1)) &&
+ (this_frame->coded_error > (last_frame->coded_error * 1.1))) &&
+ (pcnt_intra > MIN_INTRA_LEVEL) &&
+ ((pcnt_intra + this_frame->pcnt_neutral) > 0.5) &&
((this_frame->intra_error /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) <
- KF_II_ERR_THRESHOLD) &&
- ((fabs(last_frame->coded_error - this_frame->coded_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
- ERR_CHANGE_THRESHOLD) ||
- (fabs(last_frame->intra_error - this_frame->intra_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
- ERR_CHANGE_THRESHOLD) ||
- ((next_frame->intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
- II_IMPROVEMENT_THRESHOLD))))) {
+ KF_II_ERR_THRESHOLD)))) {
int i;
const FIRSTPASS_STATS *start_pos = twopass->stats_in;
FIRSTPASS_STATS local_next_frame = *next_frame;
@@ -3247,9 +3205,9 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
FILE *fpfile;
fpfile = fopen("arf.stt", "a");
++arf_count;
- fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", cm->current_video_frame,
- rc->frames_till_gf_update_due, rc->kf_boost, arf_count,
- rc->gfu_boost);
+ fprintf(fpfile, "%10d %10ld %10d %10d %10ld %10ld\n",
+ cm->current_video_frame, rc->frames_till_gf_update_due,
+ rc->kf_boost, arf_count, rc->gfu_boost, cm->frame_type);
fclose(fpfile);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.h
index b5f21eacb97..0807097ac1a 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_firstpass.h
@@ -43,12 +43,6 @@ typedef struct {
#define INVALID_ROW -1
-// Length of the bi-predictive frame group (BFG)
-// NOTE: Currently each BFG contains one backward ref (BWF) frame plus a certain
-// number of bi-predictive frames.
-#define BFG_INTERVAL 2
-#define MAX_EXT_ARFS 2
-#define MIN_EXT_ARF_INTERVAL 4
#define MAX_ARF_LAYERS 6
typedef struct {
@@ -135,6 +129,7 @@ typedef struct {
FRAME_UPDATE_TYPE update_type[MAX_STATIC_GF_GROUP_LENGTH + 2];
unsigned char arf_src_offset[MAX_STATIC_GF_GROUP_LENGTH + 2];
unsigned char layer_depth[MAX_STATIC_GF_GROUP_LENGTH + 2];
+ unsigned char frame_gop_index[MAX_STATIC_GF_GROUP_LENGTH + 2];
int bit_allocation[MAX_STATIC_GF_GROUP_LENGTH + 2];
int gfu_boost[MAX_STATIC_GF_GROUP_LENGTH + 2];
@@ -144,6 +139,9 @@ typedef struct {
int arf_index_stack[MAX_LAG_BUFFERS * 2];
int top_arf_idx;
int stack_size;
+ int gf_group_size;
+ int max_layer_depth;
+ int allowed_max_layer_depth;
} GF_GROUP;
typedef struct {
@@ -200,7 +198,6 @@ struct ThreadData;
struct TileDataEnc;
void vp9_init_first_pass(struct VP9_COMP *cpi);
-void vp9_rc_get_first_pass_params(struct VP9_COMP *cpi);
void vp9_first_pass(struct VP9_COMP *cpi, const struct lookahead_entry *source);
void vp9_end_first_pass(struct VP9_COMP *cpi);
@@ -219,17 +216,6 @@ void vp9_twopass_postencode_update(struct VP9_COMP *cpi);
void calculate_coded_size(struct VP9_COMP *cpi, int *scaled_frame_width,
int *scaled_frame_height);
-static INLINE int get_number_of_extra_arfs(int interval, int arf_pending) {
- assert(MAX_EXT_ARFS > 0);
- if (arf_pending) {
- if (interval >= MIN_EXT_ARF_INTERVAL * (MAX_EXT_ARFS + 1))
- return MAX_EXT_ARFS;
- else if (interval >= MIN_EXT_ARF_INTERVAL * MAX_EXT_ARFS)
- return MAX_EXT_ARFS - 1;
- }
- return 0;
-}
-
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mbgraph.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mbgraph.c
index 2ec048b5314..831c79c1753 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mbgraph.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mbgraph.c
@@ -57,11 +57,12 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,
{
uint32_t distortion;
uint32_t sse;
+ // TODO(yunqing): may use higher tap interp filter than 2 taps if needed.
cpi->find_fractional_mv_step(
x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_search_level,
cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
- 0);
+ 0, USE_2_TAPS);
}
xd->mi[0]->mode = NEWMV;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.c
index 995c54fc74c..a2543035c59 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.c
@@ -367,14 +367,12 @@ static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
*ir = (int)divide_and_round(x1 * b, y1);
}
-uint32_t vp9_skip_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
- const MV *ref_mv, int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop, int iters_per_step,
- int *cost_list, int *mvjcost, int *mvcost[2],
- uint32_t *distortion, uint32_t *sse1,
- const uint8_t *second_pred, int w, int h) {
+uint32_t vp9_skip_sub_pixel_tree(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
+ int h, int use_accurate_subpel_search) {
SETUP_SUBPEL_SEARCH;
besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
src_stride, y, y_stride, second_pred, w, h,
@@ -397,6 +395,7 @@ uint32_t vp9_skip_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
(void)sse;
(void)thismse;
(void)cost_list;
+ (void)use_accurate_subpel_search;
return besterr;
}
@@ -406,7 +405,7 @@ uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
SETUP_SUBPEL_SEARCH;
besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
src_stride, y, y_stride, second_pred, w, h,
@@ -418,6 +417,7 @@ uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
(void)allow_hp;
(void)forced_stop;
(void)hstep;
+ (void)use_accurate_subpel_search;
if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
@@ -471,8 +471,10 @@ uint32_t vp9_find_best_sub_pixel_tree_pruned_more(
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
SETUP_SUBPEL_SEARCH;
+ (void)use_accurate_subpel_search;
+
besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
src_stride, y, y_stride, second_pred, w, h,
offset, mvjcost, mvcost, sse1, distortion);
@@ -531,8 +533,10 @@ uint32_t vp9_find_best_sub_pixel_tree_pruned(
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
SETUP_SUBPEL_SEARCH;
+ (void)use_accurate_subpel_search;
+
besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
src_stride, y, y_stride, second_pred, w, h,
offset, mvjcost, mvcost, sse1, distortion);
@@ -617,12 +621,119 @@ static const MV search_step_table[12] = {
};
/* clang-format on */
+static int accurate_sub_pel_search(
+ const MACROBLOCKD *xd, const MV *this_mv, const struct scale_factors *sf,
+ const InterpKernel *kernel, const vp9_variance_fn_ptr_t *vfp,
+ const uint8_t *const src_address, const int src_stride,
+ const uint8_t *const pre_address, int y_stride, const uint8_t *second_pred,
+ int w, int h, uint32_t *sse) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ uint64_t besterr;
+ assert(sf->x_step_q4 == 16 && sf->y_step_q4 == 16);
+ assert(w != 0 && h != 0);
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ DECLARE_ALIGNED(16, uint16_t, pred16[64 * 64]);
+ vp9_highbd_build_inter_predictor(CONVERT_TO_SHORTPTR(pre_address), y_stride,
+ pred16, w, this_mv, sf, w, h, 0, kernel,
+ MV_PRECISION_Q3, 0, 0, xd->bd);
+ if (second_pred != NULL) {
+ DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
+ vpx_highbd_comp_avg_pred(comp_pred16, CONVERT_TO_SHORTPTR(second_pred), w,
+ h, pred16, w);
+ besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src_address,
+ src_stride, sse);
+ } else {
+ besterr =
+ vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src_address, src_stride, sse);
+ }
+ } else {
+ DECLARE_ALIGNED(16, uint8_t, pred[64 * 64]);
+ vp9_build_inter_predictor(pre_address, y_stride, pred, w, this_mv, sf, w, h,
+ 0, kernel, MV_PRECISION_Q3, 0, 0);
+ if (second_pred != NULL) {
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
+ vpx_comp_avg_pred(comp_pred, second_pred, w, h, pred, w);
+ besterr = vfp->vf(comp_pred, w, src_address, src_stride, sse);
+ } else {
+ besterr = vfp->vf(pred, w, src_address, src_stride, sse);
+ }
+ }
+ if (besterr >= UINT_MAX) return UINT_MAX;
+ return (int)besterr;
+#else
+ int besterr;
+ DECLARE_ALIGNED(16, uint8_t, pred[64 * 64]);
+ assert(sf->x_step_q4 == 16 && sf->y_step_q4 == 16);
+ assert(w != 0 && h != 0);
+ (void)xd;
+
+ vp9_build_inter_predictor(pre_address, y_stride, pred, w, this_mv, sf, w, h,
+ 0, kernel, MV_PRECISION_Q3, 0, 0);
+ if (second_pred != NULL) {
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
+ vpx_comp_avg_pred(comp_pred, second_pred, w, h, pred, w);
+ besterr = vfp->vf(comp_pred, w, src_address, src_stride, sse);
+ } else {
+ besterr = vfp->vf(pred, w, src_address, src_stride, sse);
+ }
+ return besterr;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
+// TODO(yunqing): this part can be further refactored.
+#if CONFIG_VP9_HIGHBITDEPTH
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER1(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ int64_t tmpmse; \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ thismse = \
+ accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
+ y, y_stride, second_pred, w, h, &sse); \
+ tmpmse = thismse; \
+ tmpmse += mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit); \
+ if (tmpmse >= INT_MAX) { \
+ v = INT_MAX; \
+ } else if ((v = (uint32_t)tmpmse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ }
+#else
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER1(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ const MV mv = { r, c }; \
+ const MV ref_mv = { rr, rc }; \
+ thismse = \
+ accurate_sub_pel_search(xd, &mv, x->me_sf, kernel, vfp, z, src_stride, \
+ y, y_stride, second_pred, w, h, &sse); \
+ if ((v = mv_err_cost(&mv, &ref_mv, mvjcost, mvcost, error_per_bit) + \
+ thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ }
+
+#endif
+
uint32_t vp9_find_best_sub_pixel_tree(
const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
const uint8_t *const z = x->plane[0].src.buf;
const uint8_t *const src_address = z;
const int src_stride = x->plane[0].src.stride;
@@ -650,6 +761,17 @@ uint32_t vp9_find_best_sub_pixel_tree(
int kr, kc;
MvLimits subpel_mv_limits;
+ // TODO(yunqing): need to add 4-tap filter optimization to speed up the
+ // encoder.
+ const InterpKernel *kernel =
+ (use_accurate_subpel_search > 0)
+ ? ((use_accurate_subpel_search == USE_4_TAPS)
+ ? vp9_filter_kernels[FOURTAP]
+ : ((use_accurate_subpel_search == USE_8_TAPS)
+ ? vp9_filter_kernels[EIGHTTAP]
+ : vp9_filter_kernels[EIGHTTAP_SHARP]))
+ : vp9_filter_kernels[BILINEAR];
+
vp9_set_subpel_mv_search_range(&subpel_mv_limits, &x->mv_limits, ref_mv);
minc = subpel_mv_limits.col_min;
maxc = subpel_mv_limits.col_max;
@@ -674,16 +796,25 @@ uint32_t vp9_find_best_sub_pixel_tree(
tr = br + search_step[idx].row;
tc = bc + search_step[idx].col;
if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
- const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
MV this_mv;
this_mv.row = tr;
this_mv.col = tc;
- if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
- src_stride, &sse);
- else
- thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse, second_pred);
+
+ if (use_accurate_subpel_search) {
+ thismse = accurate_sub_pel_search(xd, &this_mv, x->me_sf, kernel, vfp,
+ src_address, src_stride, y,
+ y_stride, second_pred, w, h, &sse);
+ } else {
+ const uint8_t *const pre_address =
+ y + (tr >> 3) * y_stride + (tc >> 3);
+ if (second_pred == NULL)
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
+ src_address, src_stride, &sse);
+ else
+ thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
+ src_address, src_stride, &sse, second_pred);
+ }
+
cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
mvcost, error_per_bit);
@@ -705,14 +836,21 @@ uint32_t vp9_find_best_sub_pixel_tree(
tc = bc + kc;
tr = br + kr;
if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
- const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
MV this_mv = { tr, tc };
- if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
- src_stride, &sse);
- else
- thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address,
- src_stride, &sse, second_pred);
+ if (use_accurate_subpel_search) {
+ thismse = accurate_sub_pel_search(xd, &this_mv, x->me_sf, kernel, vfp,
+ src_address, src_stride, y, y_stride,
+ second_pred, w, h, &sse);
+ } else {
+ const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
+ if (second_pred == NULL)
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse);
+ else
+ thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
+ src_address, src_stride, &sse, second_pred);
+ }
+
cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit);
@@ -743,20 +881,36 @@ uint32_t vp9_find_best_sub_pixel_tree(
if (tr == br && tc != bc) {
kc = bc - tc;
if (iters_per_step == 1) {
- CHECK_BETTER(second, br0, bc0 + kc);
+ if (use_accurate_subpel_search) {
+ CHECK_BETTER1(second, br0, bc0 + kc);
+ } else {
+ CHECK_BETTER(second, br0, bc0 + kc);
+ }
}
} else if (tr != br && tc == bc) {
kr = br - tr;
if (iters_per_step == 1) {
- CHECK_BETTER(second, br0 + kr, bc0);
+ if (use_accurate_subpel_search) {
+ CHECK_BETTER1(second, br0 + kr, bc0);
+ } else {
+ CHECK_BETTER(second, br0 + kr, bc0);
+ }
}
}
if (iters_per_step > 1) {
- CHECK_BETTER(second, br0 + kr, bc0);
- CHECK_BETTER(second, br0, bc0 + kc);
- if (br0 != br || bc0 != bc) {
- CHECK_BETTER(second, br0 + kr, bc0 + kc);
+ if (use_accurate_subpel_search) {
+ CHECK_BETTER1(second, br0 + kr, bc0);
+ CHECK_BETTER1(second, br0, bc0 + kc);
+ if (br0 != br || bc0 != bc) {
+ CHECK_BETTER1(second, br0 + kr, bc0 + kc);
+ }
+ } else {
+ CHECK_BETTER(second, br0 + kr, bc0);
+ CHECK_BETTER(second, br0, bc0 + kc);
+ if (br0 != br || bc0 != bc) {
+ CHECK_BETTER(second, br0 + kr, bc0 + kc);
+ }
}
}
}
@@ -781,6 +935,7 @@ uint32_t vp9_find_best_sub_pixel_tree(
}
#undef CHECK_BETTER
+#undef CHECK_BETTER1
static INLINE int check_bounds(const MvLimits *mv_limits, int row, int col,
int range) {
@@ -1578,9 +1733,10 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
}
#if CONFIG_NON_GREEDY_MV
-static double nb_mvs_inconsistency(const MV *mv, const int_mv *nb_mvs) {
+double av1_nb_mvs_inconsistency(const MV *mv, const int_mv *nb_mvs) {
int i;
- double best_cost = -1;
+ int update = 0;
+ double best_cost = 0;
vpx_clear_system_state();
for (i = 0; i < NB_MVS_NUM; ++i) {
if (nb_mvs[i].as_int != INVALID_MV) {
@@ -1589,18 +1745,15 @@ static double nb_mvs_inconsistency(const MV *mv, const int_mv *nb_mvs) {
const double col_diff = mv->col - nb_mv.col;
double cost = row_diff * row_diff + col_diff * col_diff;
cost = log2(1 + cost);
- if (best_cost < 0) {
+ if (update == 0) {
best_cost = cost;
+ update = 1;
} else {
best_cost = cost < best_cost ? cost : best_cost;
}
}
}
- if (best_cost < 0) {
- return 0;
- } else {
- return best_cost;
- }
+ return best_cost;
}
double vp9_diamond_search_sad_new(const MACROBLOCK *x,
@@ -1646,7 +1799,7 @@ double vp9_diamond_search_sad_new(const MACROBLOCK *x,
// Check the starting position
*best_mv_dist = fn_ptr->sdf(what, what_stride, in_what, in_what_stride);
- *best_mv_cost = nb_mvs_inconsistency(best_full_mv, nb_full_mvs);
+ *best_mv_cost = av1_nb_mvs_inconsistency(best_full_mv, nb_full_mvs);
bestsad = (*best_mv_dist) + lambda * (*best_mv_cost);
i = 0;
@@ -1679,7 +1832,8 @@ double vp9_diamond_search_sad_new(const MACROBLOCK *x,
const MV this_mv = { best_full_mv->row + ss_mv[i].row,
best_full_mv->col + ss_mv[i].col };
const double mv_dist = sad_array[t];
- const double mv_cost = nb_mvs_inconsistency(&this_mv, nb_full_mvs);
+ const double mv_cost =
+ av1_nb_mvs_inconsistency(&this_mv, nb_full_mvs);
double thissad = mv_dist + lambda * mv_cost;
if (thissad < bestsad) {
bestsad = thissad;
@@ -1699,7 +1853,8 @@ double vp9_diamond_search_sad_new(const MACROBLOCK *x,
const uint8_t *const check_here = ss_os[i] + best_address;
const double mv_dist =
fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
- const double mv_cost = nb_mvs_inconsistency(&this_mv, nb_full_mvs);
+ const double mv_cost =
+ av1_nb_mvs_inconsistency(&this_mv, nb_full_mvs);
double thissad = mv_dist + lambda * mv_cost;
if (thissad < bestsad) {
bestsad = thissad;
@@ -2285,7 +2440,7 @@ double vp9_refining_search_sad_new(const MACROBLOCK *x, MV *best_full_mv,
vpx_clear_system_state();
*best_mv_dist =
fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride);
- *best_mv_cost = nb_mvs_inconsistency(best_full_mv, nb_full_mvs);
+ *best_mv_cost = av1_nb_mvs_inconsistency(best_full_mv, nb_full_mvs);
best_sad = (*best_mv_dist) + lambda * (*best_mv_cost);
for (i = 0; i < search_range; i++) {
@@ -2307,7 +2462,7 @@ double vp9_refining_search_sad_new(const MACROBLOCK *x, MV *best_full_mv,
const MV mv = { best_full_mv->row + neighbors[j].row,
best_full_mv->col + neighbors[j].col };
const double mv_dist = sads[j];
- const double mv_cost = nb_mvs_inconsistency(&mv, nb_full_mvs);
+ const double mv_cost = av1_nb_mvs_inconsistency(&mv, nb_full_mvs);
const double thissad = mv_dist + lambda * mv_cost;
if (thissad < best_sad) {
best_sad = thissad;
@@ -2325,7 +2480,7 @@ double vp9_refining_search_sad_new(const MACROBLOCK *x, MV *best_full_mv,
const double mv_dist =
fn_ptr->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &mv), in_what->stride);
- const double mv_cost = nb_mvs_inconsistency(&mv, nb_full_mvs);
+ const double mv_cost = av1_nb_mvs_inconsistency(&mv, nb_full_mvs);
const double thissad = mv_dist + lambda * mv_cost;
if (thissad < best_sad) {
best_sad = thissad;
@@ -2587,7 +2742,8 @@ int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
(void)tc; \
(void)sse; \
(void)thismse; \
- (void)cost_list;
+ (void)cost_list; \
+ (void)use_accurate_subpel_search;
// Return the maximum MV.
uint32_t vp9_return_max_sub_pixel_mv(
@@ -2595,7 +2751,7 @@ uint32_t vp9_return_max_sub_pixel_mv(
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
COMMON_MV_TEST;
(void)minr;
@@ -2617,7 +2773,7 @@ uint32_t vp9_return_min_sub_pixel_mv(
int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h) {
+ int h, int use_accurate_subpel_search) {
COMMON_MV_TEST;
(void)maxr;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.h
index adb02bc1abd..a159cb288ed 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_mcomp.h
@@ -59,7 +59,7 @@ struct SPEED_FEATURES;
int vp9_init_search_range(int size);
int vp9_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
- int sad_per_bit, int distance,
+ int error_per_bit, int search_range,
const struct vp9_variance_vtable *fn_ptr,
const struct mv *center_mv);
@@ -75,7 +75,7 @@ typedef uint32_t(fractional_mv_step_fp)(
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
- int h);
+ int h, int use_accurate_subpel_search);
extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree;
extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned;
@@ -134,6 +134,8 @@ double vp9_full_pixel_diamond_new(const struct VP9_COMP *cpi, MACROBLOCK *x,
const vp9_variance_fn_ptr_t *fn_ptr,
const int_mv *nb_full_mvs,
struct TplDepStats *tpl_stats, int rf_idx);
+
+double av1_nb_mvs_inconsistency(const MV *mv, const int_mv *nb_mvs);
#endif // CONFIG_NON_GREEDY_MV
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_noise_estimate.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_noise_estimate.c
index 249e03760fa..8c9a40f5586 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_noise_estimate.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_noise_estimate.c
@@ -148,7 +148,9 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) {
ne->last_h = cm->height;
}
return;
- } else if (cm->current_video_frame > 60 &&
+ } else if (frame_counter > 60 && cpi->svc.num_encoded_top_layer > 1 &&
+ cpi->rc.frames_since_key > cpi->svc.number_spatial_layers &&
+ cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1 &&
cpi->rc.avg_frame_low_motion < (low_res ? 70 : 50)) {
// Force noise estimation to 0 and denoiser off if content has high motion.
ne->level = kLowLow;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_pickmode.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_pickmode.c
index 416d437e07d..1324b5bc8aa 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_pickmode.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_pickmode.c
@@ -247,7 +247,8 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop,
cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0,
+ cpi->sf.use_accurate_subpel_search);
*rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
x->mvcost, MV_COST_WEIGHT);
}
@@ -1539,7 +1540,8 @@ static int search_new_mv(VP9_COMP *cpi, MACROBLOCK *x,
cpi->common.allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0, 0);
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0, 0,
+ cpi->sf.use_accurate_subpel_search);
} else if (svc->use_base_mv && svc->spatial_layer_id) {
if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) {
const int pre_stride = xd->plane[0].pre[0].stride;
@@ -1730,11 +1732,21 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
if (!cpi->use_svc ||
(svc->use_gf_temporal_ref_current_layer &&
!svc->layer_context[svc->temporal_layer_id].is_key_frame)) {
+ struct scale_factors *const sf_last = &cm->frame_refs[LAST_FRAME - 1].sf;
+ struct scale_factors *const sf_golden =
+ &cm->frame_refs[GOLDEN_FRAME - 1].sf;
gf_temporal_ref = 1;
- if (cpi->rc.avg_frame_low_motion > 70)
- thresh_svc_skip_golden = 500;
- else
- thresh_svc_skip_golden = 0;
+ // For temporal long term prediction, check that the golden reference
+ // is same scale as last reference, otherwise disable.
+ if ((sf_last->x_scale_fp != sf_golden->x_scale_fp) ||
+ (sf_last->y_scale_fp != sf_golden->y_scale_fp)) {
+ gf_temporal_ref = 0;
+ } else {
+ if (cpi->rc.avg_frame_low_motion > 70)
+ thresh_svc_skip_golden = 500;
+ else
+ thresh_svc_skip_golden = 0;
+ }
}
init_ref_frame_cost(cm, xd, ref_frame_cost);
@@ -2758,7 +2770,8 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
x->nmvjointcost, x->mvcost, &dummy_dist,
- &x->pred_sse[ref_frame], NULL, 0, 0);
+ &x->pred_sse[ref_frame], NULL, 0, 0,
+ cpi->sf.use_accurate_subpel_search);
xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.c
index 76e310ac274..cdd824358cd 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.c
@@ -247,20 +247,65 @@ int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) {
return target;
}
+// Update the buffer level before encoding with the per-frame-bandwidth,
+static void update_buffer_level_preencode(VP9_COMP *cpi) {
+ RATE_CONTROL *const rc = &cpi->rc;
+ rc->bits_off_target += rc->avg_frame_bandwidth;
+ // Clip the buffer level to the maximum specified buffer size.
+ rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
+ rc->buffer_level = rc->bits_off_target;
+}
+
+// Update the buffer level before encoding with the per-frame-bandwidth
+// for SVC. The current and all upper temporal layers are updated, needed
+// for the layered rate control which involves cumulative buffer levels for
+// the temporal layers. Allow for using the timestamp(pts) delta for the
+// framerate when the set_ref_frame_config is used.
+static void update_buffer_level_svc_preencode(VP9_COMP *cpi) {
+ SVC *const svc = &cpi->svc;
+ int i;
+ // Set this to 1 to use timestamp delta for "framerate" under
+ // ref_frame_config usage.
+ int use_timestamp = 1;
+ const int64_t ts_delta =
+ svc->time_stamp_superframe - svc->time_stamp_prev[svc->spatial_layer_id];
+ for (i = svc->temporal_layer_id; i < svc->number_temporal_layers; ++i) {
+ const int layer =
+ LAYER_IDS_TO_IDX(svc->spatial_layer_id, i, svc->number_temporal_layers);
+ LAYER_CONTEXT *const lc = &svc->layer_context[layer];
+ RATE_CONTROL *const lrc = &lc->rc;
+ if (use_timestamp && cpi->svc.use_set_ref_frame_config &&
+ svc->number_temporal_layers == 1 && ts_delta > 0 &&
+ svc->current_superframe > 0) {
+ // TODO(marpan): This may need to be modified for temporal layers.
+ const double framerate_pts = 10000000.0 / ts_delta;
+ lrc->bits_off_target += (int)(lc->target_bandwidth / framerate_pts);
+ } else {
+ lrc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
+ }
+ // Clip buffer level to maximum buffer size for the layer.
+ lrc->bits_off_target =
+ VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size);
+ lrc->buffer_level = lrc->bits_off_target;
+ if (i == svc->temporal_layer_id) {
+ cpi->rc.bits_off_target = lrc->bits_off_target;
+ cpi->rc.buffer_level = lrc->buffer_level;
+ }
+ }
+}
+
// Update the buffer level for higher temporal layers, given the encoded current
// temporal layer.
-static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) {
+static void update_layer_buffer_level_postencode(SVC *svc,
+ int encoded_frame_size) {
int i = 0;
- int current_temporal_layer = svc->temporal_layer_id;
+ const int current_temporal_layer = svc->temporal_layer_id;
for (i = current_temporal_layer + 1; i < svc->number_temporal_layers; ++i) {
const int layer =
LAYER_IDS_TO_IDX(svc->spatial_layer_id, i, svc->number_temporal_layers);
LAYER_CONTEXT *lc = &svc->layer_context[layer];
RATE_CONTROL *lrc = &lc->rc;
- int bits_off_for_this_layer =
- (int)(lc->target_bandwidth / lc->framerate - encoded_frame_size);
- lrc->bits_off_target += bits_off_for_this_layer;
-
+ lrc->bits_off_target -= encoded_frame_size;
// Clip buffer level to maximum buffer size for the layer.
lrc->bits_off_target =
VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size);
@@ -268,29 +313,13 @@ static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) {
}
}
-// Update the buffer level: leaky bucket model.
-static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) {
- const VP9_COMMON *const cm = &cpi->common;
+// Update the buffer level after encoding with encoded frame size.
+static void update_buffer_level_postencode(VP9_COMP *cpi,
+ int encoded_frame_size) {
RATE_CONTROL *const rc = &cpi->rc;
-
- // On dropped frame, don't update buffer if its currently stable
- // (above optimal level). This can cause issues when full superframe
- // can drop (!= LAYER_DROP), since QP is adjusted downwards with buffer
- // overflow, which can cause more frame drops.
- if (cpi->svc.framedrop_mode != LAYER_DROP && encoded_frame_size == 0 &&
- rc->buffer_level > rc->optimal_buffer_level)
- return;
-
- // Non-viewable frames are a special case and are treated as pure overhead.
- if (!cm->show_frame) {
- rc->bits_off_target -= encoded_frame_size;
- } else {
- rc->bits_off_target += rc->avg_frame_bandwidth - encoded_frame_size;
- }
-
+ rc->bits_off_target -= encoded_frame_size;
// Clip the buffer level to the maximum specified buffer size.
rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
-
// For screen-content mode, and if frame-dropper is off, don't let buffer
// level go below threshold, given here as -rc->maximum_ buffer_size.
if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
@@ -300,7 +329,7 @@ static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) {
rc->buffer_level = rc->bits_off_target;
if (is_one_pass_cbr_svc(cpi)) {
- update_layer_buffer_level(&cpi->svc, encoded_frame_size);
+ update_layer_buffer_level_postencode(&cpi->svc, encoded_frame_size);
}
}
@@ -363,6 +392,7 @@ void vp9_rc_init(const VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
rc->high_source_sad = 0;
rc->reset_high_source_sad = 0;
rc->high_source_sad_lagindex = -1;
+ rc->high_num_blocks_with_motion = 0;
rc->hybrid_intra_scene_change = 0;
rc->re_encode_maxq_scene_change = 0;
rc->alt_ref_gf_group = 0;
@@ -398,6 +428,11 @@ void vp9_rc_init(const VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
rc->max_gf_interval = vp9_rc_get_default_max_gf_interval(
oxcf->init_framerate, rc->min_gf_interval);
rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
+
+ rc->force_max_q = 0;
+ rc->last_post_encode_dropped_scene_change = 0;
+ rc->use_post_encode_drop = 0;
+ rc->ext_use_post_encode_drop = 0;
}
static int check_buffer_above_thresh(VP9_COMP *cpi, int drop_mark) {
@@ -515,6 +550,39 @@ static int drop_frame(VP9_COMP *cpi) {
}
}
+int post_encode_drop_screen_content(VP9_COMP *cpi, size_t *size) {
+ size_t frame_size = *size << 3;
+ int64_t new_buffer_level =
+ cpi->rc.buffer_level + cpi->rc.avg_frame_bandwidth - (int64_t)frame_size;
+
+ // For now we drop if new buffer level (given the encoded frame size) goes
+ // below 0.
+ if (new_buffer_level < 0) {
+ *size = 0;
+ vp9_rc_postencode_update_drop_frame(cpi);
+ // Update flag to use for next frame.
+ if (cpi->rc.high_source_sad ||
+ (cpi->use_svc && cpi->svc.high_source_sad_superframe))
+ cpi->rc.last_post_encode_dropped_scene_change = 1;
+ // Force max_q on next fame.
+ cpi->rc.force_max_q = 1;
+ cpi->rc.avg_frame_qindex[INTER_FRAME] = cpi->rc.worst_quality;
+ cpi->last_frame_dropped = 1;
+ cpi->ext_refresh_frame_flags_pending = 0;
+ if (cpi->use_svc) {
+ cpi->svc.last_layer_dropped[cpi->svc.spatial_layer_id] = 1;
+ cpi->svc.drop_spatial_layer[cpi->svc.spatial_layer_id] = 1;
+ cpi->svc.drop_count[cpi->svc.spatial_layer_id]++;
+ cpi->svc.skip_enhancement_layer = 1;
+ }
+ return 1;
+ }
+
+ cpi->rc.force_max_q = 0;
+ cpi->rc.last_post_encode_dropped_scene_change = 0;
+ return 0;
+}
+
int vp9_rc_drop_frame(VP9_COMP *cpi) {
SVC *svc = &cpi->svc;
int svc_prev_layer_dropped = 0;
@@ -834,7 +902,7 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) {
int active_worst_quality;
int ambient_qp;
unsigned int num_frames_weight_key = 5 * cpi->svc.number_temporal_layers;
- if (frame_is_intra_only(cm) || rc->reset_high_source_sad)
+ if (frame_is_intra_only(cm) || rc->reset_high_source_sad || rc->force_max_q)
return rc->worst_quality;
// For ambient_qp we use minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME]
// for the first few frames following key frame. These are both initialized
@@ -845,6 +913,7 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) {
? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
rc->avg_frame_qindex[KEY_FRAME])
: rc->avg_frame_qindex[INTER_FRAME];
+ active_worst_quality = VPXMIN(rc->worst_quality, (ambient_qp * 5) >> 2);
// For SVC if the current base spatial layer was key frame, use the QP from
// that base layer for ambient_qp.
if (cpi->use_svc && cpi->svc.spatial_layer_id > 0) {
@@ -854,9 +923,9 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) {
if (lc->is_key_frame) {
const RATE_CONTROL *lrc = &lc->rc;
ambient_qp = VPXMIN(ambient_qp, lrc->last_q[KEY_FRAME]);
+ active_worst_quality = VPXMIN(rc->worst_quality, (ambient_qp * 9) >> 3);
}
}
- active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 >> 2);
if (rc->buffer_level > rc->optimal_buffer_level) {
// Adjust down.
// Maximum limit for down adjustment ~30%; make it lower for screen content.
@@ -1216,10 +1285,16 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
if (frame_is_intra_only(cm)) {
- // Handle the special case for key frames forced when we have reached
- // the maximum key frame interval. Here force the Q to a range
- // based on the ambient Q to reduce the risk of popping.
- if (rc->this_key_frame_forced) {
+ if (rc->frames_to_key == 1 && oxcf->rc_mode == VPX_Q) {
+ // If the next frame is also a key frame or the current frame is the
+ // only frame in the sequence in AOM_Q mode, just use the cq_level
+ // as q.
+ active_best_quality = cq_level;
+ active_worst_quality = cq_level;
+ } else if (rc->this_key_frame_forced) {
+ // Handle the special case for key frames forced when we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping.
double last_boosted_q;
int delta_qindex;
int qindex;
@@ -1289,6 +1364,16 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
// Constrained quality use slightly lower active best.
active_best_quality = active_best_quality * 15 / 16;
+ // Modify best quality for second level arfs. For mode VPX_Q this
+ // becomes the baseline frame q.
+ if (gf_group->rf_level[gf_group_index] == GF_ARF_LOW) {
+ const int layer_depth = gf_group->layer_depth[gf_group_index];
+ // linearly fit the frame q depending on the layer depth index from
+ // the base layer ARF.
+ active_best_quality =
+ ((layer_depth - 1) * q + active_best_quality + layer_depth / 2) /
+ layer_depth;
+ }
} else if (oxcf->rc_mode == VPX_Q) {
if (!cpi->refresh_alt_ref_frame) {
active_best_quality = cq_level;
@@ -1297,8 +1382,14 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
// Modify best quality for second level arfs. For mode VPX_Q this
// becomes the baseline frame q.
- if (gf_group->rf_level[gf_group_index] == GF_ARF_LOW)
- active_best_quality = (active_best_quality + cq_level + 1) / 2;
+ if (gf_group->rf_level[gf_group_index] == GF_ARF_LOW) {
+ const int layer_depth = gf_group->layer_depth[gf_group_index];
+ // linearly fit the frame q depending on the layer depth index from
+ // the base layer ARF.
+ active_best_quality = ((layer_depth - 1) * cq_level +
+ active_best_quality + layer_depth / 2) /
+ layer_depth;
+ }
}
} else {
active_best_quality = get_gf_active_quality(cpi, q, cm->bit_depth);
@@ -1475,12 +1566,14 @@ void vp9_configure_buffer_updates(VP9_COMP *cpi, int gf_group_index) {
}
void vp9_estimate_qp_gop(VP9_COMP *cpi) {
- int gop_length = cpi->rc.baseline_gf_interval;
+ int gop_length = cpi->twopass.gf_group.gf_group_size;
int bottom_index, top_index;
int idx;
const int gf_index = cpi->twopass.gf_group.index;
+ const int is_src_frame_alt_ref = cpi->rc.is_src_frame_alt_ref;
+ const int refresh_frame_context = cpi->common.refresh_frame_context;
- for (idx = 1; idx <= gop_length + 1 && idx < MAX_LAG_BUFFERS; ++idx) {
+ for (idx = 1; idx <= gop_length; ++idx) {
TplDepFrame *tpl_frame = &cpi->tpl_stats[idx];
int target_rate = cpi->twopass.gf_group.bit_allocation[idx];
cpi->twopass.gf_group.index = idx;
@@ -1492,6 +1585,8 @@ void vp9_estimate_qp_gop(VP9_COMP *cpi) {
}
// Reset the actual index and frame update
cpi->twopass.gf_group.index = gf_index;
+ cpi->rc.is_src_frame_alt_ref = is_src_frame_alt_ref;
+ cpi->common.refresh_frame_context = refresh_frame_context;
vp9_configure_buffer_updates(cpi, gf_index);
}
@@ -1672,7 +1767,7 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
}
if (frame_is_intra_only(cm)) rc->last_kf_qindex = qindex;
- update_buffer_level(cpi, rc->projected_frame_size);
+ update_buffer_level_postencode(cpi, rc->projected_frame_size);
// Rolling monitors of whether we are over or underspending used to help
// regulate min and Max Q in two pass.
@@ -1769,14 +1864,20 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
}
void vp9_rc_postencode_update_drop_frame(VP9_COMP *cpi) {
- // Update buffer level with zero size, update frame counters, and return.
- update_buffer_level(cpi, 0);
cpi->common.current_video_frame++;
cpi->rc.frames_since_key++;
cpi->rc.frames_to_key--;
cpi->rc.rc_2_frame = 0;
cpi->rc.rc_1_frame = 0;
cpi->rc.last_avg_frame_bandwidth = cpi->rc.avg_frame_bandwidth;
+ // For SVC on dropped frame when framedrop_mode != LAYER_DROP:
+ // in this mode the whole superframe may be dropped if only a single layer
+ // has buffer underflow (below threshold). Since this can then lead to
+ // increasing buffer levels/overflow for certain layers even though whole
+ // superframe is dropped, we cap buffer level if its already stable.
+ if (cpi->use_svc && cpi->svc.framedrop_mode != LAYER_DROP &&
+ cpi->rc.buffer_level > cpi->rc.optimal_buffer_level)
+ cpi->rc.buffer_level = cpi->rc.optimal_buffer_level;
}
static int calc_pframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) {
@@ -1822,10 +1923,9 @@ void vp9_rc_get_one_pass_vbr_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
- // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
if (!cpi->refresh_alt_ref_frame &&
(cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
+ rc->frames_to_key == 0)) {
cm->frame_type = KEY_FRAME;
rc->this_key_frame_forced =
cm->current_video_frame != 0 && rc->frames_to_key == 0;
@@ -2031,7 +2131,7 @@ void vp9_rc_get_svc_params(VP9_COMP *cpi) {
cm->frame_type = KEY_FRAME;
rc->source_alt_ref_active = 0;
if (is_one_pass_cbr_svc(cpi)) {
- if (cm->current_video_frame > 0) vp9_svc_reset_key_frame(cpi);
+ if (cm->current_video_frame > 0) vp9_svc_reset_temporal_layers(cpi, 1);
layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id,
svc->number_temporal_layers);
svc->layer_context[layer].is_key_frame = 1;
@@ -2110,15 +2210,15 @@ void vp9_rc_get_svc_params(VP9_COMP *cpi) {
vp9_cyclic_refresh_update_parameters(cpi);
vp9_rc_set_frame_target(cpi, target);
+ if (cm->show_frame) update_buffer_level_svc_preencode(cpi);
}
void vp9_rc_get_one_pass_cbr_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
int target;
- // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
if ((cm->current_video_frame == 0) || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0)) {
+ rc->frames_to_key == 0) {
cm->frame_type = KEY_FRAME;
rc->frames_to_key = cpi->oxcf.key_freq;
rc->kf_boost = DEFAULT_KF_BOOST;
@@ -2151,6 +2251,9 @@ void vp9_rc_get_one_pass_cbr_params(VP9_COMP *cpi) {
target = calc_pframe_target_size_one_pass_cbr(cpi);
vp9_rc_set_frame_target(cpi, target);
+
+ if (cm->show_frame) update_buffer_level_preencode(cpi);
+
if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
cpi->resize_pending = vp9_resize_one_pass_cbr(cpi);
else
@@ -2654,8 +2757,11 @@ void vp9_scene_detection_onepass(VP9_COMP *cpi) {
if (cm->use_highbitdepth) return;
#endif
rc->high_source_sad = 0;
- if (cpi->svc.spatial_layer_id == 0 && src_width == last_src_width &&
- src_height == last_src_height) {
+ rc->high_num_blocks_with_motion = 0;
+ // For SVC: scene detection is only checked on first spatial layer of
+ // the superframe using the original/unscaled resolutions.
+ if (cpi->svc.spatial_layer_id == cpi->svc.first_spatial_layer_to_encode &&
+ src_width == last_src_width && src_height == last_src_height) {
YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
int num_mi_cols = cm->mi_cols;
int num_mi_rows = cm->mi_rows;
@@ -2772,6 +2878,8 @@ void vp9_scene_detection_onepass(VP9_COMP *cpi) {
} else {
rc->avg_source_sad[lagframe_idx] = avg_sad;
}
+ if (num_zero_temp_sad < (num_samples >> 1))
+ rc->high_num_blocks_with_motion = 1;
}
}
// For CBR non-screen content mode, check if we should reset the rate
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.h
index 3b441bf1f50..16aa08137ee 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_ratectrl.h
@@ -175,6 +175,7 @@ typedef struct {
uint64_t avg_source_sad[MAX_LAG_BUFFERS];
uint64_t prev_avg_source_sad_lag;
int high_source_sad_lagindex;
+ int high_num_blocks_with_motion;
int alt_ref_gf_group;
int last_frame_is_src_altref;
int high_source_sad;
@@ -186,6 +187,14 @@ typedef struct {
int force_qpmin;
int reset_high_source_sad;
double perc_arf_usage;
+ int force_max_q;
+ // Last frame was dropped post encode on scene change.
+ int last_post_encode_dropped_scene_change;
+ // Enable post encode frame dropping for screen content. Only enabled when
+ // ext_use_post_encode_drop is enabled by user.
+ int use_post_encode_drop;
+ // External flag to enable post encode frame dropping, controlled by user.
+ int ext_use_post_encode_drop;
} RATE_CONTROL;
struct VP9_COMP;
@@ -194,7 +203,7 @@ struct VP9EncoderConfig;
void vp9_rc_init(const struct VP9EncoderConfig *oxcf, int pass,
RATE_CONTROL *rc);
-int vp9_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+int vp9_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
double correction_factor, vpx_bit_depth_t bit_depth);
double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
@@ -205,9 +214,9 @@ void vp9_rc_init_minq_luts(void);
int vp9_rc_get_default_min_gf_interval(int width, int height, double framerate);
// Note vp9_rc_get_default_max_gf_interval() requires the min_gf_interval to
-// be passed in to ensure that the max_gf_interval returned is at least as bis
+// be passed in to ensure that the max_gf_interval returned is at least as big
// as that.
-int vp9_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
+int vp9_rc_get_default_max_gf_interval(double framerate, int min_gf_interval);
// Generally at the high level, the following flow is expected
// to be enforced for rate control:
@@ -247,13 +256,16 @@ void vp9_rc_postencode_update_drop_frame(struct VP9_COMP *cpi);
// Changes only the rate correction factors in the rate control structure.
void vp9_rc_update_rate_correction_factors(struct VP9_COMP *cpi);
+// Post encode drop for CBR screen-content mode.
+int post_encode_drop_screen_content(struct VP9_COMP *cpi, size_t *size);
+
// Decide if we should drop this frame: For 1-pass CBR.
// Changes only the decimation count in the rate control structure
int vp9_rc_drop_frame(struct VP9_COMP *cpi);
// Computes frame size bounds.
void vp9_rc_compute_frame_size_bounds(const struct VP9_COMP *cpi,
- int this_frame_target,
+ int frame_target,
int *frame_under_shoot_limit,
int *frame_over_shoot_limit);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.c
index 2e4a4fe9fa2..8323f3af4ee 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.c
@@ -173,69 +173,61 @@ static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
128, 144, 144 };
-int64_t vp9_compute_rd_mult_based_on_qindex(const VP9_COMP *cpi, int qindex) {
- const int64_t q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth);
+int vp9_compute_rd_mult_based_on_qindex(const VP9_COMP *cpi, int qindex) {
+ // largest dc_quant is 21387, therefore rdmult should always fit in uint32_t
+ // i.e. 21387 * 21387 * 8 = 3659230152 = 0xDA1B6BC8
+ const int q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth);
+ uint32_t rdmult = q * q;
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ rdmult = rdmult * 3 + (rdmult * 2 / 3);
+ } else {
+ if (qindex < 64)
+ rdmult = rdmult * 4;
+ else if (qindex <= 128)
+ rdmult = rdmult * 3 + rdmult / 2;
+ else if (qindex < 190)
+ rdmult = rdmult * 4 + rdmult / 2;
+ else
+ rdmult = rdmult * 7 + rdmult / 2;
+ }
#if CONFIG_VP9_HIGHBITDEPTH
- int64_t rdmult = 0;
switch (cpi->common.bit_depth) {
- case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
- case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
- default:
- assert(cpi->common.bit_depth == VPX_BITS_12);
- rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8);
- break;
+ case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(rdmult, 4); break;
+ case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(rdmult, 8); break;
+ default: break;
}
-#else
- int64_t rdmult = 88 * q * q / 24;
#endif // CONFIG_VP9_HIGHBITDEPTH
- return rdmult;
+ return rdmult > 0 ? rdmult : 1;
}
-int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) {
- int64_t rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, qindex);
-
+static int modulate_rdmult(const VP9_COMP *cpi, int rdmult) {
+ int64_t rdmult_64 = rdmult;
if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
- const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100));
+ const int gfu_boost = cpi->multi_layer_arf
+ ? gf_group->gfu_boost[gf_group->index]
+ : cpi->rc.gfu_boost;
+ const int boost_index = VPXMIN(15, (gfu_boost / 100));
- rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
- rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
+ rdmult_64 = (rdmult_64 * rd_frame_type_factor[frame_type]) >> 7;
+ rdmult_64 += ((rdmult_64 * rd_boost_factor[boost_index]) >> 7);
}
- if (rdmult < 1) rdmult = 1;
- return (int)rdmult;
+ return (int)rdmult_64;
}
-int vp9_get_adaptive_rdmult(const VP9_COMP *cpi, double beta) {
- const VP9_COMMON *cm = &cpi->common;
- int64_t q = vp9_dc_quant(cm->base_qindex, 0, cpi->common.bit_depth);
-
-#if CONFIG_VP9_HIGHBITDEPTH
- int64_t rdmult = 0;
- switch (cpi->common.bit_depth) {
- case VPX_BITS_8: rdmult = (int)((88 * q * q / beta) / 24); break;
- case VPX_BITS_10:
- rdmult = ROUND_POWER_OF_TWO((int)((88 * q * q / beta) / 24), 4);
- break;
- default:
- assert(cpi->common.bit_depth == VPX_BITS_12);
- rdmult = ROUND_POWER_OF_TWO((int)((88 * q * q / beta) / 24), 8);
- break;
- }
-#else
- int64_t rdmult = (int)((88 * q * q / beta) / 24);
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
- if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
- const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
- const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
- const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100));
+int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) {
+ int rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, qindex);
+ return modulate_rdmult(cpi, rdmult);
+}
- rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
- rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
- }
- if (rdmult < 1) rdmult = 1;
- return (int)rdmult;
+int vp9_get_adaptive_rdmult(const VP9_COMP *cpi, double beta) {
+ int rdmult =
+ vp9_compute_rd_mult_based_on_qindex(cpi, cpi->common.base_qindex);
+ rdmult = (int)((double)rdmult / beta);
+ rdmult = rdmult > 0 ? rdmult : 1;
+ return modulate_rdmult(cpi, rdmult);
}
static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
@@ -631,6 +623,7 @@ YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi,
const VP9_COMMON *const cm = &cpi->common;
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+ assert(ref_frame >= LAST_FRAME && ref_frame <= ALTREF_FRAME);
return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
? &cm->buffer_pool->frame_bufs[scaled_idx].buf
: NULL;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.h
index f2fc776a4aa..fa85f2176f5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rd.h
@@ -134,8 +134,7 @@ struct TileDataEnc;
struct VP9_COMP;
struct macroblock;
-int64_t vp9_compute_rd_mult_based_on_qindex(const struct VP9_COMP *cpi,
- int qindex);
+int vp9_compute_rd_mult_based_on_qindex(const struct VP9_COMP *cpi, int qindex);
int vp9_compute_rd_mult(const struct VP9_COMP *cpi, int qindex);
@@ -145,7 +144,7 @@ void vp9_initialize_rd_consts(struct VP9_COMP *cpi);
void vp9_initialize_me_consts(struct VP9_COMP *cpi, MACROBLOCK *x, int qindex);
-void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
+void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
unsigned int qstep, int *rate, int64_t *dist);
void vp9_model_rd_from_var_lapndz_vec(unsigned int var[MAX_MB_PLANE],
@@ -176,8 +175,8 @@ void vp9_set_rd_speed_thresholds(struct VP9_COMP *cpi);
void vp9_set_rd_speed_thresholds_sub8x8(struct VP9_COMP *cpi);
-void vp9_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh, int bsize,
- int best_mode_index);
+void vp9_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
+ int bsize, int best_mode_index);
static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
const int *const thresh_fact) {
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rdopt.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rdopt.c
index 698faa343bb..9cde479cd6f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rdopt.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_rdopt.c
@@ -1821,7 +1821,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize], 0,
cpi->sf.mv.subpel_search_level, NULL, x->nmvjointcost, x->mvcost,
- &dis, &sse, second_pred, pw, ph);
+ &dis, &sse, second_pred, pw, ph, cpi->sf.use_accurate_subpel_search);
}
// Restore the pointer to the first (possibly scaled) prediction buffer.
@@ -1875,6 +1875,8 @@ static int64_t rd_pick_best_sub8x8_mode(
const BLOCK_SIZE bsize = mi->sb_type;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ const int pw = num_4x4_blocks_wide << 2;
+ const int ph = num_4x4_blocks_high << 2;
ENTROPY_CONTEXT t_above[2], t_left[2];
int subpelmv = 1, have_ref = 0;
SPEED_FEATURES *const sf = &cpi->sf;
@@ -2011,7 +2013,8 @@ static int64_t rd_pick_best_sub8x8_mode(
x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
sf->mv.subpel_search_level, cond_cost_list(cpi, cost_list),
x->nmvjointcost, x->mvcost, &distortion,
- &x->pred_sse[mi->ref_frame[0]], NULL, 0, 0);
+ &x->pred_sse[mi->ref_frame[0]], NULL, pw, ph,
+ cpi->sf.use_accurate_subpel_search);
// save motion search result for use in compound prediction
seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
@@ -2330,6 +2333,8 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
const int best_predmv_idx = x->mv_best_ref_index[ref];
const YV12_BUFFER_CONFIG *scaled_ref_frame =
vp9_get_scaled_ref_frame(cpi, ref);
+ const int pw = num_4x4_blocks_wide_lookup[bsize] << 2;
+ const int ph = num_4x4_blocks_high_lookup[bsize] << 2;
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2452,7 +2457,8 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph,
+ cpi->sf.use_accurate_subpel_search);
}
*rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
x->mvcost, MV_COST_WEIGHT);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_resize.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_resize.c
index 6ac77aeef28..23a320ae553 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_resize.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_resize.c
@@ -424,11 +424,11 @@ void vp9_resize_plane(const uint8_t *const input, int height, int width,
int in_stride, uint8_t *output, int height2, int width2,
int out_stride) {
int i;
- uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
+ uint8_t *intbuf = (uint8_t *)calloc(width2 * height, sizeof(*intbuf));
uint8_t *tmpbuf =
- (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width));
- uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * height);
- uint8_t *arrbuf2 = (uint8_t *)malloc(sizeof(uint8_t) * height2);
+ (uint8_t *)calloc(width < height ? height : width, sizeof(*tmpbuf));
+ uint8_t *arrbuf = (uint8_t *)calloc(height, sizeof(*arrbuf));
+ uint8_t *arrbuf2 = (uint8_t *)calloc(height2, sizeof(*arrbuf2));
if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL)
goto Error;
assert(width > 0);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.c
index 44909239d32..9b6c69a73fd 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.c
@@ -116,17 +116,13 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi,
sf->ml_partition_search_breakout_thresh[1] = -1.0f;
sf->ml_partition_search_breakout_thresh[2] = -1.0f;
}
-
#if CONFIG_VP9_HIGHBITDEPTH
if (cpi->Source->flags & YV12_FLAG_HIGHBITDEPTH) {
- sf->use_square_only_thresh_high = BLOCK_4X4;
- sf->use_square_only_thresh_low = BLOCK_SIZES;
- if (is_720p_or_larger) {
- sf->partition_search_breakout_thr.dist = (1 << 23);
- sf->use_ml_partition_search_breakout = 0;
- }
+ sf->ml_partition_search_breakout_thresh[0] -= 1.0f;
+ sf->ml_partition_search_breakout_thresh[1] -= 1.0f;
+ sf->ml_partition_search_breakout_thresh[2] -= 1.0f;
}
-#endif
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
if (speed >= 2) {
@@ -242,14 +238,10 @@ static void set_good_speed_feature_framesize_independent(VP9_COMP *cpi,
if (speed >= 1) {
sf->enable_tpl_model = 0;
- sf->ml_var_partition_pruning = 0;
+ sf->ml_var_partition_pruning = !boosted;
sf->ml_prune_rect_partition_threhold[1] = 200;
sf->ml_prune_rect_partition_threhold[2] = 200;
sf->ml_prune_rect_partition_threhold[3] = 200;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (cpi->Source->flags & YV12_FLAG_HIGHBITDEPTH)
- sf->prune_ref_frame_for_rect_partitions = 0;
-#endif // CONFIG_VP9_HIGHBITDEPTH
if (oxcf->pass == 2) {
TWO_PASS *const twopass = &cpi->twopass;
@@ -288,9 +280,11 @@ static void set_good_speed_feature_framesize_independent(VP9_COMP *cpi,
sf->exhaustive_searches_thresh =
(cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ? (1 << 23)
: INT_MAX;
+ sf->use_accurate_subpel_search = USE_4_TAPS;
}
if (speed >= 2) {
+ sf->ml_var_partition_pruning = 0;
if (oxcf->vbr_corpus_complexity)
sf->recode_loop = ALLOW_RECODE_FIRST;
else
@@ -328,6 +322,8 @@ static void set_good_speed_feature_framesize_independent(VP9_COMP *cpi,
good_quality_mesh_patterns[mesh_density_level][i].interval;
}
}
+
+ sf->use_accurate_subpel_search = USE_2_TAPS;
}
if (speed >= 3) {
@@ -450,6 +446,7 @@ static void set_rt_speed_feature_framesize_independent(
sf->disable_golden_ref = 0;
sf->enable_tpl_model = 0;
sf->enhanced_full_pixel_motion_search = 0;
+ sf->use_accurate_subpel_search = USE_2_TAPS;
if (speed >= 1) {
sf->allow_txfm_domain_distortion = 1;
@@ -565,6 +562,16 @@ static void set_rt_speed_feature_framesize_independent(
(frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1);
sf->max_delta_qindex = is_keyframe ? 20 : 15;
sf->partition_search_type = REFERENCE_PARTITION;
+#if CONFIG_ML_VAR_PARTITION
+ if (!frame_is_intra_only(cm) && cm->width >= 360 && cm->height >= 360)
+ sf->partition_search_type = ML_BASED_PARTITION;
+ else
+ sf->partition_search_type = REFERENCE_PARTITION;
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (cpi->Source->flags & YV12_FLAG_HIGHBITDEPTH)
+ sf->partition_search_type = REFERENCE_PARTITION;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_ML_VAR_PARTITION
if (cpi->oxcf.rc_mode == VPX_VBR && cpi->oxcf.lag_in_frames > 0 &&
cpi->rc.is_src_frame_alt_ref) {
sf->partition_search_type = VAR_BASED_PARTITION;
@@ -626,9 +633,7 @@ static void set_rt_speed_feature_framesize_independent(
sf->use_compound_nonrd_pickmode = 1;
}
#if CONFIG_ML_VAR_PARTITION
- if (!frame_is_intra_only(cm) && cm->width >= 360 && cm->height >= 360)
- sf->partition_search_type = ML_BASED_PARTITION;
- else
+ if (frame_is_intra_only(cm) || cm->width < 360 || cm->height < 360)
sf->partition_search_type = VAR_BASED_PARTITION;
#if CONFIG_VP9_HIGHBITDEPTH
if (cpi->Source->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -705,6 +710,7 @@ static void set_rt_speed_feature_framesize_independent(
// For SVC: enable use of lower resolution partition for higher resolution,
// only for 3 spatial layers and when config/top resolution is above VGA.
// Enable only for non-base temporal layer frames.
+ // TODO(jianj): Investigate webm:1578
if (cpi->use_svc && cpi->svc.use_partition_reuse &&
cpi->svc.number_spatial_layers == 3 && cpi->svc.temporal_layer_id > 0 &&
cpi->oxcf.width * cpi->oxcf.height > 640 * 480)
@@ -789,6 +795,21 @@ static void set_rt_speed_feature_framesize_independent(
sf->partition_search_type = FIXED_PARTITION;
sf->always_this_block_size = BLOCK_64X64;
}
+ // Special case for screen content: increase motion search on base spatial
+ // layer when high motion is detected or previous SL0 frame was dropped.
+ // Avoid speed 5 for as there is an issue with SVC datarate test.
+ // TODO(marpan/jianj): Investigate issue at speed 5.
+ if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && cpi->oxcf.speed > 5 &&
+ cpi->svc.spatial_layer_id == 0 &&
+ (cpi->rc.high_num_blocks_with_motion || cpi->svc.last_layer_dropped[0])) {
+ sf->mv.search_method = NSTEP;
+ sf->mv.fullpel_search_step_param = 2;
+ // TODO(marpan/jianj): Investigate issue for lower setting of step_param
+ // for spatial layers (namely on lower layers).
+ if (cpi->use_svc && cm->width != cpi->oxcf.width &&
+ cm->height != cpi->oxcf.height)
+ sf->mv.fullpel_search_step_param = 4;
+ }
}
void vp9_set_speed_features_framesize_dependent(VP9_COMP *cpi) {
@@ -897,12 +918,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) {
sf->allow_quant_coeff_opt = sf->optimize_coefficients;
sf->quant_opt_thresh = 99.0;
sf->allow_acl = 1;
-#if CONFIG_VP9_HIGHBITDEPTH
- // TODO(jingning): Make the model support high bit-depth route.
- sf->enable_tpl_model = !cm->use_highbitdepth && oxcf->enable_tpl_model;
-#else
sf->enable_tpl_model = oxcf->enable_tpl_model;
-#endif
sf->prune_ref_frame_for_rect_partitions = 0;
for (i = 0; i < TX_SIZES; i++) {
@@ -942,6 +958,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) {
sf->ml_prune_rect_partition_threhold[2] = -1;
sf->ml_prune_rect_partition_threhold[3] = -1;
sf->ml_var_partition_pruning = 0;
+ sf->use_accurate_subpel_search = USE_8_TAPS;
// Some speed-up features even for best quality as minimal impact on quality.
sf->adaptive_rd_thresh = 1;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.h
index a895ed2354b..02673e60200 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_speed_features.h
@@ -243,6 +243,13 @@ typedef enum {
RE_ENCODE_MAXQ = 2
} OVERSHOOT_DETECTION_CBR_RT;
+typedef enum {
+ USE_2_TAPS = 0,
+ USE_4_TAPS,
+ USE_8_TAPS,
+ USE_8_TAPS_SHARP,
+} SUBPEL_SEARCH_TYPE;
+
typedef struct SPEED_FEATURES {
MV_SPEED_FEATURES mv;
@@ -586,6 +593,10 @@ typedef struct SPEED_FEATURES {
// Allow for disabling golden reference.
int disable_golden_ref;
+
+ // Allow sub-pixel search to use interpolation filters with different taps in
+ // order to achieve accurate motion search result.
+ SUBPEL_SEARCH_TYPE use_accurate_subpel_search;
} SPEED_FEATURES;
struct VP9_COMP;
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c
index 1321c457575..21b920f11ae 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.c
@@ -53,6 +53,7 @@ void vp9_init_layer_context(VP9_COMP *const cpi) {
svc->previous_frame_is_intra_only = 0;
svc->superframe_has_layer_sync = 0;
svc->use_set_ref_frame_config = 0;
+ svc->num_encoded_top_layer = 0;
for (i = 0; i < REF_FRAMES; ++i) {
svc->fb_idx_spatial_layer_id[i] = -1;
@@ -329,6 +330,7 @@ void vp9_restore_layer_context(VP9_COMP *const cpi) {
LAYER_CONTEXT *const lc = get_layer_context(cpi);
const int old_frame_since_key = cpi->rc.frames_since_key;
const int old_frame_to_key = cpi->rc.frames_to_key;
+ const int old_ext_use_post_encode_drop = cpi->rc.ext_use_post_encode_drop;
cpi->rc = lc->rc;
cpi->twopass = lc->twopass;
@@ -346,7 +348,7 @@ void vp9_restore_layer_context(VP9_COMP *const cpi) {
cpi->rc.frames_since_key = old_frame_since_key;
cpi->rc.frames_to_key = old_frame_to_key;
}
-
+ cpi->rc.ext_use_post_encode_drop = old_ext_use_post_encode_drop;
// For spatial-svc, allow cyclic-refresh to be applied on the spatial layers,
// for the base temporal layer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
@@ -736,6 +738,8 @@ int vp9_one_pass_cbr_svc_start_layer(VP9_COMP *const cpi) {
}
svc->force_zero_mode_spatial_ref = 1;
svc->mi_stride[svc->spatial_layer_id] = cpi->common.mi_stride;
+ svc->mi_rows[svc->spatial_layer_id] = cpi->common.mi_rows;
+ svc->mi_cols[svc->spatial_layer_id] = cpi->common.mi_cols;
if (svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0212) {
set_flags_and_fb_idx_for_temporal_mode3(cpi);
@@ -931,7 +935,7 @@ void vp9_free_svc_cyclic_refresh(VP9_COMP *const cpi) {
}
// Reset on key frame: reset counters, references and buffer updates.
-void vp9_svc_reset_key_frame(VP9_COMP *const cpi) {
+void vp9_svc_reset_temporal_layers(VP9_COMP *const cpi, int is_key) {
int sl, tl;
SVC *const svc = &cpi->svc;
LAYER_CONTEXT *lc = NULL;
@@ -939,7 +943,7 @@ void vp9_svc_reset_key_frame(VP9_COMP *const cpi) {
for (tl = 0; tl < svc->number_temporal_layers; ++tl) {
lc = &cpi->svc.layer_context[sl * svc->number_temporal_layers + tl];
lc->current_video_frame_in_layer = 0;
- lc->frames_from_key_frame = 0;
+ if (is_key) lc->frames_from_key_frame = 0;
}
}
if (svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0212) {
@@ -1089,13 +1093,16 @@ void vp9_svc_assert_constraints_pattern(VP9_COMP *const cpi) {
}
} else if (svc->use_gf_temporal_ref_current_layer &&
!svc->layer_context[svc->temporal_layer_id].is_key_frame) {
- // If the usage of golden as second long term reference is enabled for this
- // layer, then temporal_layer_id of that reference must be base temporal
- // layer 0, and spatial_layer_id of that reference must be same as current
- // spatial_layer_id.
- assert(svc->fb_idx_spatial_layer_id[cpi->gld_fb_idx] ==
- svc->spatial_layer_id);
- assert(svc->fb_idx_temporal_layer_id[cpi->gld_fb_idx] == 0);
+ // For the usage of golden as second long term reference: the
+ // temporal_layer_id of that reference must be base temporal layer 0, and
+ // spatial_layer_id of that reference must be same as current
+ // spatial_layer_id. If not, disable feature.
+ // TODO(marpan): Investigate when this can happen, and maybe put this check
+ // and reset in a different place.
+ if (svc->fb_idx_spatial_layer_id[cpi->gld_fb_idx] !=
+ svc->spatial_layer_id ||
+ svc->fb_idx_temporal_layer_id[cpi->gld_fb_idx] != 0)
+ svc->use_gf_temporal_ref_current_layer = 0;
}
}
@@ -1107,7 +1114,8 @@ void vp9_svc_check_spatial_layer_sync(VP9_COMP *const cpi) {
if (svc->spatial_layer_id == 0) {
// On base spatial layer: if the current superframe has a layer sync then
// reset the pattern counters and reset to base temporal layer.
- if (svc->superframe_has_layer_sync) vp9_svc_reset_key_frame(cpi);
+ if (svc->superframe_has_layer_sync)
+ vp9_svc_reset_temporal_layers(cpi, cpi->common.frame_type == KEY_FRAME);
}
// If the layer sync is set for this current spatial layer then
// disable the temporal reference.
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.h b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.h
index fceab7780bb..94531204497 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_svc_layercontext.h
@@ -125,6 +125,8 @@ typedef struct SVC {
BLOCK_SIZE *prev_partition_svc;
int mi_stride[VPX_MAX_LAYERS];
+ int mi_rows[VPX_MAX_LAYERS];
+ int mi_cols[VPX_MAX_LAYERS];
int first_layer_denoise;
@@ -178,9 +180,14 @@ typedef struct SVC {
int first_spatial_layer_to_encode;
+ // Parameters for allowing framerate per spatial layer, and buffer
+ // update based on timestamps.
int64_t duration[VPX_SS_MAX_LAYERS];
-
int64_t timebase_fac;
+ int64_t time_stamp_superframe;
+ int64_t time_stamp_prev[VPX_SS_MAX_LAYERS];
+
+ int num_encoded_top_layer;
} SVC;
struct VP9_COMP;
@@ -234,7 +241,7 @@ int vp9_one_pass_cbr_svc_start_layer(struct VP9_COMP *const cpi);
void vp9_free_svc_cyclic_refresh(struct VP9_COMP *const cpi);
-void vp9_svc_reset_key_frame(struct VP9_COMP *const cpi);
+void vp9_svc_reset_temporal_layers(struct VP9_COMP *const cpi, int is_key);
void vp9_svc_check_reset_layer_rc_flag(struct VP9_COMP *const cpi);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_temporal_filter.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_temporal_filter.c
index 51668d01d61..4c1d8894b41 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_temporal_filter.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/vp9_temporal_filter.c
@@ -119,8 +119,13 @@ static void apply_temporal_filter(
unsigned int i, j, k, m;
int modifier;
const int rounding = (1 << strength) >> 1;
- const int uv_block_width = block_width >> ss_x;
- const int uv_block_height = block_height >> ss_y;
+ const unsigned int uv_block_width = block_width >> ss_x;
+ const unsigned int uv_block_height = block_height >> ss_y;
+ DECLARE_ALIGNED(16, uint16_t, y_diff_sse[256]);
+ DECLARE_ALIGNED(16, uint16_t, u_diff_sse[256]);
+ DECLARE_ALIGNED(16, uint16_t, v_diff_sse[256]);
+
+ int idx = 0, idy;
assert(strength >= 0);
assert(strength <= 6);
@@ -128,19 +133,42 @@ static void apply_temporal_filter(
assert(filter_weight >= 0);
assert(filter_weight <= 2);
+ memset(y_diff_sse, 0, 256 * sizeof(uint16_t));
+ memset(u_diff_sse, 0, 256 * sizeof(uint16_t));
+ memset(v_diff_sse, 0, 256 * sizeof(uint16_t));
+
+ // Calculate diff^2 for each pixel of the 16x16 block.
+ // TODO(yunqing): the following code needs to be optimized.
+ for (i = 0; i < block_height; i++) {
+ for (j = 0; j < block_width; j++) {
+ const int16_t diff =
+ y_frame1[i * (int)y_stride + j] - y_pred[i * (int)block_width + j];
+ y_diff_sse[idx++] = diff * diff;
+ }
+ }
+ idx = 0;
+ for (i = 0; i < uv_block_height; i++) {
+ for (j = 0; j < uv_block_width; j++) {
+ const int16_t diffu =
+ u_frame1[i * uv_stride + j] - u_pred[i * uv_buf_stride + j];
+ const int16_t diffv =
+ v_frame1[i * uv_stride + j] - v_pred[i * uv_buf_stride + j];
+ u_diff_sse[idx] = diffu * diffu;
+ v_diff_sse[idx] = diffv * diffv;
+ idx++;
+ }
+ }
+
for (i = 0, k = 0, m = 0; i < block_height; i++) {
for (j = 0; j < block_width; j++) {
const int pixel_value = y_pred[i * y_buf_stride + j];
// non-local mean approach
- int diff_sse[9] = { 0 };
- int idx, idy;
int y_index = 0;
const int uv_r = i >> ss_y;
const int uv_c = j >> ss_x;
-
- int diff;
+ modifier = 0;
for (idy = -1; idy <= 1; ++idy) {
for (idx = -1; idx <= 1; ++idx) {
@@ -149,9 +177,7 @@ static void apply_temporal_filter(
if (row >= 0 && row < (int)block_height && col >= 0 &&
col < (int)block_width) {
- const int diff = y_frame1[row * (int)y_stride + col] -
- y_pred[row * (int)block_width + col];
- diff_sse[y_index] = diff * diff;
+ modifier += y_diff_sse[row * (int)block_width + col];
++y_index;
}
}
@@ -159,16 +185,8 @@ static void apply_temporal_filter(
assert(y_index > 0);
- modifier = 0;
- for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
-
- diff = u_frame1[uv_r * uv_stride + uv_c] -
- u_pred[uv_r * uv_buf_stride + uv_c];
- modifier += diff * diff;
-
- diff = v_frame1[uv_r * uv_stride + uv_c] -
- v_pred[uv_r * uv_buf_stride + uv_c];
- modifier += diff * diff;
+ modifier += u_diff_sse[uv_r * uv_block_width + uv_c];
+ modifier += v_diff_sse[uv_r * uv_block_width + uv_c];
y_index += 2;
@@ -186,9 +204,6 @@ static void apply_temporal_filter(
const int v_pixel_value = v_pred[uv_r * uv_buf_stride + uv_c];
// non-local mean approach
- int u_diff_sse[9] = { 0 };
- int v_diff_sse[9] = { 0 };
- int idx, idy;
int cr_index = 0;
int u_mod = 0, v_mod = 0;
int y_diff = 0;
@@ -198,16 +213,10 @@ static void apply_temporal_filter(
const int row = uv_r + idy;
const int col = uv_c + idx;
- if (row >= 0 && row < uv_block_height && col >= 0 &&
- col < uv_block_width) {
- int diff = u_frame1[row * uv_stride + col] -
- u_pred[row * uv_buf_stride + col];
- u_diff_sse[cr_index] = diff * diff;
-
- diff = v_frame1[row * uv_stride + col] -
- v_pred[row * uv_buf_stride + col];
- v_diff_sse[cr_index] = diff * diff;
-
+ if (row >= 0 && row < (int)uv_block_height && col >= 0 &&
+ col < (int)uv_block_width) {
+ u_mod += u_diff_sse[row * uv_block_width + col];
+ v_mod += v_diff_sse[row * uv_block_width + col];
++cr_index;
}
}
@@ -215,18 +224,11 @@ static void apply_temporal_filter(
assert(cr_index > 0);
- for (idx = 0; idx < 9; ++idx) {
- u_mod += u_diff_sse[idx];
- v_mod += v_diff_sse[idx];
- }
-
for (idy = 0; idy < 1 + ss_y; ++idy) {
for (idx = 0; idx < 1 + ss_x; ++idx) {
const int row = (uv_r << ss_y) + idy;
const int col = (uv_c << ss_x) + idx;
- const int diff = y_frame1[row * (int)y_stride + col] -
- y_pred[row * (int)block_width + col];
- y_diff += diff * diff;
+ y_diff += y_diff_sse[row * (int)block_width + col];
++cr_index;
}
}
@@ -325,13 +327,23 @@ void vp9_highbd_temporal_filter_apply_c(
const uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
unsigned int i, j, k;
int modifier;
- int byte = 0;
const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
+ int diff_sse[256] = { 0 };
+ int this_idx = 0;
+
+ for (i = 0; i < block_height; i++) {
+ for (j = 0; j < block_width; j++) {
+ const int diff =
+ frame1[i * (int)stride + j] - frame2[i * (int)block_width + j];
+ diff_sse[this_idx++] = diff * diff;
+ }
+ }
+
+ modifier = 0;
for (i = 0, k = 0; i < block_height; i++) {
for (j = 0; j < block_width; j++, k++) {
- int pixel_value = *frame2;
- int diff_sse[9] = { 0 };
+ int pixel_value = frame2[i * (int)block_width + j];
int idx, idy, index = 0;
for (idy = -1; idy <= 1; ++idy) {
@@ -341,22 +353,16 @@ void vp9_highbd_temporal_filter_apply_c(
if (row >= 0 && row < (int)block_height && col >= 0 &&
col < (int)block_width) {
- int diff = frame1[byte + idy * (int)stride + idx] -
- frame2[idy * (int)block_width + idx];
- diff_sse[index] = diff * diff;
+ modifier += diff_sse[row * (int)block_width + col];
++index;
}
}
}
assert(index > 0);
- modifier = 0;
- for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
-
modifier *= 3;
modifier /= index;
- ++frame2;
modifier += rounding;
modifier >>= strength;
@@ -367,11 +373,7 @@ void vp9_highbd_temporal_filter_apply_c(
count[k] += modifier;
accumulator[k] += modifier * pixel_value;
-
- byte++;
}
-
- byte += stride - block_width;
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -421,12 +423,13 @@ static uint32_t temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
/* restore UMV window */
x->mv_limits = tmp_mv_limits;
+ // TODO(yunqing): may use higher tap interp filter than 2 taps if needed.
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(
x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], 0, mv_sf->subpel_search_level,
- cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
- 0);
+ cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 16,
+ 16, USE_8_TAPS_SHARP);
// Restore input state
x->plane[0].src = src;
@@ -949,8 +952,7 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
}
// Initialize errorperbit and sabperbit.
- rdmult = (int)vp9_compute_rd_mult_based_on_qindex(cpi, ARNR_FILT_QINDEX);
- if (rdmult < 1) rdmult = 1;
+ rdmult = vp9_compute_rd_mult_based_on_qindex(cpi, ARNR_FILT_QINDEX);
set_error_per_bit(&cpi->td.mb, rdmult);
vp9_initialize_me_consts(cpi, &cpi->td.mb, ARNR_FILT_QINDEX);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c
index 293cdcd675a..0cecd654019 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c
@@ -185,8 +185,8 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
int skip_block, const int16_t *round_ptr,
const int16_t *quant_ptr, int16_t *qcoeff_ptr,
int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
__m128i zero;
int pass;
@@ -215,7 +215,7 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
__m128i *in[8];
int index = 0;
- (void)scan_ptr;
+ (void)scan;
(void)coeff_ptr;
// Pre-condition input (shift by two)
@@ -449,7 +449,7 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
in7 = _mm_srai_epi16(in7, 1);
}
- iscan_ptr += n_coeffs;
+ iscan += n_coeffs;
qcoeff_ptr += n_coeffs;
dqcoeff_ptr += n_coeffs;
n_coeffs = -n_coeffs;
@@ -518,8 +518,8 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -582,8 +582,8 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride,
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c
index bf874a09ec5..99c19389486 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c
@@ -18,11 +18,13 @@
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
-void vp9_fdct8x8_quant_ssse3(
- const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t *round_ptr, const int16_t *quant_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride,
+ tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *round_ptr,
+ const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
__m128i zero;
int pass;
@@ -52,7 +54,7 @@ void vp9_fdct8x8_quant_ssse3(
__m128i *in[8];
int index = 0;
- (void)scan_ptr;
+ (void)scan;
(void)coeff_ptr;
// Pre-condition input (shift by two)
@@ -280,7 +282,7 @@ void vp9_fdct8x8_quant_ssse3(
in7 = _mm_srai_epi16(in7, 1);
}
- iscan_ptr += n_coeffs;
+ iscan += n_coeffs;
qcoeff_ptr += n_coeffs;
dqcoeff_ptr += n_coeffs;
n_coeffs = -n_coeffs;
@@ -350,8 +352,8 @@ void vp9_fdct8x8_quant_ssse3(
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -427,8 +429,8 @@ void vp9_fdct8x8_quant_ssse3(
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_avx2.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_avx2.c
index 4bebc34d676..8dfdbd50f6c 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_avx2.c
@@ -15,7 +15,7 @@
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/x86/bitdepth_conversion_avx2.h"
-#include "vpx_dsp/x86/quantize_x86.h"
+#include "vpx_dsp/x86/quantize_sse2.h"
// Zero fill 8 positions in the output buffer.
static INLINE void store_zero_tran_low(tran_low_t *a) {
@@ -50,18 +50,18 @@ void vp9_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
__m128i eob;
__m256i round256, quant256, dequant256;
__m256i eob256, thr256;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
coeff_ptr += n_coeffs;
- iscan_ptr += n_coeffs;
+ iscan += n_coeffs;
qcoeff_ptr += n_coeffs;
dqcoeff_ptr += n_coeffs;
n_coeffs = -n_coeffs;
@@ -97,7 +97,7 @@ void vp9_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff256, dqcoeff_ptr + n_coeffs);
}
- eob256 = scan_eob_256((const __m256i *)(iscan_ptr + n_coeffs), &coeff256);
+ eob256 = scan_eob_256((const __m256i *)(iscan + n_coeffs), &coeff256);
n_coeffs += 8 * 2;
}
@@ -124,8 +124,7 @@ void vp9_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
coeff256 = _mm256_mullo_epi16(qcoeff256, dequant256);
store_tran_low(coeff256, dqcoeff_ptr + n_coeffs);
eob256 = _mm256_max_epi16(
- eob256,
- scan_eob_256((const __m256i *)(iscan_ptr + n_coeffs), &coeff256));
+ eob256, scan_eob_256((const __m256i *)(iscan + n_coeffs), &coeff256));
} else {
store_zero_tran_low(qcoeff_ptr + n_coeffs);
store_zero_tran_low(dqcoeff_ptr + n_coeffs);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c
index ca0ad4407e5..885220a7129 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c
@@ -21,20 +21,20 @@ void vp9_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
__m128i zero;
__m128i thr;
int16_t nzflag;
__m128i eob;
__m128i round, quant, dequant;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
coeff_ptr += n_coeffs;
- iscan_ptr += n_coeffs;
+ iscan += n_coeffs;
qcoeff_ptr += n_coeffs;
dqcoeff_ptr += n_coeffs;
n_coeffs = -n_coeffs;
@@ -100,8 +100,8 @@ void vp9_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -175,8 +175,8 @@ void vp9_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c
index 3b2d9a86617..85f83a66249 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_cx_iface.c
@@ -1151,6 +1151,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
unsigned char *cx_data;
cpi->svc.timebase_fac = timebase_units_to_ticks(timebase, 1);
+ cpi->svc.time_stamp_superframe = dst_time_stamp;
// Set up internal flags
if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
@@ -1625,6 +1626,14 @@ static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx,
return update_extra_cfg(ctx, &extra_cfg);
}
+static vpx_codec_err_t ctrl_set_postencode_drop(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ VP9_COMP *const cpi = ctx->cpi;
+ const unsigned int data = va_arg(args, unsigned int);
+ cpi->rc.ext_use_post_encode_drop = data;
+ return VPX_CODEC_OK;
+}
+
static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
{ VP8_COPY_REFERENCE, ctrl_copy_reference },
@@ -1668,6 +1677,7 @@ static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
{ VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
{ VP9E_SET_TARGET_LEVEL, ctrl_set_target_level },
{ VP9E_SET_ROW_MT, ctrl_set_row_mt },
+ { VP9E_SET_POSTENCODE_DROP, ctrl_set_postencode_drop },
{ VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST, ctrl_enable_motion_vector_unit_test },
{ VP9E_SET_SVC_INTER_LAYER_PRED, ctrl_set_svc_inter_layer_pred },
{ VP9E_SET_SVC_FRAME_DROP_LAYER, ctrl_set_svc_frame_drop_layer },
@@ -1690,7 +1700,7 @@ static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
{ 0,
{
// NOLINT
- 0, // g_usage
+ 0, // g_usage (unused)
8, // g_threads
0, // g_profile
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c
index fdff877682a..6a4cb9acf6f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.c
@@ -270,6 +270,9 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
RANGE_CHECK(ctx, row_mt, 0, 1);
ctx->pbi->row_mt = ctx->row_mt;
+ RANGE_CHECK(ctx, lpf_opt, 0, 1);
+ ctx->pbi->lpf_mt_opt = ctx->lpf_opt;
+
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
@@ -658,6 +661,13 @@ static vpx_codec_err_t ctrl_set_row_mt(vpx_codec_alg_priv_t *ctx,
return VPX_CODEC_OK;
}
+static vpx_codec_err_t ctrl_enable_lpf_opt(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ ctx->lpf_opt = va_arg(args, int);
+
+ return VPX_CODEC_OK;
+}
+
static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
{ VP8_COPY_REFERENCE, ctrl_copy_reference },
@@ -670,6 +680,7 @@ static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
{ VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
{ VP9_DECODE_SVC_SPATIAL_LAYER, ctrl_set_spatial_layer_svc },
{ VP9D_SET_ROW_MT, ctrl_set_row_mt },
+ { VP9D_SET_LOOP_FILTER_OPT, ctrl_enable_lpf_opt },
// Getters
{ VPXD_GET_LAST_QUANTIZER, ctrl_get_quantizer },
diff --git a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.h b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.h
index a1c335278d2..f60688c4db2 100644
--- a/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.h
+++ b/chromium/third_party/libvpx/source/libvpx/vp9/vp9_dx_iface.h
@@ -46,6 +46,7 @@ struct vpx_codec_alg_priv {
int svc_decoding;
int svc_spatial_layer;
int row_mt;
+ int lpf_opt;
};
#endif // VPX_VP9_VP9_DX_IFACE_H_
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx/src/vpx_encoder.c b/chromium/third_party/libvpx/source/libvpx/vpx/src/vpx_encoder.c
index 1cf2dca695a..ac1e3d061fb 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx/src/vpx_encoder.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx/src/vpx_encoder.c
@@ -154,7 +154,7 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_map_t *map;
int i;
- if (!iface || !cfg || usage > INT_MAX)
+ if (!iface || !cfg || usage != 0)
res = VPX_CODEC_INVALID_PARAM;
else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
res = VPX_CODEC_INCAPABLE;
@@ -163,12 +163,9 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
for (i = 0; i < iface->enc.cfg_map_count; ++i) {
map = iface->enc.cfg_maps + i;
- if (map->usage == (int)usage) {
- *cfg = map->cfg;
- cfg->g_usage = usage;
- res = VPX_CODEC_OK;
- break;
- }
+ *cfg = map->cfg;
+ res = VPX_CODEC_OK;
+ break;
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx/vp8cx.h b/chromium/third_party/libvpx/source/libvpx/vpx/vp8cx.h
index 7a232b0ef90..2f54223cae5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx/vp8cx.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx/vp8cx.h
@@ -227,8 +227,8 @@ enum vp8e_enc_control_id {
/*!\brief Codec control function to set constrained quality level.
*
- * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
- * set to #VPX_CQ.
+ * \attention For this value to be used vpx_codec_enc_cfg_t::rc_end_usage must
+ * be set to #VPX_CQ
* \note Valid range: 0..63
*
* Supported in codecs: VP8, VP9
@@ -660,6 +660,16 @@ enum vp8e_enc_control_id {
* 1. The default value is set to be 1.
*/
VP9E_SET_TPL,
+
+ /*!\brief Codec control function to enable postencode frame drop.
+ *
+ * This will allow encoder to drop frame after it's encoded.
+ *
+ * 0: Off (default), 1: Enabled
+ *
+ * Supported in codecs: VP9
+ */
+ VP9E_SET_POSTENCODE_DROP,
};
/*!\brief vpx 1-D scaling mode
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx/vp8dx.h b/chromium/third_party/libvpx/source/libvpx/vpx/vp8dx.h
index fd6030107d9..c31afc1e604 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx/vp8dx.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx/vp8dx.h
@@ -132,6 +132,16 @@ enum vp8_dec_control_id {
*/
VP9D_SET_ROW_MT,
+ /*!\brief Codec control function to set loopfilter optimization.
+ *
+ * 0 : off, Loop filter is done after all tiles have been decoded
+ * 1 : on, Loop filter is done immediately after decode without
+ * waiting for all threads to sync.
+ *
+ * Supported in codecs: VP9
+ */
+ VP9D_SET_LOOP_FILTER_OPT,
+
VP8_DECODER_CTRL_ID_MAX
};
@@ -191,6 +201,8 @@ VPX_CTRL_USE_TYPE(VP9_DECODE_SVC_SPATIAL_LAYER, int)
VPX_CTRL_USE_TYPE(VP9_SET_SKIP_LOOP_FILTER, int)
#define VPX_CTRL_VP9_DECODE_SET_ROW_MT
VPX_CTRL_USE_TYPE(VP9D_SET_ROW_MT, int)
+#define VPX_CTRL_VP9_SET_LOOP_FILTER_OPT
+VPX_CTRL_USE_TYPE(VP9D_SET_LOOP_FILTER_OPT, int)
/*!\endcond */
/*! @} - end defgroup vp8_decoder */
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx/vpx_encoder.h b/chromium/third_party/libvpx/source/libvpx/vpx/vpx_encoder.h
index bddd72191be..050d6955209 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx/vpx_encoder.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx/vpx_encoder.h
@@ -278,12 +278,9 @@ typedef struct vpx_codec_enc_cfg {
* generic settings (g)
*/
- /*!\brief Algorithm specific "usage" value
+ /*!\brief Deprecated: Algorithm specific "usage" value
*
- * Algorithms may define multiple values for usage, which may convey the
- * intent of how the application intends to use the stream. If this value
- * is non-zero, consult the documentation for the codec to determine its
- * meaning.
+ * This value must be zero.
*/
unsigned int g_usage;
@@ -482,8 +479,7 @@ typedef struct vpx_codec_enc_cfg {
* The quantizer is the most direct control over the quality of the
* encoded image. The range of valid values for the quantizer is codec
* specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
+ * values to use.
*/
unsigned int rc_min_quantizer;
@@ -492,8 +488,7 @@ typedef struct vpx_codec_enc_cfg {
* The quantizer is the most direct control over the quality of the
* encoded image. The range of valid values for the quantizer is codec
* specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
+ * values to use.
*/
unsigned int rc_max_quantizer;
@@ -799,7 +794,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(
*
* \param[in] iface Pointer to the algorithm interface to use.
* \param[out] cfg Configuration buffer to populate.
- * \param[in] reserved Must set to 0 for VP8 and VP9.
+ * \param[in] usage Must be set to 0.
*
* \retval #VPX_CODEC_OK
* The configuration was populated.
@@ -810,7 +805,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(
*/
vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *cfg,
- unsigned int reserved);
+ unsigned int usage);
/*!\brief Set or change configuration
*
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx/vpx_image.h b/chromium/third_party/libvpx/source/libvpx/vpx/vpx_image.h
index a1b7ce688c2..98be5966a24 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx/vpx_image.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx/vpx_image.h
@@ -150,21 +150,21 @@ vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
* storage for descriptor has been allocated elsewhere, and a descriptor is
* desired to "wrap" that storage.
*
- * \param[in] img Pointer to storage for descriptor. If this parameter
- * is NULL, the storage for the descriptor will be
- * allocated on the heap.
- * \param[in] fmt Format for the image
- * \param[in] d_w Width of the image
- * \param[in] d_h Height of the image
- * \param[in] align Alignment, in bytes, of each row in the image.
- * \param[in] img_data Storage to use for the image
+ * \param[in] img Pointer to storage for descriptor. If this
+ * parameter is NULL, the storage for the descriptor
+ * will be allocated on the heap.
+ * \param[in] fmt Format for the image
+ * \param[in] d_w Width of the image
+ * \param[in] d_h Height of the image
+ * \param[in] stride_align Alignment, in bytes, of each row in the image.
+ * \param[in] img_data Storage to use for the image
*
* \return Returns a pointer to the initialized image descriptor. If the img
* parameter is non-null, the value of the img parameter will be
* returned.
*/
vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
- unsigned int d_h, unsigned int align,
+ unsigned int d_h, unsigned int stride_align,
unsigned char *img_data);
/*!\brief Set the rectangle identifying the displayed portion of the image
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/add_noise.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/add_noise.c
index cda6ae8814a..6839e979284 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/add_noise.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/add_noise.c
@@ -52,6 +52,7 @@ int vpx_setup_noise(double sigma, int8_t *noise, int size) {
const int a_i = (int)(0.5 + 256 * gaussian(sigma, 0, i));
if (a_i) {
for (j = 0; j < a_i; ++j) {
+ if (next + j >= 256) goto set_noise;
char_dist[next + j] = (int8_t)i;
}
next = next + j;
@@ -63,6 +64,7 @@ int vpx_setup_noise(double sigma, int8_t *noise, int size) {
char_dist[next] = 0;
}
+set_noise:
for (i = 0; i < size; ++i) {
noise[i] = char_dist[rand() & 0xff]; // NOLINT
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/quantize_neon.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/quantize_neon.c
index a0a1e6dd5ad..b5d1e7ecb58 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/quantize_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/quantize_neon.c
@@ -20,12 +20,12 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
const int16x8_t one = vdupq_n_s16(1);
const int16x8_t neg_one = vdupq_n_s16(-1);
uint16x8_t eob_max;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
@@ -38,8 +38,8 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int16x8_t quant_shift = vld1q_s16(quant_shift_ptr);
const int16x8_t dequant = vld1q_s16(dequant_ptr);
// Add one because the eob does not index from 0.
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
@@ -65,10 +65,10 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
qcoeff = vandq_s16(qcoeff, zbin_mask);
// Set non-zero elements to -1 and use that to extract values for eob.
- eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), iscan);
+ eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan);
coeff_ptr += 8;
- iscan_ptr += 8;
+ iscan += 8;
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
@@ -90,8 +90,8 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
do {
// Add one because the eob is not its index.
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
@@ -118,10 +118,10 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
// Set non-zero elements to -1 and use that to extract values for eob.
eob_max =
- vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), iscan));
+ vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan));
coeff_ptr += 8;
- iscan_ptr += 8;
+ iscan += 8;
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
@@ -135,6 +135,9 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
} while (n_coeffs > 0);
}
+#ifdef __aarch64__
+ *eob_ptr = vmaxvq_u16(eob_max);
+#else
{
const uint16x4_t eob_max_0 =
vmax_u16(vget_low_u16(eob_max), vget_high_u16(eob_max));
@@ -142,6 +145,7 @@ void vpx_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const uint16x4_t eob_max_2 = vpmax_u16(eob_max_1, eob_max_1);
vst1_lane_u16(eob_ptr, eob_max_2, 0);
}
+#endif // __aarch64__
}
static INLINE int32x4_t extract_sign_bit(int32x4_t a) {
@@ -150,17 +154,19 @@ static INLINE int32x4_t extract_sign_bit(int32x4_t a) {
// Main difference is that zbin values are halved before comparison and dqcoeff
// values are divided by 2. zbin is rounded but dqcoeff is not.
-void vpx_quantize_b_32x32_neon(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void vpx_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
const int16x8_t one = vdupq_n_s16(1);
const int16x8_t neg_one = vdupq_n_s16(-1);
uint16x8_t eob_max;
int i;
- (void)scan_ptr;
+ (void)scan;
(void)n_coeffs; // Because we will always calculate 32*32.
(void)skip_block;
assert(!skip_block);
@@ -174,8 +180,8 @@ void vpx_quantize_b_32x32_neon(
const int16x8_t quant_shift = vld1q_s16(quant_shift_ptr);
const int16x8_t dequant = vld1q_s16(dequant_ptr);
// Add one because the eob does not index from 0.
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
@@ -203,10 +209,10 @@ void vpx_quantize_b_32x32_neon(
qcoeff = vandq_s16(qcoeff, zbin_mask);
// Set non-zero elements to -1 and use that to extract values for eob.
- eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), iscan);
+ eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan);
coeff_ptr += 8;
- iscan_ptr += 8;
+ iscan += 8;
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
@@ -234,8 +240,8 @@ void vpx_quantize_b_32x32_neon(
for (i = 1; i < 32 * 32 / 8; ++i) {
// Add one because the eob is not its index.
- const uint16x8_t iscan =
- vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan_ptr), one));
+ const uint16x8_t v_iscan =
+ vreinterpretq_u16_s16(vaddq_s16(vld1q_s16(iscan), one));
const int16x8_t coeff = load_tran_low_to_s16q(coeff_ptr);
const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
@@ -264,10 +270,10 @@ void vpx_quantize_b_32x32_neon(
// Set non-zero elements to -1 and use that to extract values for eob.
eob_max =
- vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), iscan));
+ vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan));
coeff_ptr += 8;
- iscan_ptr += 8;
+ iscan += 8;
store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
qcoeff_ptr += 8;
@@ -286,6 +292,9 @@ void vpx_quantize_b_32x32_neon(
}
}
+#ifdef __aarch64__
+ *eob_ptr = vmaxvq_u16(eob_max);
+#else
{
const uint16x4_t eob_max_0 =
vmax_u16(vget_low_u16(eob_max), vget_high_u16(eob_max));
@@ -293,4 +302,5 @@ void vpx_quantize_b_32x32_neon(
const uint16x4_t eob_max_2 = vpmax_u16(eob_max_1, eob_max_1);
vst1_lane_u16(eob_ptr, eob_max_2, 0);
}
+#endif // __aarch64__
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad4d_neon.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad4d_neon.c
index 535ec0f0d6d..06443c69956 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad4d_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad4d_neon.c
@@ -28,24 +28,25 @@ static INLINE uint8x8_t load_unaligned_2_buffers(const void *const buf0,
return vreinterpret_u8_u32(aa);
}
-static INLINE void sad4x_4d(const uint8_t *const src, const int src_stride,
- const uint8_t *const ref[4], const int ref_stride,
- const int height, uint32_t *const res) {
+static INLINE void sad4x_4d(const uint8_t *const src_ptr, const int src_stride,
+ const uint8_t *const ref_array[4],
+ const int ref_stride, const int height,
+ uint32_t *const res) {
int i;
uint16x8_t abs[2] = { vdupq_n_u16(0), vdupq_n_u16(0) };
uint16x4_t a[2];
uint32x4_t r;
- assert(!((intptr_t)src % sizeof(uint32_t)));
+ assert(!((intptr_t)src_ptr % sizeof(uint32_t)));
assert(!(src_stride % sizeof(uint32_t)));
for (i = 0; i < height; ++i) {
const uint8x8_t s = vreinterpret_u8_u32(
- vld1_dup_u32((const uint32_t *)(src + i * src_stride)));
- const uint8x8_t ref01 = load_unaligned_2_buffers(ref[0] + i * ref_stride,
- ref[1] + i * ref_stride);
- const uint8x8_t ref23 = load_unaligned_2_buffers(ref[2] + i * ref_stride,
- ref[3] + i * ref_stride);
+ vld1_dup_u32((const uint32_t *)(src_ptr + i * src_stride)));
+ const uint8x8_t ref01 = load_unaligned_2_buffers(
+ ref_array[0] + i * ref_stride, ref_array[1] + i * ref_stride);
+ const uint8x8_t ref23 = load_unaligned_2_buffers(
+ ref_array[2] + i * ref_stride, ref_array[3] + i * ref_stride);
abs[0] = vabal_u8(abs[0], s, ref01);
abs[1] = vabal_u8(abs[1], s, ref23);
}
@@ -56,16 +57,16 @@ static INLINE void sad4x_4d(const uint8_t *const src, const int src_stride,
vst1q_u32(res, r);
}
-void vpx_sad4x4x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad4x4x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad4x_4d(src, src_stride, ref, ref_stride, 4, res);
+ sad4x_4d(src_ptr, src_stride, ref_array, ref_stride, 4, res);
}
-void vpx_sad4x8x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad4x8x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad4x_4d(src, src_stride, ref, ref_stride, 8, res);
+ sad4x_4d(src_ptr, src_stride, ref_array, ref_stride, 8, res);
}
////////////////////////////////////////////////////////////////////////////////
@@ -137,17 +138,18 @@ static INLINE void sad_4096_pel_final_neon(const uint16x8_t *sum /*[8]*/,
vst1q_u32(res, vcombine_u32(d0, d1));
}
-static INLINE void sad8x_4d(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+static INLINE void sad8x_4d(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res, const int height) {
int i, j;
- const uint8_t *ref_loop[4] = { ref[0], ref[1], ref[2], ref[3] };
+ const uint8_t *ref_loop[4] = { ref_array[0], ref_array[1], ref_array[2],
+ ref_array[3] };
uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
vdupq_n_u16(0) };
for (i = 0; i < height; ++i) {
- const uint8x8_t s = vld1_u8(src);
- src += src_stride;
+ const uint8x8_t s = vld1_u8(src_ptr);
+ src_ptr += src_stride;
for (j = 0; j < 4; ++j) {
const uint8x8_t b_u8 = vld1_u8(ref_loop[j]);
ref_loop[j] += ref_stride;
@@ -158,44 +160,45 @@ static INLINE void sad8x_4d(const uint8_t *src, int src_stride,
sad_512_pel_final_neon(sum, res);
}
-void vpx_sad8x4x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad8x4x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad8x_4d(src, src_stride, ref, ref_stride, res, 4);
+ sad8x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 4);
}
-void vpx_sad8x8x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad8x8x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad8x_4d(src, src_stride, ref, ref_stride, res, 8);
+ sad8x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 8);
}
-void vpx_sad8x16x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad8x16x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad8x_4d(src, src_stride, ref, ref_stride, res, 16);
+ sad8x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 16);
}
////////////////////////////////////////////////////////////////////////////////
-static INLINE void sad16_neon(const uint8_t *ref, const uint8x16_t src,
+static INLINE void sad16_neon(const uint8_t *ref_ptr, const uint8x16_t src_ptr,
uint16x8_t *const sum) {
- const uint8x16_t r = vld1q_u8(ref);
- *sum = vabal_u8(*sum, vget_low_u8(src), vget_low_u8(r));
- *sum = vabal_u8(*sum, vget_high_u8(src), vget_high_u8(r));
+ const uint8x16_t r = vld1q_u8(ref_ptr);
+ *sum = vabal_u8(*sum, vget_low_u8(src_ptr), vget_low_u8(r));
+ *sum = vabal_u8(*sum, vget_high_u8(src_ptr), vget_high_u8(r));
}
-static INLINE void sad16x_4d(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+static INLINE void sad16x_4d(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res, const int height) {
int i, j;
- const uint8_t *ref_loop[4] = { ref[0], ref[1], ref[2], ref[3] };
+ const uint8_t *ref_loop[4] = { ref_array[0], ref_array[1], ref_array[2],
+ ref_array[3] };
uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
vdupq_n_u16(0) };
for (i = 0; i < height; ++i) {
- const uint8x16_t s = vld1q_u8(src);
- src += src_stride;
+ const uint8x16_t s = vld1q_u8(src_ptr);
+ src_ptr += src_stride;
for (j = 0; j < 4; ++j) {
sad16_neon(ref_loop[j], s, &sum[j]);
ref_loop[j] += ref_stride;
@@ -205,50 +208,51 @@ static INLINE void sad16x_4d(const uint8_t *src, int src_stride,
sad_512_pel_final_neon(sum, res);
}
-void vpx_sad16x8x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad16x8x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad16x_4d(src, src_stride, ref, ref_stride, res, 8);
+ sad16x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 8);
}
-void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad16x16x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad16x_4d(src, src_stride, ref, ref_stride, res, 16);
+ sad16x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 16);
}
-void vpx_sad16x32x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad16x32x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
- sad16x_4d(src, src_stride, ref, ref_stride, res, 32);
+ sad16x_4d(src_ptr, src_stride, ref_array, ref_stride, res, 32);
}
////////////////////////////////////////////////////////////////////////////////
-static INLINE void sad32x_4d(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+static INLINE void sad32x_4d(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
const int height, uint16x8_t *const sum) {
int i;
- const uint8_t *ref_loop[4] = { ref[0], ref[1], ref[2], ref[3] };
+ const uint8_t *ref_loop[4] = { ref_array[0], ref_array[1], ref_array[2],
+ ref_array[3] };
sum[0] = sum[1] = sum[2] = sum[3] = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
uint8x16_t s;
- s = vld1q_u8(src + 0 * 16);
+ s = vld1q_u8(src_ptr + 0 * 16);
sad16_neon(ref_loop[0] + 0 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 0 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 0 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 0 * 16, s, &sum[3]);
- s = vld1q_u8(src + 1 * 16);
+ s = vld1q_u8(src_ptr + 1 * 16);
sad16_neon(ref_loop[0] + 1 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 1 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 1 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 1 * 16, s, &sum[3]);
- src += src_stride;
+ src_ptr += src_stride;
ref_loop[0] += ref_stride;
ref_loop[1] += ref_stride;
ref_loop[2] += ref_stride;
@@ -256,68 +260,69 @@ static INLINE void sad32x_4d(const uint8_t *src, int src_stride,
}
}
-void vpx_sad32x16x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad32x16x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
uint16x8_t sum[4];
- sad32x_4d(src, src_stride, ref, ref_stride, 16, sum);
+ sad32x_4d(src_ptr, src_stride, ref_array, ref_stride, 16, sum);
sad_512_pel_final_neon(sum, res);
}
-void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad32x32x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
uint16x8_t sum[4];
- sad32x_4d(src, src_stride, ref, ref_stride, 32, sum);
+ sad32x_4d(src_ptr, src_stride, ref_array, ref_stride, 32, sum);
sad_1024_pel_final_neon(sum, res);
}
-void vpx_sad32x64x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad32x64x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
uint16x8_t sum[4];
- sad32x_4d(src, src_stride, ref, ref_stride, 64, sum);
+ sad32x_4d(src_ptr, src_stride, ref_array, ref_stride, 64, sum);
sad_2048_pel_final_neon(sum, res);
}
////////////////////////////////////////////////////////////////////////////////
-void vpx_sad64x32x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad64x32x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
int i;
- const uint8_t *ref_loop[4] = { ref[0], ref[1], ref[2], ref[3] };
+ const uint8_t *ref_loop[4] = { ref_array[0], ref_array[1], ref_array[2],
+ ref_array[3] };
uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
vdupq_n_u16(0) };
for (i = 0; i < 32; ++i) {
uint8x16_t s;
- s = vld1q_u8(src + 0 * 16);
+ s = vld1q_u8(src_ptr + 0 * 16);
sad16_neon(ref_loop[0] + 0 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 0 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 0 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 0 * 16, s, &sum[3]);
- s = vld1q_u8(src + 1 * 16);
+ s = vld1q_u8(src_ptr + 1 * 16);
sad16_neon(ref_loop[0] + 1 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 1 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 1 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 1 * 16, s, &sum[3]);
- s = vld1q_u8(src + 2 * 16);
+ s = vld1q_u8(src_ptr + 2 * 16);
sad16_neon(ref_loop[0] + 2 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 2 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 2 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 2 * 16, s, &sum[3]);
- s = vld1q_u8(src + 3 * 16);
+ s = vld1q_u8(src_ptr + 3 * 16);
sad16_neon(ref_loop[0] + 3 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 3 * 16, s, &sum[1]);
sad16_neon(ref_loop[2] + 3 * 16, s, &sum[2]);
sad16_neon(ref_loop[3] + 3 * 16, s, &sum[3]);
- src += src_stride;
+ src_ptr += src_stride;
ref_loop[0] += ref_stride;
ref_loop[1] += ref_stride;
ref_loop[2] += ref_stride;
@@ -327,11 +332,12 @@ void vpx_sad64x32x4d_neon(const uint8_t *src, int src_stride,
sad_2048_pel_final_neon(sum, res);
}
-void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad64x64x4d_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t *res) {
int i;
- const uint8_t *ref_loop[4] = { ref[0], ref[1], ref[2], ref[3] };
+ const uint8_t *ref_loop[4] = { ref_array[0], ref_array[1], ref_array[2],
+ ref_array[3] };
uint16x8_t sum[8] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
vdupq_n_u16(0), vdupq_n_u16(0) };
@@ -339,31 +345,31 @@ void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
for (i = 0; i < 64; ++i) {
uint8x16_t s;
- s = vld1q_u8(src + 0 * 16);
+ s = vld1q_u8(src_ptr + 0 * 16);
sad16_neon(ref_loop[0] + 0 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 0 * 16, s, &sum[2]);
sad16_neon(ref_loop[2] + 0 * 16, s, &sum[4]);
sad16_neon(ref_loop[3] + 0 * 16, s, &sum[6]);
- s = vld1q_u8(src + 1 * 16);
+ s = vld1q_u8(src_ptr + 1 * 16);
sad16_neon(ref_loop[0] + 1 * 16, s, &sum[0]);
sad16_neon(ref_loop[1] + 1 * 16, s, &sum[2]);
sad16_neon(ref_loop[2] + 1 * 16, s, &sum[4]);
sad16_neon(ref_loop[3] + 1 * 16, s, &sum[6]);
- s = vld1q_u8(src + 2 * 16);
+ s = vld1q_u8(src_ptr + 2 * 16);
sad16_neon(ref_loop[0] + 2 * 16, s, &sum[1]);
sad16_neon(ref_loop[1] + 2 * 16, s, &sum[3]);
sad16_neon(ref_loop[2] + 2 * 16, s, &sum[5]);
sad16_neon(ref_loop[3] + 2 * 16, s, &sum[7]);
- s = vld1q_u8(src + 3 * 16);
+ s = vld1q_u8(src_ptr + 3 * 16);
sad16_neon(ref_loop[0] + 3 * 16, s, &sum[1]);
sad16_neon(ref_loop[1] + 3 * 16, s, &sum[3]);
sad16_neon(ref_loop[2] + 3 * 16, s, &sum[5]);
sad16_neon(ref_loop[3] + 3 * 16, s, &sum[7]);
- src += src_stride;
+ src_ptr += src_stride;
ref_loop[0] += ref_stride;
ref_loop[1] += ref_stride;
ref_loop[2] += ref_stride;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad_neon.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad_neon.c
index 9518a166bbf..1ce66d3e833 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/sad_neon.c
@@ -73,128 +73,132 @@ uint32_t vpx_sad4x8_avg_neon(const uint8_t *src_ptr, int src_stride,
return vget_lane_u32(horizontal_add_uint16x8(abs), 0);
}
-static INLINE uint16x8_t sad8x(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, const int height) {
+static INLINE uint16x8_t sad8x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x8_t a_u8 = vld1_u8(a);
- const uint8x8_t b_u8 = vld1_u8(b);
- a += a_stride;
- b += b_stride;
+ const uint8x8_t a_u8 = vld1_u8(src_ptr);
+ const uint8x8_t b_u8 = vld1_u8(ref_ptr);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
abs = vabal_u8(abs, a_u8, b_u8);
}
return abs;
}
-static INLINE uint16x8_t sad8x_avg(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- const uint8_t *c, const int height) {
+static INLINE uint16x8_t sad8x_avg(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred,
+ const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x8_t a_u8 = vld1_u8(a);
- const uint8x8_t b_u8 = vld1_u8(b);
- const uint8x8_t c_u8 = vld1_u8(c);
+ const uint8x8_t a_u8 = vld1_u8(src_ptr);
+ const uint8x8_t b_u8 = vld1_u8(ref_ptr);
+ const uint8x8_t c_u8 = vld1_u8(second_pred);
const uint8x8_t avg = vrhadd_u8(b_u8, c_u8);
- a += a_stride;
- b += b_stride;
- c += 8;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ second_pred += 8;
abs = vabal_u8(abs, a_u8, avg);
}
return abs;
}
-#define sad8xN(n) \
- uint32_t vpx_sad8x##n##_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride) { \
- const uint16x8_t abs = sad8x(src, src_stride, ref, ref_stride, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
- } \
- \
- uint32_t vpx_sad8x##n##_avg_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
- const uint16x8_t abs = \
- sad8x_avg(src, src_stride, ref, ref_stride, second_pred, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+#define sad8xN(n) \
+ uint32_t vpx_sad8x##n##_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ const uint16x8_t abs = sad8x(src_ptr, src_stride, ref_ptr, ref_stride, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+ } \
+ \
+ uint32_t vpx_sad8x##n##_avg_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ const uint8_t *second_pred) { \
+ const uint16x8_t abs = \
+ sad8x_avg(src_ptr, src_stride, ref_ptr, ref_stride, second_pred, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
}
sad8xN(4);
sad8xN(8);
sad8xN(16);
-static INLINE uint16x8_t sad16x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static INLINE uint16x8_t sad16x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_u8 = vld1q_u8(a);
- const uint8x16_t b_u8 = vld1q_u8(b);
- a += a_stride;
- b += b_stride;
+ const uint8x16_t a_u8 = vld1q_u8(src_ptr);
+ const uint8x16_t b_u8 = vld1q_u8(ref_ptr);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
abs = vabal_u8(abs, vget_low_u8(a_u8), vget_low_u8(b_u8));
abs = vabal_u8(abs, vget_high_u8(a_u8), vget_high_u8(b_u8));
}
return abs;
}
-static INLINE uint16x8_t sad16x_avg(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- const uint8_t *c, const int height) {
+static INLINE uint16x8_t sad16x_avg(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred,
+ const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_u8 = vld1q_u8(a);
- const uint8x16_t b_u8 = vld1q_u8(b);
- const uint8x16_t c_u8 = vld1q_u8(c);
+ const uint8x16_t a_u8 = vld1q_u8(src_ptr);
+ const uint8x16_t b_u8 = vld1q_u8(ref_ptr);
+ const uint8x16_t c_u8 = vld1q_u8(second_pred);
const uint8x16_t avg = vrhaddq_u8(b_u8, c_u8);
- a += a_stride;
- b += b_stride;
- c += 16;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ second_pred += 16;
abs = vabal_u8(abs, vget_low_u8(a_u8), vget_low_u8(avg));
abs = vabal_u8(abs, vget_high_u8(a_u8), vget_high_u8(avg));
}
return abs;
}
-#define sad16xN(n) \
- uint32_t vpx_sad16x##n##_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride) { \
- const uint16x8_t abs = sad16x(src, src_stride, ref, ref_stride, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
- } \
- \
- uint32_t vpx_sad16x##n##_avg_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
- const uint16x8_t abs = \
- sad16x_avg(src, src_stride, ref, ref_stride, second_pred, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+#define sad16xN(n) \
+ uint32_t vpx_sad16x##n##_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ const uint16x8_t abs = \
+ sad16x(src_ptr, src_stride, ref_ptr, ref_stride, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+ } \
+ \
+ uint32_t vpx_sad16x##n##_avg_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ const uint8_t *second_pred) { \
+ const uint16x8_t abs = \
+ sad16x_avg(src_ptr, src_stride, ref_ptr, ref_stride, second_pred, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
}
sad16xN(8);
sad16xN(16);
sad16xN(32);
-static INLINE uint16x8_t sad32x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static INLINE uint16x8_t sad32x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_lo = vld1q_u8(a);
- const uint8x16_t a_hi = vld1q_u8(a + 16);
- const uint8x16_t b_lo = vld1q_u8(b);
- const uint8x16_t b_hi = vld1q_u8(b + 16);
- a += a_stride;
- b += b_stride;
+ const uint8x16_t a_lo = vld1q_u8(src_ptr);
+ const uint8x16_t a_hi = vld1q_u8(src_ptr + 16);
+ const uint8x16_t b_lo = vld1q_u8(ref_ptr);
+ const uint8x16_t b_hi = vld1q_u8(ref_ptr + 16);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
abs = vabal_u8(abs, vget_low_u8(a_lo), vget_low_u8(b_lo));
abs = vabal_u8(abs, vget_high_u8(a_lo), vget_high_u8(b_lo));
abs = vabal_u8(abs, vget_low_u8(a_hi), vget_low_u8(b_hi));
@@ -203,24 +207,25 @@ static INLINE uint16x8_t sad32x(const uint8_t *a, int a_stride,
return abs;
}
-static INLINE uint16x8_t sad32x_avg(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- const uint8_t *c, const int height) {
+static INLINE uint16x8_t sad32x_avg(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred,
+ const int height) {
int i;
uint16x8_t abs = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_lo = vld1q_u8(a);
- const uint8x16_t a_hi = vld1q_u8(a + 16);
- const uint8x16_t b_lo = vld1q_u8(b);
- const uint8x16_t b_hi = vld1q_u8(b + 16);
- const uint8x16_t c_lo = vld1q_u8(c);
- const uint8x16_t c_hi = vld1q_u8(c + 16);
+ const uint8x16_t a_lo = vld1q_u8(src_ptr);
+ const uint8x16_t a_hi = vld1q_u8(src_ptr + 16);
+ const uint8x16_t b_lo = vld1q_u8(ref_ptr);
+ const uint8x16_t b_hi = vld1q_u8(ref_ptr + 16);
+ const uint8x16_t c_lo = vld1q_u8(second_pred);
+ const uint8x16_t c_hi = vld1q_u8(second_pred + 16);
const uint8x16_t avg_lo = vrhaddq_u8(b_lo, c_lo);
const uint8x16_t avg_hi = vrhaddq_u8(b_hi, c_hi);
- a += a_stride;
- b += b_stride;
- c += 32;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ second_pred += 32;
abs = vabal_u8(abs, vget_low_u8(a_lo), vget_low_u8(avg_lo));
abs = vabal_u8(abs, vget_high_u8(a_lo), vget_high_u8(avg_lo));
abs = vabal_u8(abs, vget_low_u8(a_hi), vget_low_u8(avg_hi));
@@ -229,43 +234,44 @@ static INLINE uint16x8_t sad32x_avg(const uint8_t *a, int a_stride,
return abs;
}
-#define sad32xN(n) \
- uint32_t vpx_sad32x##n##_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride) { \
- const uint16x8_t abs = sad32x(src, src_stride, ref, ref_stride, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
- } \
- \
- uint32_t vpx_sad32x##n##_avg_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
- const uint16x8_t abs = \
- sad32x_avg(src, src_stride, ref, ref_stride, second_pred, n); \
- return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+#define sad32xN(n) \
+ uint32_t vpx_sad32x##n##_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ const uint16x8_t abs = \
+ sad32x(src_ptr, src_stride, ref_ptr, ref_stride, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
+ } \
+ \
+ uint32_t vpx_sad32x##n##_avg_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ const uint8_t *second_pred) { \
+ const uint16x8_t abs = \
+ sad32x_avg(src_ptr, src_stride, ref_ptr, ref_stride, second_pred, n); \
+ return vget_lane_u32(horizontal_add_uint16x8(abs), 0); \
}
sad32xN(16);
sad32xN(32);
sad32xN(64);
-static INLINE uint32x4_t sad64x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static INLINE uint32x4_t sad64x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
const int height) {
int i;
uint16x8_t abs_0 = vdupq_n_u16(0);
uint16x8_t abs_1 = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_0 = vld1q_u8(a);
- const uint8x16_t a_1 = vld1q_u8(a + 16);
- const uint8x16_t a_2 = vld1q_u8(a + 32);
- const uint8x16_t a_3 = vld1q_u8(a + 48);
- const uint8x16_t b_0 = vld1q_u8(b);
- const uint8x16_t b_1 = vld1q_u8(b + 16);
- const uint8x16_t b_2 = vld1q_u8(b + 32);
- const uint8x16_t b_3 = vld1q_u8(b + 48);
- a += a_stride;
- b += b_stride;
+ const uint8x16_t a_0 = vld1q_u8(src_ptr);
+ const uint8x16_t a_1 = vld1q_u8(src_ptr + 16);
+ const uint8x16_t a_2 = vld1q_u8(src_ptr + 32);
+ const uint8x16_t a_3 = vld1q_u8(src_ptr + 48);
+ const uint8x16_t b_0 = vld1q_u8(ref_ptr);
+ const uint8x16_t b_1 = vld1q_u8(ref_ptr + 16);
+ const uint8x16_t b_2 = vld1q_u8(ref_ptr + 32);
+ const uint8x16_t b_3 = vld1q_u8(ref_ptr + 48);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
abs_0 = vabal_u8(abs_0, vget_low_u8(a_0), vget_low_u8(b_0));
abs_0 = vabal_u8(abs_0, vget_high_u8(a_0), vget_high_u8(b_0));
abs_0 = vabal_u8(abs_0, vget_low_u8(a_1), vget_low_u8(b_1));
@@ -282,33 +288,34 @@ static INLINE uint32x4_t sad64x(const uint8_t *a, int a_stride,
}
}
-static INLINE uint32x4_t sad64x_avg(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- const uint8_t *c, const int height) {
+static INLINE uint32x4_t sad64x_avg(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred,
+ const int height) {
int i;
uint16x8_t abs_0 = vdupq_n_u16(0);
uint16x8_t abs_1 = vdupq_n_u16(0);
for (i = 0; i < height; ++i) {
- const uint8x16_t a_0 = vld1q_u8(a);
- const uint8x16_t a_1 = vld1q_u8(a + 16);
- const uint8x16_t a_2 = vld1q_u8(a + 32);
- const uint8x16_t a_3 = vld1q_u8(a + 48);
- const uint8x16_t b_0 = vld1q_u8(b);
- const uint8x16_t b_1 = vld1q_u8(b + 16);
- const uint8x16_t b_2 = vld1q_u8(b + 32);
- const uint8x16_t b_3 = vld1q_u8(b + 48);
- const uint8x16_t c_0 = vld1q_u8(c);
- const uint8x16_t c_1 = vld1q_u8(c + 16);
- const uint8x16_t c_2 = vld1q_u8(c + 32);
- const uint8x16_t c_3 = vld1q_u8(c + 48);
+ const uint8x16_t a_0 = vld1q_u8(src_ptr);
+ const uint8x16_t a_1 = vld1q_u8(src_ptr + 16);
+ const uint8x16_t a_2 = vld1q_u8(src_ptr + 32);
+ const uint8x16_t a_3 = vld1q_u8(src_ptr + 48);
+ const uint8x16_t b_0 = vld1q_u8(ref_ptr);
+ const uint8x16_t b_1 = vld1q_u8(ref_ptr + 16);
+ const uint8x16_t b_2 = vld1q_u8(ref_ptr + 32);
+ const uint8x16_t b_3 = vld1q_u8(ref_ptr + 48);
+ const uint8x16_t c_0 = vld1q_u8(second_pred);
+ const uint8x16_t c_1 = vld1q_u8(second_pred + 16);
+ const uint8x16_t c_2 = vld1q_u8(second_pred + 32);
+ const uint8x16_t c_3 = vld1q_u8(second_pred + 48);
const uint8x16_t avg_0 = vrhaddq_u8(b_0, c_0);
const uint8x16_t avg_1 = vrhaddq_u8(b_1, c_1);
const uint8x16_t avg_2 = vrhaddq_u8(b_2, c_2);
const uint8x16_t avg_3 = vrhaddq_u8(b_3, c_3);
- a += a_stride;
- b += b_stride;
- c += 64;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ second_pred += 64;
abs_0 = vabal_u8(abs_0, vget_low_u8(a_0), vget_low_u8(avg_0));
abs_0 = vabal_u8(abs_0, vget_high_u8(a_0), vget_high_u8(avg_0));
abs_0 = vabal_u8(abs_0, vget_low_u8(a_1), vget_low_u8(avg_1));
@@ -325,19 +332,20 @@ static INLINE uint32x4_t sad64x_avg(const uint8_t *a, int a_stride,
}
}
-#define sad64xN(n) \
- uint32_t vpx_sad64x##n##_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride) { \
- const uint32x4_t abs = sad64x(src, src_stride, ref, ref_stride, n); \
- return vget_lane_u32(horizontal_add_uint32x4(abs), 0); \
- } \
- \
- uint32_t vpx_sad64x##n##_avg_neon(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
- const uint32x4_t abs = \
- sad64x_avg(src, src_stride, ref, ref_stride, second_pred, n); \
- return vget_lane_u32(horizontal_add_uint32x4(abs), 0); \
+#define sad64xN(n) \
+ uint32_t vpx_sad64x##n##_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ const uint32x4_t abs = \
+ sad64x(src_ptr, src_stride, ref_ptr, ref_stride, n); \
+ return vget_lane_u32(horizontal_add_uint32x4(abs), 0); \
+ } \
+ \
+ uint32_t vpx_sad64x##n##_avg_neon(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ const uint8_t *second_pred) { \
+ const uint32x4_t abs = \
+ sad64x_avg(src_ptr, src_stride, ref_ptr, ref_stride, second_pred, n); \
+ return vget_lane_u32(horizontal_add_uint32x4(abs), 0); \
}
sad64xN(32);
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/subpel_variance_neon.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/subpel_variance_neon.c
index 4f58a7832a5..37bfd1cd1fd 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/subpel_variance_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/subpel_variance_neon.c
@@ -97,30 +97,30 @@ static void var_filter_block2d_bil_w16(const uint8_t *src_ptr,
// 4xM filter writes an extra row to fdata because it processes two rows at a
// time.
-#define sub_pixel_varianceNxM(n, m) \
- uint32_t vpx_sub_pixel_variance##n##x##m##_neon( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint8_t temp0[n * (m + (n == 4 ? 2 : 1))]; \
- uint8_t temp1[n * m]; \
- \
- if (n == 4) { \
- var_filter_block2d_bil_w4(a, temp0, a_stride, 1, (m + 2), \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w4(temp0, temp1, n, n, m, \
- bilinear_filters[yoffset]); \
- } else if (n == 8) { \
- var_filter_block2d_bil_w8(a, temp0, a_stride, 1, (m + 1), \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w8(temp0, temp1, n, n, m, \
- bilinear_filters[yoffset]); \
- } else { \
- var_filter_block2d_bil_w16(a, temp0, a_stride, 1, (m + 1), n, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w16(temp0, temp1, n, n, m, n, \
- bilinear_filters[yoffset]); \
- } \
- return vpx_variance##n##x##m(temp1, n, b, b_stride, sse); \
+#define sub_pixel_varianceNxM(n, m) \
+ uint32_t vpx_sub_pixel_variance##n##x##m##_neon( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint8_t temp0[n * (m + (n == 4 ? 2 : 1))]; \
+ uint8_t temp1[n * m]; \
+ \
+ if (n == 4) { \
+ var_filter_block2d_bil_w4(src_ptr, temp0, src_stride, 1, (m + 2), \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w4(temp0, temp1, n, n, m, \
+ bilinear_filters[y_offset]); \
+ } else if (n == 8) { \
+ var_filter_block2d_bil_w8(src_ptr, temp0, src_stride, 1, (m + 1), \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w8(temp0, temp1, n, n, m, \
+ bilinear_filters[y_offset]); \
+ } else { \
+ var_filter_block2d_bil_w16(src_ptr, temp0, src_stride, 1, (m + 1), n, \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w16(temp0, temp1, n, n, m, n, \
+ bilinear_filters[y_offset]); \
+ } \
+ return vpx_variance##n##x##m(temp1, n, ref_ptr, ref_stride, sse); \
}
sub_pixel_varianceNxM(4, 4);
@@ -139,34 +139,34 @@ sub_pixel_varianceNxM(64, 64);
// 4xM filter writes an extra row to fdata because it processes two rows at a
// time.
-#define sub_pixel_avg_varianceNxM(n, m) \
- uint32_t vpx_sub_pixel_avg_variance##n##x##m##_neon( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint8_t temp0[n * (m + (n == 4 ? 2 : 1))]; \
- uint8_t temp1[n * m]; \
- \
- if (n == 4) { \
- var_filter_block2d_bil_w4(a, temp0, a_stride, 1, (m + 2), \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w4(temp0, temp1, n, n, m, \
- bilinear_filters[yoffset]); \
- } else if (n == 8) { \
- var_filter_block2d_bil_w8(a, temp0, a_stride, 1, (m + 1), \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w8(temp0, temp1, n, n, m, \
- bilinear_filters[yoffset]); \
- } else { \
- var_filter_block2d_bil_w16(a, temp0, a_stride, 1, (m + 1), n, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_w16(temp0, temp1, n, n, m, n, \
- bilinear_filters[yoffset]); \
- } \
- \
- vpx_comp_avg_pred(temp0, second_pred, n, m, temp1, n); \
- \
- return vpx_variance##n##x##m(temp0, n, b, b_stride, sse); \
+#define sub_pixel_avg_varianceNxM(n, m) \
+ uint32_t vpx_sub_pixel_avg_variance##n##x##m##_neon( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint8_t temp0[n * (m + (n == 4 ? 2 : 1))]; \
+ uint8_t temp1[n * m]; \
+ \
+ if (n == 4) { \
+ var_filter_block2d_bil_w4(src_ptr, temp0, src_stride, 1, (m + 2), \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w4(temp0, temp1, n, n, m, \
+ bilinear_filters[y_offset]); \
+ } else if (n == 8) { \
+ var_filter_block2d_bil_w8(src_ptr, temp0, src_stride, 1, (m + 1), \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w8(temp0, temp1, n, n, m, \
+ bilinear_filters[y_offset]); \
+ } else { \
+ var_filter_block2d_bil_w16(src_ptr, temp0, src_stride, 1, (m + 1), n, \
+ bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_w16(temp0, temp1, n, n, m, n, \
+ bilinear_filters[y_offset]); \
+ } \
+ \
+ vpx_comp_avg_pred(temp0, second_pred, n, m, temp1, n); \
+ \
+ return vpx_variance##n##x##m(temp0, n, ref_ptr, ref_stride, sse); \
}
sub_pixel_avg_varianceNxM(4, 4);
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/variance_neon.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/variance_neon.c
index 61c2c16a724..77b1015b742 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/variance_neon.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/arm/variance_neon.c
@@ -27,8 +27,9 @@
// this limit.
// Process a block of width 4 four rows at a time.
-static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int h, uint32_t *sse, int *sum) {
+static void variance_neon_w4x4(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride, int h,
+ uint32_t *sse, int *sum) {
int i;
int16x8_t sum_s16 = vdupq_n_s16(0);
int32x4_t sse_lo_s32 = vdupq_n_s32(0);
@@ -38,8 +39,8 @@ static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
assert(h <= 256);
for (i = 0; i < h; i += 4) {
- const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);
- const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);
+ const uint8x16_t a_u8 = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t b_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
const uint16x8_t diff_lo_u16 =
vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8));
const uint16x8_t diff_hi_u16 =
@@ -61,8 +62,8 @@ static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_hi_s16),
vget_high_s16(diff_hi_s16));
- a += 4 * a_stride;
- b += 4 * b_stride;
+ src_ptr += 4 * src_stride;
+ ref_ptr += 4 * ref_stride;
}
*sum = vget_lane_s32(horizontal_add_int16x8(sum_s16), 0);
@@ -72,9 +73,9 @@ static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
}
// Process a block of any size where the width is divisible by 16.
-static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int w, int h, uint32_t *sse,
- int *sum) {
+static void variance_neon_w16(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride, int w,
+ int h, uint32_t *sse, int *sum) {
int i, j;
int16x8_t sum_s16 = vdupq_n_s16(0);
int32x4_t sse_lo_s32 = vdupq_n_s32(0);
@@ -86,8 +87,8 @@ static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
for (i = 0; i < h; ++i) {
for (j = 0; j < w; j += 16) {
- const uint8x16_t a_u8 = vld1q_u8(a + j);
- const uint8x16_t b_u8 = vld1q_u8(b + j);
+ const uint8x16_t a_u8 = vld1q_u8(src_ptr + j);
+ const uint8x16_t b_u8 = vld1q_u8(ref_ptr + j);
const uint16x8_t diff_lo_u16 =
vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8));
@@ -110,8 +111,8 @@ static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_hi_s16),
vget_high_s16(diff_hi_s16));
}
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
*sum = vget_lane_s32(horizontal_add_int16x8(sum_s16), 0);
@@ -121,8 +122,9 @@ static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
}
// Process a block of width 8 two rows at a time.
-static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int h, uint32_t *sse, int *sum) {
+static void variance_neon_w8x2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride, int h,
+ uint32_t *sse, int *sum) {
int i = 0;
int16x8_t sum_s16 = vdupq_n_s16(0);
int32x4_t sse_lo_s32 = vdupq_n_s32(0);
@@ -132,10 +134,10 @@ static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
assert(h <= 128);
do {
- const uint8x8_t a_0_u8 = vld1_u8(a);
- const uint8x8_t a_1_u8 = vld1_u8(a + a_stride);
- const uint8x8_t b_0_u8 = vld1_u8(b);
- const uint8x8_t b_1_u8 = vld1_u8(b + b_stride);
+ const uint8x8_t a_0_u8 = vld1_u8(src_ptr);
+ const uint8x8_t a_1_u8 = vld1_u8(src_ptr + src_stride);
+ const uint8x8_t b_0_u8 = vld1_u8(ref_ptr);
+ const uint8x8_t b_1_u8 = vld1_u8(ref_ptr + ref_stride);
const uint16x8_t diff_0_u16 = vsubl_u8(a_0_u8, b_0_u8);
const uint16x8_t diff_1_u16 = vsubl_u8(a_1_u8, b_1_u8);
const int16x8_t diff_0_s16 = vreinterpretq_s16_u16(diff_0_u16);
@@ -150,8 +152,8 @@ static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
vget_high_s16(diff_0_s16));
sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_1_s16),
vget_high_s16(diff_1_s16));
- a += a_stride + a_stride;
- b += b_stride + b_stride;
+ src_ptr += src_stride + src_stride;
+ ref_ptr += ref_stride + ref_stride;
i += 2;
} while (i < h);
@@ -161,31 +163,36 @@ static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
0);
}
-void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, unsigned int *sse, int *sum) {
- variance_neon_w8x2(a, a_stride, b, b_stride, 8, sse, sum);
+void vpx_get8x8var_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance_neon_w8x2(src_ptr, src_stride, ref_ptr, ref_stride, 8, sse, sum);
}
-void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, unsigned int *sse, int *sum) {
- variance_neon_w16(a, a_stride, b, b_stride, 16, 16, sse, sum);
+void vpx_get16x16var_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance_neon_w16(src_ptr, src_stride, ref_ptr, ref_stride, 16, 16, sse, sum);
}
-#define varianceNxM(n, m, shift) \
- unsigned int vpx_variance##n##x##m##_neon(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- unsigned int *sse) { \
- int sum; \
- if (n == 4) \
- variance_neon_w4x4(a, a_stride, b, b_stride, m, sse, &sum); \
- else if (n == 8) \
- variance_neon_w8x2(a, a_stride, b, b_stride, m, sse, &sum); \
- else \
- variance_neon_w16(a, a_stride, b, b_stride, n, m, sse, &sum); \
- if (n * m < 16 * 16) \
- return *sse - ((sum * sum) >> shift); \
- else \
- return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
+#define varianceNxM(n, m, shift) \
+ unsigned int vpx_variance##n##x##m##_neon( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, unsigned int *sse) { \
+ int sum; \
+ if (n == 4) \
+ variance_neon_w4x4(src_ptr, src_stride, ref_ptr, ref_stride, m, sse, \
+ &sum); \
+ else if (n == 8) \
+ variance_neon_w8x2(src_ptr, src_stride, ref_ptr, ref_stride, m, sse, \
+ &sum); \
+ else \
+ variance_neon_w16(src_ptr, src_stride, ref_ptr, ref_stride, n, m, sse, \
+ &sum); \
+ if (n * m < 16 * 16) \
+ return *sse - ((sum * sum) >> shift); \
+ else \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
}
varianceNxM(4, 4, 4);
@@ -199,58 +206,66 @@ varianceNxM(16, 32, 9);
varianceNxM(32, 16, 9);
varianceNxM(32, 32, 10);
-unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+unsigned int vpx_variance32x64_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w16(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
- variance_neon_w16(a + (32 * a_stride), a_stride, b + (32 * b_stride),
- b_stride, 32, 32, &sse2, &sum2);
+ variance_neon_w16(src_ptr, src_stride, ref_ptr, ref_stride, 32, 32, &sse1,
+ &sum1);
+ variance_neon_w16(src_ptr + (32 * src_stride), src_stride,
+ ref_ptr + (32 * ref_stride), ref_stride, 32, 32, &sse2,
+ &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}
-unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+unsigned int vpx_variance64x32_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
- variance_neon_w16(a + (16 * a_stride), a_stride, b + (16 * b_stride),
- b_stride, 64, 16, &sse2, &sum2);
+ variance_neon_w16(src_ptr, src_stride, ref_ptr, ref_stride, 64, 16, &sse1,
+ &sum1);
+ variance_neon_w16(src_ptr + (16 * src_stride), src_stride,
+ ref_ptr + (16 * ref_stride), ref_stride, 64, 16, &sse2,
+ &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}
-unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+unsigned int vpx_variance64x64_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum1, sum2;
uint32_t sse1, sse2;
- variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
- variance_neon_w16(a + (16 * a_stride), a_stride, b + (16 * b_stride),
- b_stride, 64, 16, &sse2, &sum2);
+ variance_neon_w16(src_ptr, src_stride, ref_ptr, ref_stride, 64, 16, &sse1,
+ &sum1);
+ variance_neon_w16(src_ptr + (16 * src_stride), src_stride,
+ ref_ptr + (16 * ref_stride), ref_stride, 64, 16, &sse2,
+ &sum2);
sse1 += sse2;
sum1 += sum2;
- variance_neon_w16(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride),
- b_stride, 64, 16, &sse2, &sum2);
+ variance_neon_w16(src_ptr + (16 * 2 * src_stride), src_stride,
+ ref_ptr + (16 * 2 * ref_stride), ref_stride, 64, 16, &sse2,
+ &sum2);
sse1 += sse2;
sum1 += sum2;
- variance_neon_w16(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride),
- b_stride, 64, 16, &sse2, &sum2);
+ variance_neon_w16(src_ptr + (16 * 3 * src_stride), src_stride,
+ ref_ptr + (16 * 3 * ref_stride), ref_stride, 64, 16, &sse2,
+ &sum2);
*sse = sse1 + sse2;
sum1 += sum2;
return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
}
-unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
- const unsigned char *ref_ptr, int recon_stride,
+unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int src_stride,
+ const unsigned char *ref_ptr, int ref_stride,
unsigned int *sse) {
int i;
int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
@@ -267,13 +282,13 @@ unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
for (i = 0; i < 8; i++) { // mse16x16_neon_loop
q0u8 = vld1q_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
q1u8 = vld1q_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
q2u8 = vld1q_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
q3u8 = vld1q_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
@@ -312,10 +327,9 @@ unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}
-unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
- int source_stride,
+unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, int src_stride,
const unsigned char *ref_ptr,
- int recon_stride) {
+ int ref_stride) {
int16x4_t d22s16, d24s16, d26s16, d28s16;
int64x1_t d0s64;
uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
@@ -324,21 +338,21 @@ unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
int64x2_t q1s64;
d0u8 = vld1_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
d4u8 = vld1_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
d1u8 = vld1_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
d5u8 = vld1_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
d2u8 = vld1_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
d6u8 = vld1_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
d3u8 = vld1_u8(src_ptr);
- src_ptr += source_stride;
+ src_ptr += src_stride;
d7u8 = vld1_u8(ref_ptr);
- ref_ptr += recon_stride;
+ ref_ptr += ref_stride;
q11u16 = vsubl_u8(d0u8, d4u8);
q12u16 = vsubl_u8(d1u8, d5u8);
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/avg.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/avg.c
index 2b687eed692..7ab98eab250 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/avg.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/avg.c
@@ -32,6 +32,166 @@ unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
return (sum + 8) >> 4;
}
+#if CONFIG_VP9_HIGHBITDEPTH
+// src_diff: 13 bit, dynamic range [-4095, 4095]
+// coeff: 16 bit
+static void hadamard_highbd_col8_first_pass(const int16_t *src_diff,
+ ptrdiff_t src_stride,
+ int32_t *coeff) {
+ int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
+ int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
+ int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
+ int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
+ int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
+ int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
+ int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
+ int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];
+
+ int16_t c0 = b0 + b2;
+ int16_t c1 = b1 + b3;
+ int16_t c2 = b0 - b2;
+ int16_t c3 = b1 - b3;
+ int16_t c4 = b4 + b6;
+ int16_t c5 = b5 + b7;
+ int16_t c6 = b4 - b6;
+ int16_t c7 = b5 - b7;
+
+ coeff[0] = c0 + c4;
+ coeff[7] = c1 + c5;
+ coeff[3] = c2 + c6;
+ coeff[4] = c3 + c7;
+ coeff[2] = c0 - c4;
+ coeff[6] = c1 - c5;
+ coeff[1] = c2 - c6;
+ coeff[5] = c3 - c7;
+}
+
+// src_diff: 16 bit, dynamic range [-32760, 32760]
+// coeff: 19 bit
+static void hadamard_highbd_col8_second_pass(const int32_t *src_diff,
+ ptrdiff_t src_stride,
+ int32_t *coeff) {
+ int32_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
+ int32_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
+ int32_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
+ int32_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
+ int32_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
+ int32_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
+ int32_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
+ int32_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];
+
+ int32_t c0 = b0 + b2;
+ int32_t c1 = b1 + b3;
+ int32_t c2 = b0 - b2;
+ int32_t c3 = b1 - b3;
+ int32_t c4 = b4 + b6;
+ int32_t c5 = b5 + b7;
+ int32_t c6 = b4 - b6;
+ int32_t c7 = b5 - b7;
+
+ coeff[0] = c0 + c4;
+ coeff[7] = c1 + c5;
+ coeff[3] = c2 + c6;
+ coeff[4] = c3 + c7;
+ coeff[2] = c0 - c4;
+ coeff[6] = c1 - c5;
+ coeff[1] = c2 - c6;
+ coeff[5] = c3 - c7;
+}
+
+// The order of the output coeff of the hadamard is not important. For
+// optimization purposes the final transpose may be skipped.
+void vpx_highbd_hadamard_8x8_c(const int16_t *src_diff, ptrdiff_t src_stride,
+ tran_low_t *coeff) {
+ int idx;
+ int32_t buffer[64];
+ int32_t buffer2[64];
+ int32_t *tmp_buf = &buffer[0];
+ for (idx = 0; idx < 8; ++idx) {
+ // src_diff: 13 bit
+ // buffer: 16 bit, dynamic range [-32760, 32760]
+ hadamard_highbd_col8_first_pass(src_diff, src_stride, tmp_buf);
+ tmp_buf += 8;
+ ++src_diff;
+ }
+
+ tmp_buf = &buffer[0];
+ for (idx = 0; idx < 8; ++idx) {
+ // buffer: 16 bit
+ // buffer2: 19 bit, dynamic range [-262080, 262080]
+ hadamard_highbd_col8_second_pass(tmp_buf, 8, buffer2 + 8 * idx);
+ ++tmp_buf;
+ }
+
+ for (idx = 0; idx < 64; ++idx) coeff[idx] = (tran_low_t)buffer2[idx];
+}
+
+// In place 16x16 2D Hadamard transform
+void vpx_highbd_hadamard_16x16_c(const int16_t *src_diff, ptrdiff_t src_stride,
+ tran_low_t *coeff) {
+ int idx;
+ for (idx = 0; idx < 4; ++idx) {
+ // src_diff: 13 bit, dynamic range [-4095, 4095]
+ const int16_t *src_ptr =
+ src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
+ vpx_highbd_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
+ }
+
+ // coeff: 19 bit, dynamic range [-262080, 262080]
+ for (idx = 0; idx < 64; ++idx) {
+ tran_low_t a0 = coeff[0];
+ tran_low_t a1 = coeff[64];
+ tran_low_t a2 = coeff[128];
+ tran_low_t a3 = coeff[192];
+
+ tran_low_t b0 = (a0 + a1) >> 1;
+ tran_low_t b1 = (a0 - a1) >> 1;
+ tran_low_t b2 = (a2 + a3) >> 1;
+ tran_low_t b3 = (a2 - a3) >> 1;
+
+ // new coeff dynamic range: 20 bit
+ coeff[0] = b0 + b2;
+ coeff[64] = b1 + b3;
+ coeff[128] = b0 - b2;
+ coeff[192] = b1 - b3;
+
+ ++coeff;
+ }
+}
+
+void vpx_highbd_hadamard_32x32_c(const int16_t *src_diff, ptrdiff_t src_stride,
+ tran_low_t *coeff) {
+ int idx;
+ for (idx = 0; idx < 4; ++idx) {
+ // src_diff: 13 bit, dynamic range [-4095, 4095]
+ const int16_t *src_ptr =
+ src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
+ vpx_highbd_hadamard_16x16_c(src_ptr, src_stride, coeff + idx * 256);
+ }
+
+ // coeff: 20 bit
+ for (idx = 0; idx < 256; ++idx) {
+ tran_low_t a0 = coeff[0];
+ tran_low_t a1 = coeff[256];
+ tran_low_t a2 = coeff[512];
+ tran_low_t a3 = coeff[768];
+
+ tran_low_t b0 = (a0 + a1) >> 2;
+ tran_low_t b1 = (a0 - a1) >> 2;
+ tran_low_t b2 = (a2 + a3) >> 2;
+ tran_low_t b3 = (a2 - a3) >> 2;
+
+ // new coeff dynamic range: 20 bit
+ coeff[0] = b0 + b2;
+ coeff[256] = b1 + b3;
+ coeff[512] = b0 - b2;
+ coeff[768] = b1 - b3;
+
+ ++coeff;
+ }
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
// src_diff: first pass, 9 bit, dynamic range [-255, 255]
// second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, ptrdiff_t src_stride,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/bitwriter.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/bitwriter.h
index ec3975e942c..11579c9a9c1 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/bitwriter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/bitwriter.h
@@ -27,8 +27,8 @@ typedef struct vpx_writer {
uint8_t *buffer;
} vpx_writer;
-void vpx_start_encode(vpx_writer *bc, uint8_t *buffer);
-void vpx_stop_encode(vpx_writer *bc);
+void vpx_start_encode(vpx_writer *br, uint8_t *source);
+void vpx_stop_encode(vpx_writer *br);
static INLINE void vpx_write(vpx_writer *br, int bit, int probability) {
unsigned int split;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/deblock.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/deblock.c
index 94acbb39195..455b73bbcea 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/deblock.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/deblock.c
@@ -39,11 +39,10 @@ const int16_t vpx_rv[] = {
9, 10, 13,
};
-void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
- unsigned char *dst_ptr,
- int src_pixels_per_line,
- int dst_pixels_per_line, int cols,
- unsigned char *f, int size) {
+void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src,
+ unsigned char *dst, int src_pitch,
+ int dst_pitch, int cols,
+ unsigned char *flimits, int size) {
unsigned char *p_src, *p_dst;
int row;
int col;
@@ -55,19 +54,21 @@ void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
for (row = 0; row < size; row++) {
/* post_proc_down for one row */
- p_src = src_ptr;
- p_dst = dst_ptr;
+ p_src = src;
+ p_dst = dst;
for (col = 0; col < cols; col++) {
- unsigned char p_above2 = p_src[col - 2 * src_pixels_per_line];
- unsigned char p_above1 = p_src[col - src_pixels_per_line];
- unsigned char p_below1 = p_src[col + src_pixels_per_line];
- unsigned char p_below2 = p_src[col + 2 * src_pixels_per_line];
+ unsigned char p_above2 = p_src[col - 2 * src_pitch];
+ unsigned char p_above1 = p_src[col - src_pitch];
+ unsigned char p_below1 = p_src[col + src_pitch];
+ unsigned char p_below2 = p_src[col + 2 * src_pitch];
v = p_src[col];
- if ((abs(v - p_above2) < f[col]) && (abs(v - p_above1) < f[col]) &&
- (abs(v - p_below1) < f[col]) && (abs(v - p_below2) < f[col])) {
+ if ((abs(v - p_above2) < flimits[col]) &&
+ (abs(v - p_above1) < flimits[col]) &&
+ (abs(v - p_below1) < flimits[col]) &&
+ (abs(v - p_below2) < flimits[col])) {
unsigned char k1, k2, k3;
k1 = (p_above2 + p_above1 + 1) >> 1;
k2 = (p_below2 + p_below1 + 1) >> 1;
@@ -79,8 +80,8 @@ void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
}
/* now post_proc_across */
- p_src = dst_ptr;
- p_dst = dst_ptr;
+ p_src = dst;
+ p_dst = dst;
p_src[-2] = p_src[-1] = p_src[0];
p_src[cols] = p_src[cols + 1] = p_src[cols - 1];
@@ -88,10 +89,10 @@ void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
for (col = 0; col < cols; col++) {
v = p_src[col];
- if ((abs(v - p_src[col - 2]) < f[col]) &&
- (abs(v - p_src[col - 1]) < f[col]) &&
- (abs(v - p_src[col + 1]) < f[col]) &&
- (abs(v - p_src[col + 2]) < f[col])) {
+ if ((abs(v - p_src[col - 2]) < flimits[col]) &&
+ (abs(v - p_src[col - 1]) < flimits[col]) &&
+ (abs(v - p_src[col + 1]) < flimits[col]) &&
+ (abs(v - p_src[col + 2]) < flimits[col])) {
unsigned char k1, k2, k3;
k1 = (p_src[col - 2] + p_src[col - 1] + 1) >> 1;
k2 = (p_src[col + 2] + p_src[col + 1] + 1) >> 1;
@@ -109,8 +110,8 @@ void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr,
p_dst[col - 1] = d[(col - 1) & 3];
/* next row */
- src_ptr += src_pixels_per_line;
- dst_ptr += dst_pixels_per_line;
+ src += src_pitch;
+ dst += dst_pitch;
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/fwd_txfm.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/fwd_txfm.c
index 6dcb3ba6681..ef66de0247a 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/fwd_txfm.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/fwd_txfm.c
@@ -87,11 +87,11 @@ void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
output[0] = sum * 2;
}
-void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
+void vpx_fdct8x8_c(const int16_t *input, tran_low_t *output, int stride) {
int i, j;
tran_low_t intermediate[64];
int pass;
- tran_low_t *output = intermediate;
+ tran_low_t *out = intermediate;
const tran_low_t *in = NULL;
// Transform columns
@@ -133,10 +133,10 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
t1 = (x0 - x1) * cospi_16_64;
t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
- output[0] = (tran_low_t)fdct_round_shift(t0);
- output[2] = (tran_low_t)fdct_round_shift(t2);
- output[4] = (tran_low_t)fdct_round_shift(t1);
- output[6] = (tran_low_t)fdct_round_shift(t3);
+ out[0] = (tran_low_t)fdct_round_shift(t0);
+ out[2] = (tran_low_t)fdct_round_shift(t2);
+ out[4] = (tran_low_t)fdct_round_shift(t1);
+ out[6] = (tran_low_t)fdct_round_shift(t3);
// Stage 2
t0 = (s6 - s5) * cospi_16_64;
@@ -155,19 +155,19 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
- output[1] = (tran_low_t)fdct_round_shift(t0);
- output[3] = (tran_low_t)fdct_round_shift(t2);
- output[5] = (tran_low_t)fdct_round_shift(t1);
- output[7] = (tran_low_t)fdct_round_shift(t3);
- output += 8;
+ out[1] = (tran_low_t)fdct_round_shift(t0);
+ out[3] = (tran_low_t)fdct_round_shift(t2);
+ out[5] = (tran_low_t)fdct_round_shift(t1);
+ out[7] = (tran_low_t)fdct_round_shift(t3);
+ out += 8;
}
in = intermediate;
- output = final_output;
+ out = output;
}
// Rows
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j) final_output[j + i * 8] /= 2;
+ for (j = 0; j < 8; ++j) output[j + i * 8] /= 2;
}
}
@@ -705,9 +705,9 @@ void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
}
-void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void vpx_fdct32x32_c(const int16_t *input, tran_low_t *output, int stride) {
int i, j;
- tran_high_t output[32 * 32];
+ tran_high_t out[32 * 32];
// Columns
for (i = 0; i < 32; ++i) {
@@ -715,16 +715,16 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
vpx_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
- output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ out[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
// Rows
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
- for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j + i * 32];
vpx_fdct32(temp_in, temp_out, 0);
for (j = 0; j < 32; ++j)
- out[j + i * 32] =
+ output[j + i * 32] =
(tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
}
}
@@ -732,9 +732,9 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
// Note that although we use dct_32_round in dct32 computation flow,
// this 2d fdct32x32 for rate-distortion optimization loop is operating
// within 16 bits precision.
-void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *output, int stride) {
int i, j;
- tran_high_t output[32 * 32];
+ tran_high_t out[32 * 32];
// Columns
for (i = 0; i < 32; ++i) {
@@ -745,15 +745,15 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
// TODO(cd): see quality impact of only doing
// output[j * 32 + i] = (temp_out[j] + 1) >> 2;
// PS: also change code in vpx_dsp/x86/vpx_dct_sse2.c
- output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ out[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
}
// Rows
for (i = 0; i < 32; ++i) {
tran_high_t temp_in[32], temp_out[32];
- for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j + i * 32];
vpx_fdct32(temp_in, temp_out, 1);
- for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
+ for (j = 0; j < 32; ++j) output[j + i * 32] = (tran_low_t)temp_out[j];
}
}
@@ -772,14 +772,14 @@ void vpx_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
vpx_fdct4x4_c(input, output, stride);
}
-void vpx_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void vpx_highbd_fdct8x8_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct8x8_c(input, final_output, stride);
+ vpx_fdct8x8_c(input, output, stride);
}
-void vpx_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+void vpx_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct8x8_1_c(input, final_output, stride);
+ vpx_fdct8x8_1_c(input, output, stride);
}
void vpx_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
@@ -792,17 +792,18 @@ void vpx_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
vpx_fdct16x16_1_c(input, output, stride);
}
-void vpx_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
- vpx_fdct32x32_c(input, out, stride);
+void vpx_highbd_fdct32x32_c(const int16_t *input, tran_low_t *output,
+ int stride) {
+ vpx_fdct32x32_c(input, output, stride);
}
-void vpx_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+void vpx_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct32x32_rd_c(input, out, stride);
+ vpx_fdct32x32_rd_c(input, output, stride);
}
-void vpx_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+void vpx_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *output,
int stride) {
- vpx_fdct32x32_1_c(input, out, stride);
+ vpx_fdct32x32_1_c(input, output, stride);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/inv_txfm.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/inv_txfm.c
index 0194aa1e186..69de05e7188 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/inv_txfm.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/inv_txfm.c
@@ -67,11 +67,11 @@ void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
}
}
-void vpx_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int stride) {
+void vpx_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
int i;
tran_high_t a1, e1;
tran_low_t tmp[4];
- const tran_low_t *ip = in;
+ const tran_low_t *ip = input;
tran_low_t *op = tmp;
a1 = ip[0] >> UNIT_QUANT_SHIFT;
@@ -1346,12 +1346,12 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint16_t *dest,
}
}
-void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint16_t *dest,
+void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int i;
tran_high_t a1, e1;
tran_low_t tmp[4];
- const tran_low_t *ip = in;
+ const tran_low_t *ip = input;
tran_low_t *op = tmp;
(void)bd;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/loopfilter.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/loopfilter.c
index 9866ea37d6d..47f30c96aff 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/loopfilter.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/loopfilter.c
@@ -109,29 +109,30 @@ static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1,
*op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
}
-void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */,
- const uint8_t *blimit, const uint8_t *limit,
- const uint8_t *thresh) {
+void vpx_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
int i;
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8; ++i) {
- const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
- const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const uint8_t p3 = s[-4 * pitch], p2 = s[-3 * pitch], p1 = s[-2 * pitch],
+ p0 = s[-pitch];
+ const uint8_t q0 = s[0 * pitch], q1 = s[1 * pitch], q2 = s[2 * pitch],
+ q3 = s[3 * pitch];
const int8_t mask =
filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
- filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p);
+ filter4(mask, *thresh, s - 2 * pitch, s - 1 * pitch, s, s + 1 * pitch);
++s;
}
}
-void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1);
+ vpx_lpf_horizontal_4_c(s, pitch, blimit0, limit0, thresh0);
+ vpx_lpf_horizontal_4_c(s + 8, pitch, blimit1, limit1, thresh1);
}
void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit,
@@ -178,31 +179,33 @@ static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat,
}
}
-void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
int i;
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8; ++i) {
- const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
- const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const uint8_t p3 = s[-4 * pitch], p2 = s[-3 * pitch], p1 = s[-2 * pitch],
+ p0 = s[-pitch];
+ const uint8_t q0 = s[0 * pitch], q1 = s[1 * pitch], q2 = s[2 * pitch],
+ q3 = s[3 * pitch];
const int8_t mask =
filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
- filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
- s + 1 * p, s + 2 * p, s + 3 * p);
+ filter8(mask, *thresh, flat, s - 4 * pitch, s - 3 * pitch, s - 2 * pitch,
+ s - 1 * pitch, s, s + 1 * pitch, s + 2 * pitch, s + 3 * pitch);
++s;
}
}
-void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0,
+void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
- vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0);
- vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1);
+ vpx_lpf_horizontal_8_c(s, pitch, blimit0, limit0, thresh0);
+ vpx_lpf_horizontal_8_c(s + 8, pitch, blimit1, limit1, thresh1);
}
void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit,
@@ -283,7 +286,8 @@ static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat,
}
}
-static void mb_lpf_horizontal_edge_w(uint8_t *s, int p, const uint8_t *blimit,
+static void mb_lpf_horizontal_edge_w(uint8_t *s, int pitch,
+ const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int count) {
int i;
@@ -291,34 +295,37 @@ static void mb_lpf_horizontal_edge_w(uint8_t *s, int p, const uint8_t *blimit,
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8 * count; ++i) {
- const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
- const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const uint8_t p3 = s[-4 * pitch], p2 = s[-3 * pitch], p1 = s[-2 * pitch],
+ p0 = s[-pitch];
+ const uint8_t q0 = s[0 * pitch], q1 = s[1 * pitch], q2 = s[2 * pitch],
+ q3 = s[3 * pitch];
const int8_t mask =
filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);
const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
- const int8_t flat2 =
- flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0,
- s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
-
- filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p,
- s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
- s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 * p, s + 6 * p,
- s + 7 * p);
+ const int8_t flat2 = flat_mask5(
+ 1, s[-8 * pitch], s[-7 * pitch], s[-6 * pitch], s[-5 * pitch], p0, q0,
+ s[4 * pitch], s[5 * pitch], s[6 * pitch], s[7 * pitch]);
+
+ filter16(mask, *thresh, flat, flat2, s - 8 * pitch, s - 7 * pitch,
+ s - 6 * pitch, s - 5 * pitch, s - 4 * pitch, s - 3 * pitch,
+ s - 2 * pitch, s - 1 * pitch, s, s + 1 * pitch, s + 2 * pitch,
+ s + 3 * pitch, s + 4 * pitch, s + 5 * pitch, s + 6 * pitch,
+ s + 7 * pitch);
++s;
}
}
-void vpx_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_horizontal_16_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
- mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1);
+ mb_lpf_horizontal_edge_w(s, pitch, blimit, limit, thresh, 1);
}
-void vpx_lpf_horizontal_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_horizontal_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
- mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2);
+ mb_lpf_horizontal_edge_w(s, pitch, blimit, limit, thresh, 2);
}
-static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
+static void mb_lpf_vertical_edge_w(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int count) {
int i;
@@ -335,18 +342,18 @@ static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5, s - 4,
s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6,
s + 7);
- s += p;
+ s += pitch;
}
}
-void vpx_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_vertical_16_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
- mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8);
+ mb_lpf_vertical_edge_w(s, pitch, blimit, limit, thresh, 8);
}
-void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_vertical_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh) {
- mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
+ mb_lpf_vertical_edge_w(s, pitch, blimit, limit, thresh, 16);
}
#if CONFIG_VP9_HIGHBITDEPTH
@@ -440,7 +447,7 @@ static INLINE void highbd_filter4(int8_t mask, uint8_t thresh, uint16_t *op1,
*op1 = signed_char_clamp_high(ps1 + filter, bd) + (0x80 << shift);
}
-void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
+void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh, int bd) {
int i;
@@ -448,27 +455,28 @@ void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */,
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8; ++i) {
- const uint16_t p3 = s[-4 * p];
- const uint16_t p2 = s[-3 * p];
- const uint16_t p1 = s[-2 * p];
- const uint16_t p0 = s[-p];
- const uint16_t q0 = s[0 * p];
- const uint16_t q1 = s[1 * p];
- const uint16_t q2 = s[2 * p];
- const uint16_t q3 = s[3 * p];
+ const uint16_t p3 = s[-4 * pitch];
+ const uint16_t p2 = s[-3 * pitch];
+ const uint16_t p1 = s[-2 * pitch];
+ const uint16_t p0 = s[-pitch];
+ const uint16_t q0 = s[0 * pitch];
+ const uint16_t q1 = s[1 * pitch];
+ const uint16_t q2 = s[2 * pitch];
+ const uint16_t q3 = s[3 * pitch];
const int8_t mask =
highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
- highbd_filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p, bd);
+ highbd_filter4(mask, *thresh, s - 2 * pitch, s - 1 * pitch, s,
+ s + 1 * pitch, bd);
++s;
}
}
void vpx_highbd_lpf_horizontal_4_dual_c(
- uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd);
+ vpx_highbd_lpf_horizontal_4_c(s, pitch, blimit0, limit0, thresh0, bd);
+ vpx_highbd_lpf_horizontal_4_c(s + 8, pitch, blimit1, limit1, thresh1, bd);
}
void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit,
@@ -517,33 +525,36 @@ static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat,
}
}
-void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit, const uint8_t *thresh,
- int bd) {
+void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int pitch,
+ const uint8_t *blimit, const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
int i;
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8; ++i) {
- const uint16_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
- const uint16_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const uint16_t p3 = s[-4 * pitch], p2 = s[-3 * pitch], p1 = s[-2 * pitch],
+ p0 = s[-pitch];
+ const uint16_t q0 = s[0 * pitch], q1 = s[1 * pitch], q2 = s[2 * pitch],
+ q3 = s[3 * pitch];
const int8_t mask =
highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
const int8_t flat =
highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
- highbd_filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p,
- s - 1 * p, s, s + 1 * p, s + 2 * p, s + 3 * p, bd);
+ highbd_filter8(mask, *thresh, flat, s - 4 * pitch, s - 3 * pitch,
+ s - 2 * pitch, s - 1 * pitch, s, s + 1 * pitch,
+ s + 2 * pitch, s + 3 * pitch, bd);
++s;
}
}
void vpx_highbd_lpf_horizontal_8_dual_c(
- uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
- vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd);
- vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd);
+ vpx_highbd_lpf_horizontal_8_c(s, pitch, blimit0, limit0, thresh0, bd);
+ vpx_highbd_lpf_horizontal_8_c(s + 8, pitch, blimit1, limit1, thresh1, bd);
}
void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit,
@@ -639,7 +650,7 @@ static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, uint8_t flat,
}
}
-static void highbd_mb_lpf_horizontal_edge_w(uint16_t *s, int p,
+static void highbd_mb_lpf_horizontal_edge_w(uint16_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int count,
@@ -649,44 +660,45 @@ static void highbd_mb_lpf_horizontal_edge_w(uint16_t *s, int p,
// loop filter designed to work using chars so that we can make maximum use
// of 8 bit simd instructions.
for (i = 0; i < 8 * count; ++i) {
- const uint16_t p3 = s[-4 * p];
- const uint16_t p2 = s[-3 * p];
- const uint16_t p1 = s[-2 * p];
- const uint16_t p0 = s[-p];
- const uint16_t q0 = s[0 * p];
- const uint16_t q1 = s[1 * p];
- const uint16_t q2 = s[2 * p];
- const uint16_t q3 = s[3 * p];
+ const uint16_t p3 = s[-4 * pitch];
+ const uint16_t p2 = s[-3 * pitch];
+ const uint16_t p1 = s[-2 * pitch];
+ const uint16_t p0 = s[-pitch];
+ const uint16_t q0 = s[0 * pitch];
+ const uint16_t q1 = s[1 * pitch];
+ const uint16_t q2 = s[2 * pitch];
+ const uint16_t q3 = s[3 * pitch];
const int8_t mask =
highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd);
const int8_t flat =
highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd);
- const int8_t flat2 =
- highbd_flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0,
- s[4 * p], s[5 * p], s[6 * p], s[7 * p], bd);
-
- highbd_filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p,
- s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s,
- s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 * p,
- s + 6 * p, s + 7 * p, bd);
+ const int8_t flat2 = highbd_flat_mask5(
+ 1, s[-8 * pitch], s[-7 * pitch], s[-6 * pitch], s[-5 * pitch], p0, q0,
+ s[4 * pitch], s[5 * pitch], s[6 * pitch], s[7 * pitch], bd);
+
+ highbd_filter16(mask, *thresh, flat, flat2, s - 8 * pitch, s - 7 * pitch,
+ s - 6 * pitch, s - 5 * pitch, s - 4 * pitch, s - 3 * pitch,
+ s - 2 * pitch, s - 1 * pitch, s, s + 1 * pitch,
+ s + 2 * pitch, s + 3 * pitch, s + 4 * pitch, s + 5 * pitch,
+ s + 6 * pitch, s + 7 * pitch, bd);
++s;
}
}
-void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit, const uint8_t *thresh,
- int bd) {
- highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1, bd);
+void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int pitch,
+ const uint8_t *blimit, const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
+ highbd_mb_lpf_horizontal_edge_w(s, pitch, blimit, limit, thresh, 1, bd);
}
-void vpx_highbd_lpf_horizontal_16_dual_c(uint16_t *s, int p,
+void vpx_highbd_lpf_horizontal_16_dual_c(uint16_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
- highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2, bd);
+ highbd_mb_lpf_horizontal_edge_w(s, pitch, blimit, limit, thresh, 2, bd);
}
-static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
+static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int count,
@@ -712,20 +724,20 @@ static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p,
highbd_filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5,
s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4,
s + 5, s + 6, s + 7, bd);
- s += p;
+ s += pitch;
}
}
-void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit,
+void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int pitch, const uint8_t *blimit,
const uint8_t *limit, const uint8_t *thresh,
int bd) {
- highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd);
+ highbd_mb_lpf_vertical_edge_w(s, pitch, blimit, limit, thresh, 8, bd);
}
-void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
+void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
- highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
+ highbd_mb_lpf_vertical_edge_w(s, pitch, blimit, limit, thresh, 16, bd);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c
index d9c2bef69ed..cc458c86182 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c
@@ -15,6 +15,7 @@
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
@@ -341,7 +342,7 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
assert(y_step_q4 == 16);
assert(((const int32_t *)filter_y)[1] != 0x800000);
- if (((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_y) == 2) {
vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter,
x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
index fb68ad88139..7a9aa49d8a1 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
@@ -15,6 +15,7 @@
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
@@ -945,7 +946,7 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
assert(x_step_q4 == 16);
assert(((const int32_t *)filter_x)[1] != 0x800000);
- if (((const int32_t *)filter_x)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2) {
vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter,
x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_dspr2.c
index 89f0f41962a..1e7052f6c58 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_dspr2.c
@@ -1322,7 +1322,7 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
if (filter_x[3] == 0x80) {
copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
intermediate_height, w, intermediate_height);
- } else if (((const int32_t *)filter_x)[0] == 0) {
+ } else if (vpx_get_filter_taps(filter_x) == 2) {
vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
intermediate_height, filter_x, w, intermediate_height);
} else {
@@ -1365,7 +1365,7 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
/* copy the src to dst */
if (filter_y[3] == 0x80) {
copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
- } else if (((const int32_t *)filter_y)[0] == 0) {
+ } else if (vpx_get_filter_taps(filter_y) == 2) {
vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
filter_y, h, w);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c
index 77e95c84449..09d6f36e561 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c
@@ -825,7 +825,7 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
assert(x_step_q4 == 16);
assert(((const int32_t *)filter_x)[1] != 0x800000);
- if (((const int32_t *)filter_x)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2) {
vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter, x0_q4,
x_step_q4, y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c
index c329f71ccf6..fd977b53360 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c
@@ -325,7 +325,7 @@ void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
assert(y_step_q4 == 16);
assert(((const int32_t *)filter_y)[1] != 0x800000);
- if (((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_y) == 2) {
vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter, x0_q4,
x_step_q4, y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/deblock_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/deblock_msa.c
index 9ef04836a27..1707d328410 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/deblock_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/deblock_msa.c
@@ -508,11 +508,11 @@ void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
}
}
-void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
- int32_t rows, int32_t cols, int32_t flimit) {
+void vpx_mbpost_proc_across_ip_msa(uint8_t *src, int32_t pitch, int32_t rows,
+ int32_t cols, int32_t flimit) {
int32_t row, col, cnt;
- uint8_t *src_dup = src_ptr;
- v16u8 src0, src, tmp_orig;
+ uint8_t *src_dup = src;
+ v16u8 src0, src1, tmp_orig;
v16u8 tmp = { 0 };
v16i8 zero = { 0 };
v8u16 sum_h, src_r_h, src_l_h;
@@ -531,13 +531,13 @@ void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
src_dup[cols + 16] = src_dup[cols - 1];
tmp_orig = (v16u8)__msa_ldi_b(0);
tmp_orig[15] = tmp[15];
- src = LD_UB(src_dup - 8);
- src[15] = 0;
- ILVRL_B2_UH(zero, src, src_r_h, src_l_h);
+ src1 = LD_UB(src_dup - 8);
+ src1[15] = 0;
+ ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
src_r_w = __msa_dotp_u_w(src_r_h, src_r_h);
src_r_w += __msa_dotp_u_w(src_l_h, src_l_h);
sum_sq = HADD_SW_S32(src_r_w) + 16;
- sum_h = __msa_hadd_u_h(src, src);
+ sum_h = __msa_hadd_u_h(src1, src1);
sum = HADD_UH_U32(sum_h);
{
v16u8 src7, src8, src_r, src_l;
@@ -566,8 +566,8 @@ void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1];
}
sum = sum_l[7];
- src = LD_UB(src_dup + 16 * col);
- ILVRL_B2_UH(zero, src, src_r_h, src_l_h);
+ src1 = LD_UB(src_dup + 16 * col);
+ ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4);
src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4);
tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7);
@@ -613,7 +613,7 @@ void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
total3 = (total3 < flimit_vec);
PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
- tmp = __msa_bmz_v(tmp, src, (v16u8)mask);
+ tmp = __msa_bmz_v(tmp, src1, (v16u8)mask);
if (col == 0) {
uint64_t src_d;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c
index 313e06f92dd..6c2649d7e70 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c
@@ -1619,16 +1619,16 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa( \
- const uint8_t *src, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref, int32_t ref_stride, \
+ const uint8_t *src, int32_t src_stride, int32_t x_offset, \
+ int32_t y_offset, const uint8_t *ref, int32_t ref_stride, \
uint32_t *sse) { \
int32_t diff; \
uint32_t var; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
+ const uint8_t *h_filter = bilinear_filters_msa[x_offset]; \
+ const uint8_t *v_filter = bilinear_filters_msa[y_offset]; \
\
- if (yoffset) { \
- if (xoffset) { \
+ if (y_offset) { \
+ if (x_offset) { \
*sse = sub_pixel_sse_diff_##wd##width_hv_msa( \
src, src_stride, ref, ref_stride, h_filter, v_filter, ht, &diff); \
} else { \
@@ -1638,7 +1638,7 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
\
var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
} else { \
- if (xoffset) { \
+ if (x_offset) { \
*sse = sub_pixel_sse_diff_##wd##width_h_msa( \
src, src_stride, ref, ref_stride, h_filter, ht, &diff); \
\
@@ -1672,15 +1672,15 @@ VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
#define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \
uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \
- const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
+ const uint8_t *src_ptr, int32_t src_stride, int32_t x_offset, \
+ int32_t y_offset, const uint8_t *ref_ptr, int32_t ref_stride, \
uint32_t *sse, const uint8_t *sec_pred) { \
int32_t diff; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
+ const uint8_t *h_filter = bilinear_filters_msa[x_offset]; \
+ const uint8_t *v_filter = bilinear_filters_msa[y_offset]; \
\
- if (yoffset) { \
- if (xoffset) { \
+ if (y_offset) { \
+ if (x_offset) { \
*sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa( \
src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \
v_filter, ht, &diff); \
@@ -1690,7 +1690,7 @@ VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
&diff); \
} \
} else { \
- if (xoffset) { \
+ if (x_offset) { \
*sse = sub_pixel_avg_sse_diff_##wd##width_h_msa( \
src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
&diff); \
@@ -1719,16 +1719,16 @@ VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
int32_t src_stride,
- int32_t xoffset, int32_t yoffset,
+ int32_t x_offset, int32_t y_offset,
const uint8_t *ref_ptr,
int32_t ref_stride, uint32_t *sse,
const uint8_t *sec_pred) {
int32_t diff;
- const uint8_t *h_filter = bilinear_filters_msa[xoffset];
- const uint8_t *v_filter = bilinear_filters_msa[yoffset];
+ const uint8_t *h_filter = bilinear_filters_msa[x_offset];
+ const uint8_t *v_filter = bilinear_filters_msa[y_offset];
- if (yoffset) {
- if (xoffset) {
+ if (y_offset) {
+ if (x_offset) {
*sse = sub_pixel_avg_sse_diff_32width_hv_msa(
src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter,
v_filter, 64, &diff);
@@ -1738,7 +1738,7 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
v_filter, 64, &diff);
}
} else {
- if (xoffset) {
+ if (x_offset) {
*sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride, ref_ptr,
ref_stride, sec_pred,
h_filter, 64, &diff);
@@ -1753,15 +1753,15 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \
uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa( \
- const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
+ const uint8_t *src_ptr, int32_t src_stride, int32_t x_offset, \
+ int32_t y_offset, const uint8_t *ref_ptr, int32_t ref_stride, \
uint32_t *sse, const uint8_t *sec_pred) { \
int32_t diff; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
+ const uint8_t *h_filter = bilinear_filters_msa[x_offset]; \
+ const uint8_t *v_filter = bilinear_filters_msa[y_offset]; \
\
- if (yoffset) { \
- if (xoffset) { \
+ if (y_offset) { \
+ if (x_offset) { \
*sse = sub_pixel_avg_sse_diff_64width_hv_msa( \
src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \
v_filter, ht, &diff); \
@@ -1771,7 +1771,7 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
&diff); \
} \
} else { \
- if (xoffset) { \
+ if (x_offset) { \
*sse = sub_pixel_avg_sse_diff_64width_h_msa( \
src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
&diff); \
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/variance_mmi.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/variance_mmi.c
index 4af60d3634b..88908e01aae 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/variance_mmi.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/variance_mmi.c
@@ -87,10 +87,10 @@ static const uint8_t bilinear_filters[8][2] = {
"paddh %[ftmp12], %[ftmp12], %[ftmp6] \n\t"
#define VARIANCE_SSE_8 \
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t" \
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t" \
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t" \
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t" \
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t" \
"pasubub %[ftmp3], %[ftmp1], %[ftmp2] \n\t" \
"punpcklbh %[ftmp4], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t" \
@@ -101,10 +101,10 @@ static const uint8_t bilinear_filters[8][2] = {
#define VARIANCE_SSE_16 \
VARIANCE_SSE_8 \
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t" \
- "gsldlc1 %[ftmp2], 0x0f(%[b]) \n\t" \
- "gsldrc1 %[ftmp2], 0x08(%[b]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
+ "gsldlc1 %[ftmp2], 0x0f(%[ref_ptr]) \n\t" \
+ "gsldrc1 %[ftmp2], 0x08(%[ref_ptr]) \n\t" \
"pasubub %[ftmp3], %[ftmp1], %[ftmp2] \n\t" \
"punpcklbh %[ftmp4], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t" \
@@ -115,11 +115,11 @@ static const uint8_t bilinear_filters[8][2] = {
#define VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_A \
/* calculate fdata3[0]~fdata3[3], store at ftmp2*/ \
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x08(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x01(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x01(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp2], %[ftmp2], %[filter_x0] \n\t" \
"paddh %[ftmp2], %[ftmp2], %[ff_ph_40] \n\t" \
@@ -129,11 +129,11 @@ static const uint8_t bilinear_filters[8][2] = {
#define VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_B \
/* calculate fdata3[0]~fdata3[3], store at ftmp4*/ \
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x08(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x01(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x01(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp4], %[ftmp4], %[filter_x0] \n\t" \
"paddh %[ftmp4], %[ftmp4], %[ff_ph_40] \n\t" \
@@ -169,12 +169,12 @@ static const uint8_t bilinear_filters[8][2] = {
#define VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_A \
/* calculate fdata3[0]~fdata3[7], store at ftmp2 and ftmp3*/ \
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x08(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x01(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x01(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp2], %[ftmp2], %[filter_x0] \n\t" \
@@ -190,12 +190,12 @@ static const uint8_t bilinear_filters[8][2] = {
#define VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_B \
/* calculate fdata3[0]~fdata3[7], store at ftmp8 and ftmp9*/ \
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp8], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp9], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x08(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x01(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x01(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp10], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp11], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[filter_x0] \n\t" \
@@ -258,12 +258,12 @@ static const uint8_t bilinear_filters[8][2] = {
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_A \
\
/* calculate fdata3[8]~fdata3[15], store at ftmp4 and ftmp5*/ \
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x10(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x09(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x10(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x09(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp6], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp7], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp4], %[ftmp4], %[filter_x0] \n\t" \
@@ -282,12 +282,12 @@ static const uint8_t bilinear_filters[8][2] = {
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_B \
\
/* calculate fdata3[8]~fdata3[15], store at ftmp10 and ftmp11*/ \
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp10], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp11], %[ftmp1], %[ftmp0] \n\t" \
- "gsldlc1 %[ftmp1], 0x10(%[a]) \n\t" \
- "gsldrc1 %[ftmp1], 0x09(%[a]) \n\t" \
+ "gsldlc1 %[ftmp1], 0x10(%[src_ptr]) \n\t" \
+ "gsldrc1 %[ftmp1], 0x09(%[src_ptr]) \n\t" \
"punpcklbh %[ftmp12], %[ftmp1], %[ftmp0] \n\t" \
"punpckhbh %[ftmp13], %[ftmp1], %[ftmp0] \n\t" \
"pmullh %[ftmp10], %[ftmp10], %[filter_x0] \n\t" \
@@ -357,24 +357,23 @@ static const uint8_t bilinear_filters[8][2] = {
// taps should sum to FILTER_WEIGHT. pixel_step defines whether the filter is
// applied horizontally (pixel_step = 1) or vertically (pixel_step = stride).
// It defines the offset required to move from one input to the next.
-static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
- unsigned int src_pixels_per_line,
- int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_first_pass(
+ const uint8_t *src_ptr, uint16_t *ref_ptr, unsigned int src_pixels_per_line,
+ int pixel_step, unsigned int output_height, unsigned int output_width,
+ const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
- b[j] = ROUND_POWER_OF_TWO(
- (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
+ ref_ptr[j] = ROUND_POWER_OF_TWO(
+ (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+ FILTER_BITS);
- ++a;
+ ++src_ptr;
}
- a += src_pixels_per_line - output_width;
- b += output_width;
+ src_ptr += src_pixels_per_line - output_width;
+ ref_ptr += output_width;
}
}
@@ -387,28 +386,27 @@ static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
// filter is applied horizontally (pixel_step = 1) or vertically
// (pixel_step = stride). It defines the offset required to move from one input
// to the next. Output is 8-bit.
-static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_second_pass(
+ const uint16_t *src_ptr, uint8_t *ref_ptr, unsigned int src_pixels_per_line,
+ unsigned int pixel_step, unsigned int output_height,
+ unsigned int output_width, const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
- b[j] = ROUND_POWER_OF_TWO(
- (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
- ++a;
+ ref_ptr[j] = ROUND_POWER_OF_TWO(
+ (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+ FILTER_BITS);
+ ++src_ptr;
}
- a += src_pixels_per_line - output_width;
- b += output_width;
+ src_ptr += src_pixels_per_line - output_width;
+ ref_ptr += output_width;
}
}
-static inline uint32_t vpx_variance64x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static inline uint32_t vpx_variance64x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse, int high) {
int sum;
double ftmp[12];
@@ -424,57 +422,57 @@ static inline uint32_t vpx_variance64x(const uint8_t *a, int a_stride,
"xor %[ftmp9], %[ftmp9], %[ftmp9] \n\t"
"xor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x0f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x08(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x17(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x10(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x17(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x10(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x17(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x10(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x17(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x10(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x1f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x18(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x1f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x18(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x1f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x18(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x1f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x18(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x27(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x20(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x27(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x20(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x27(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x20(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x27(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x20(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x2f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x28(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x2f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x28(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x2f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x28(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x2f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x28(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x37(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x30(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x37(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x30(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x37(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x30(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x37(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x30(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x3f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x38(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x3f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x38(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x3f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x38(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x3f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x38(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"mfc1 %[tmp1], %[ftmp9] \n\t"
@@ -491,9 +489,10 @@ static inline uint32_t vpx_variance64x(const uint8_t *a, int a_stride,
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
[tmp2]"=&r"(tmp[2]),
- [a]"+&r"(a), [b]"+&r"(b),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr),
[sum]"=&r"(sum)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse)
: "memory"
);
@@ -501,18 +500,19 @@ static inline uint32_t vpx_variance64x(const uint8_t *a, int a_stride,
return *sse - (((int64_t)sum * sum) / (64 * high));
}
-#define VPX_VARIANCE64XN(n) \
- uint32_t vpx_variance64x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_variance64x(a, a_stride, b, b_stride, sse, n); \
+#define VPX_VARIANCE64XN(n) \
+ uint32_t vpx_variance64x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_variance64x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
VPX_VARIANCE64XN(64)
VPX_VARIANCE64XN(32)
-uint32_t vpx_variance32x64_mmi(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, uint32_t *sse) {
+uint32_t vpx_variance32x64_mmi(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ uint32_t *sse) {
int sum;
double ftmp[12];
uint32_t tmp[3];
@@ -527,33 +527,33 @@ uint32_t vpx_variance32x64_mmi(const uint8_t *a, int a_stride, const uint8_t *b,
"xor %[ftmp9], %[ftmp9], %[ftmp9] \n\t"
"xor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x0f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x08(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x17(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x10(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x17(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x10(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x17(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x10(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x17(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x10(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
- "gsldlc1 %[ftmp1], 0x1f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x18(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x1f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x18(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x1f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x18(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x1f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x18(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8_FOR_W64
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"mfc1 %[tmp1], %[ftmp9] \n\t"
@@ -570,9 +570,10 @@ uint32_t vpx_variance32x64_mmi(const uint8_t *a, int a_stride, const uint8_t *b,
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
[tmp2]"=&r"(tmp[2]),
- [a]"+&r"(a), [b]"+&r"(b),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr),
[sum]"=&r"(sum)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[sse]"r"(sse)
: "memory"
);
@@ -580,8 +581,8 @@ uint32_t vpx_variance32x64_mmi(const uint8_t *a, int a_stride, const uint8_t *b,
return *sse - (((int64_t)sum * sum) / 2048);
}
-static inline uint32_t vpx_variance32x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static inline uint32_t vpx_variance32x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse, int high) {
int sum;
double ftmp[13];
@@ -598,30 +599,30 @@ static inline uint32_t vpx_variance32x(const uint8_t *a, int a_stride,
"xor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
"xor %[ftmp12], %[ftmp12], %[ftmp12] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x0f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x08(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
- "gsldlc1 %[ftmp1], 0x17(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x10(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x17(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x10(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x17(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x10(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x17(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x10(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
- "gsldlc1 %[ftmp1], 0x1f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x18(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x1f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x18(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x1f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x18(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x1f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x18(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp8], %[ftmp11] \n\t"
@@ -646,8 +647,9 @@ static inline uint32_t vpx_variance32x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[ftmp12]"=&f"(ftmp[12]), [tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse), [sum]"r"(&sum)
: "memory"
);
@@ -655,18 +657,18 @@ static inline uint32_t vpx_variance32x(const uint8_t *a, int a_stride,
return *sse - (((int64_t)sum * sum) / (32 * high));
}
-#define VPX_VARIANCE32XN(n) \
- uint32_t vpx_variance32x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_variance32x(a, a_stride, b, b_stride, sse, n); \
+#define VPX_VARIANCE32XN(n) \
+ uint32_t vpx_variance32x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_variance32x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
VPX_VARIANCE32XN(32)
VPX_VARIANCE32XN(16)
-static inline uint32_t vpx_variance16x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static inline uint32_t vpx_variance16x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse, int high) {
int sum;
double ftmp[13];
@@ -683,20 +685,20 @@ static inline uint32_t vpx_variance16x(const uint8_t *a, int a_stride,
"xor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
"xor %[ftmp12], %[ftmp12], %[ftmp12] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
- "gsldlc1 %[ftmp1], 0x0f(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x08(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x0f(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x08(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x0f(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x08(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x0f(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x08(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp8], %[ftmp11] \n\t"
@@ -721,8 +723,9 @@ static inline uint32_t vpx_variance16x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[ftmp12]"=&f"(ftmp[12]), [tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse), [sum]"r"(&sum)
: "memory"
);
@@ -730,19 +733,19 @@ static inline uint32_t vpx_variance16x(const uint8_t *a, int a_stride,
return *sse - (((int64_t)sum * sum) / (16 * high));
}
-#define VPX_VARIANCE16XN(n) \
- uint32_t vpx_variance16x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_variance16x(a, a_stride, b, b_stride, sse, n); \
+#define VPX_VARIANCE16XN(n) \
+ uint32_t vpx_variance16x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_variance16x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
VPX_VARIANCE16XN(32)
VPX_VARIANCE16XN(16)
VPX_VARIANCE16XN(8)
-static inline uint32_t vpx_variance8x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static inline uint32_t vpx_variance8x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse, int high) {
int sum;
double ftmp[13];
@@ -759,15 +762,15 @@ static inline uint32_t vpx_variance8x(const uint8_t *a, int a_stride,
"xor %[ftmp10], %[ftmp10], %[ftmp10] \n\t"
"xor %[ftmp12], %[ftmp12], %[ftmp12] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_8
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp8], %[ftmp11] \n\t"
@@ -792,8 +795,9 @@ static inline uint32_t vpx_variance8x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[ftmp12]"=&f"(ftmp[12]), [tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse), [sum]"r"(&sum)
: "memory"
);
@@ -801,19 +805,19 @@ static inline uint32_t vpx_variance8x(const uint8_t *a, int a_stride,
return *sse - (((int64_t)sum * sum) / (8 * high));
}
-#define VPX_VARIANCE8XN(n) \
- uint32_t vpx_variance8x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_variance8x(a, a_stride, b, b_stride, sse, n); \
+#define VPX_VARIANCE8XN(n) \
+ uint32_t vpx_variance8x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_variance8x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
VPX_VARIANCE8XN(16)
VPX_VARIANCE8XN(8)
VPX_VARIANCE8XN(4)
-static inline uint32_t vpx_variance4x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+static inline uint32_t vpx_variance4x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse, int high) {
int sum;
double ftmp[12];
@@ -830,15 +834,15 @@ static inline uint32_t vpx_variance4x(const uint8_t *a, int a_stride,
"xor %[ftmp7], %[ftmp7], %[ftmp7] \n\t"
"xor %[ftmp8], %[ftmp8], %[ftmp8] \n\t"
"1: \n\t"
- "gsldlc1 %[ftmp1], 0x07(%[a]) \n\t"
- "gsldrc1 %[ftmp1], 0x00(%[a]) \n\t"
- "gsldlc1 %[ftmp2], 0x07(%[b]) \n\t"
- "gsldrc1 %[ftmp2], 0x00(%[b]) \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src_ptr]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src_ptr]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[ref_ptr]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[ref_ptr]) \n\t"
VARIANCE_SSE_SUM_4
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp6], %[ftmp10] \n\t"
@@ -862,8 +866,9 @@ static inline uint32_t vpx_variance4x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]),
[tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse), [sum]"r"(&sum)
: "memory"
);
@@ -871,19 +876,19 @@ static inline uint32_t vpx_variance4x(const uint8_t *a, int a_stride,
return *sse - (((int64_t)sum * sum) / (4 * high));
}
-#define VPX_VARIANCE4XN(n) \
- uint32_t vpx_variance4x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_variance4x(a, a_stride, b, b_stride, sse, n); \
+#define VPX_VARIANCE4XN(n) \
+ uint32_t vpx_variance4x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_variance4x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
VPX_VARIANCE4XN(8)
VPX_VARIANCE4XN(4)
-static inline uint32_t vpx_mse16x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride, uint32_t *sse,
- uint64_t high) {
+static inline uint32_t vpx_mse16x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ uint32_t *sse, uint64_t high) {
double ftmp[12];
uint32_t tmp[1];
@@ -900,8 +905,8 @@ static inline uint32_t vpx_mse16x(const uint8_t *a, int a_stride,
VARIANCE_SSE_16
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp8], %[ftmp11] \n\t"
@@ -914,8 +919,9 @@ static inline uint32_t vpx_mse16x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse)
: "memory"
);
@@ -923,19 +929,19 @@ static inline uint32_t vpx_mse16x(const uint8_t *a, int a_stride,
return *sse;
}
-#define vpx_mse16xN(n) \
- uint32_t vpx_mse16x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- return vpx_mse16x(a, a_stride, b, b_stride, sse, n); \
+#define vpx_mse16xN(n) \
+ uint32_t vpx_mse16x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_mse16x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
vpx_mse16xN(16);
vpx_mse16xN(8);
-static inline uint32_t vpx_mse8x(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride, uint32_t *sse,
- uint64_t high) {
+static inline uint32_t vpx_mse8x(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ uint32_t *sse, uint64_t high) {
double ftmp[12];
uint32_t tmp[1];
@@ -952,8 +958,8 @@ static inline uint32_t vpx_mse8x(const uint8_t *a, int a_stride,
VARIANCE_SSE_8
"addiu %[tmp0], %[tmp0], -0x01 \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
- MMI_ADDU(%[b], %[b], %[b_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
+ MMI_ADDU(%[ref_ptr], %[ref_ptr], %[ref_stride])
"bnez %[tmp0], 1b \n\t"
"dsrl %[ftmp9], %[ftmp8], %[ftmp11] \n\t"
@@ -966,8 +972,9 @@ static inline uint32_t vpx_mse8x(const uint8_t *a, int a_stride,
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
[ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
[tmp0]"=&r"(tmp[0]),
- [a]"+&r"(a), [b]"+&r"(b)
- : [a_stride]"r"((mips_reg)a_stride),[b_stride]"r"((mips_reg)b_stride),
+ [src_ptr]"+&r"(src_ptr), [ref_ptr]"+&r"(ref_ptr)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [ref_stride]"r"((mips_reg)ref_stride),
[high]"r"(&high), [sse]"r"(sse)
: "memory"
);
@@ -975,28 +982,29 @@ static inline uint32_t vpx_mse8x(const uint8_t *a, int a_stride,
return *sse;
}
-#define vpx_mse8xN(n) \
- uint32_t vpx_mse8x##n##_mmi(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- return vpx_mse8x(a, a_stride, b, b_stride, sse, n); \
+#define vpx_mse8xN(n) \
+ uint32_t vpx_mse8x##n##_mmi(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ return vpx_mse8x(src_ptr, src_stride, ref_ptr, ref_stride, sse, n); \
}
vpx_mse8xN(16);
vpx_mse8xN(8);
-#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- return vpx_variance##W##x##H##_mmi(temp2, W, b, b_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t vpx_sub_pixel_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp2, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR(64, 64)
@@ -1006,9 +1014,10 @@ SUBPIX_VAR(32, 32)
SUBPIX_VAR(32, 16)
SUBPIX_VAR(16, 32)
-static inline void var_filter_block2d_bil_16x(const uint8_t *a, int a_stride,
- int xoffset, int yoffset,
- uint8_t *temp2, int counter) {
+static inline void var_filter_block2d_bil_16x(const uint8_t *src_ptr,
+ int src_stride, int x_offset,
+ int y_offset, uint8_t *temp2,
+ int counter) {
uint8_t *temp2_ptr = temp2;
mips_reg l_counter = counter;
double ftmp[15];
@@ -1016,8 +1025,8 @@ static inline void var_filter_block2d_bil_16x(const uint8_t *a, int a_stride,
DECLARE_ALIGNED(8, const uint64_t, ff_ph_40) = { 0x0040004000400040ULL };
DECLARE_ALIGNED(8, const uint64_t, mask) = { 0x00ff00ff00ff00ffULL };
- const uint8_t *filter_x = bilinear_filters[xoffset];
- const uint8_t *filter_y = bilinear_filters[yoffset];
+ const uint8_t *filter_x = bilinear_filters[x_offset];
+ const uint8_t *filter_y = bilinear_filters[y_offset];
__asm__ volatile (
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
@@ -1031,26 +1040,26 @@ static inline void var_filter_block2d_bil_16x(const uint8_t *a, int a_stride,
// fdata3: fdata3[0] ~ fdata3[15]
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_16_A
- // fdata3 +a_stride*1: fdata3[0] ~ fdata3[15]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*1: fdata3[0] ~ fdata3[15]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_16_B
// temp2: temp2[0] ~ temp2[15]
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_16_A
- // fdata3 +a_stride*2: fdata3[0] ~ fdata3[15]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*2: fdata3[0] ~ fdata3[15]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_16_A
// temp2+16*1: temp2[0] ~ temp2[15]
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x10)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_16_B
"1: \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_16_B
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x10)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_16_A
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_16_A
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x10)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_16_B
@@ -1062,43 +1071,44 @@ static inline void var_filter_block2d_bil_16x(const uint8_t *a, int a_stride,
[ftmp9] "=&f"(ftmp[9]), [ftmp10] "=&f"(ftmp[10]),
[ftmp11] "=&f"(ftmp[11]), [ftmp12] "=&f"(ftmp[12]),
[ftmp13] "=&f"(ftmp[13]), [ftmp14] "=&f"(ftmp[14]),
- [tmp0] "=&r"(tmp[0]), [a] "+&r"(a), [temp2_ptr] "+&r"(temp2_ptr),
+ [tmp0] "=&r"(tmp[0]), [src_ptr] "+&r"(src_ptr), [temp2_ptr] "+&r"(temp2_ptr),
[counter]"+&r"(l_counter)
: [filter_x0] "f"((uint64_t)filter_x[0]),
[filter_x1] "f"((uint64_t)filter_x[1]),
[filter_y0] "f"((uint64_t)filter_y[0]),
[filter_y1] "f"((uint64_t)filter_y[1]),
- [a_stride] "r"((mips_reg)a_stride), [ff_ph_40] "f"(ff_ph_40),
+ [src_stride] "r"((mips_reg)src_stride), [ff_ph_40] "f"(ff_ph_40),
[mask] "f"(mask)
: "memory"
);
}
-#define SUBPIX_VAR16XN(H) \
- uint32_t vpx_sub_pixel_variance16x##H##_mmi( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint8_t temp2[16 * H]; \
- var_filter_block2d_bil_16x(a, a_stride, xoffset, yoffset, temp2, \
- (H - 2) / 2); \
- \
- return vpx_variance16x##H##_mmi(temp2, 16, b, b_stride, sse); \
+#define SUBPIX_VAR16XN(H) \
+ uint32_t vpx_sub_pixel_variance16x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint8_t temp2[16 * H]; \
+ var_filter_block2d_bil_16x(src_ptr, src_stride, x_offset, y_offset, temp2, \
+ (H - 2) / 2); \
+ \
+ return vpx_variance16x##H##_mmi(temp2, 16, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR16XN(16)
SUBPIX_VAR16XN(8)
-static inline void var_filter_block2d_bil_8x(const uint8_t *a, int a_stride,
- int xoffset, int yoffset,
- uint8_t *temp2, int counter) {
+static inline void var_filter_block2d_bil_8x(const uint8_t *src_ptr,
+ int src_stride, int x_offset,
+ int y_offset, uint8_t *temp2,
+ int counter) {
uint8_t *temp2_ptr = temp2;
mips_reg l_counter = counter;
double ftmp[15];
mips_reg tmp[2];
DECLARE_ALIGNED(8, const uint64_t, ff_ph_40) = { 0x0040004000400040ULL };
DECLARE_ALIGNED(8, const uint64_t, mask) = { 0x00ff00ff00ff00ffULL };
- const uint8_t *filter_x = bilinear_filters[xoffset];
- const uint8_t *filter_y = bilinear_filters[yoffset];
+ const uint8_t *filter_x = bilinear_filters[x_offset];
+ const uint8_t *filter_y = bilinear_filters[y_offset];
__asm__ volatile (
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
@@ -1112,26 +1122,26 @@ static inline void var_filter_block2d_bil_8x(const uint8_t *a, int a_stride,
// fdata3: fdata3[0] ~ fdata3[7]
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_A
- // fdata3 +a_stride*1: fdata3[0] ~ fdata3[7]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*1: fdata3[0] ~ fdata3[7]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_B
// temp2: temp2[0] ~ temp2[7]
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_8_A
- // fdata3 +a_stride*2: fdata3[0] ~ fdata3[7]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*2: fdata3[0] ~ fdata3[7]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_A
// temp2+8*1: temp2[0] ~ temp2[7]
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x08)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_8_B
"1: \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_B
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x08)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_8_A
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_8_A
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x08)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_8_B
@@ -1143,44 +1153,45 @@ static inline void var_filter_block2d_bil_8x(const uint8_t *a, int a_stride,
[ftmp9] "=&f"(ftmp[9]), [ftmp10] "=&f"(ftmp[10]),
[ftmp11] "=&f"(ftmp[11]), [ftmp12] "=&f"(ftmp[12]),
[ftmp13] "=&f"(ftmp[13]), [ftmp14] "=&f"(ftmp[14]),
- [tmp0] "=&r"(tmp[0]), [a] "+&r"(a), [temp2_ptr] "+&r"(temp2_ptr),
+ [tmp0] "=&r"(tmp[0]), [src_ptr] "+&r"(src_ptr), [temp2_ptr] "+&r"(temp2_ptr),
[counter]"+&r"(l_counter)
: [filter_x0] "f"((uint64_t)filter_x[0]),
[filter_x1] "f"((uint64_t)filter_x[1]),
[filter_y0] "f"((uint64_t)filter_y[0]),
[filter_y1] "f"((uint64_t)filter_y[1]),
- [a_stride] "r"((mips_reg)a_stride), [ff_ph_40] "f"(ff_ph_40),
+ [src_stride] "r"((mips_reg)src_stride), [ff_ph_40] "f"(ff_ph_40),
[mask] "f"(mask)
: "memory"
);
}
-#define SUBPIX_VAR8XN(H) \
- uint32_t vpx_sub_pixel_variance8x##H##_mmi( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint8_t temp2[8 * H]; \
- var_filter_block2d_bil_8x(a, a_stride, xoffset, yoffset, temp2, \
- (H - 2) / 2); \
- \
- return vpx_variance8x##H##_mmi(temp2, 8, b, b_stride, sse); \
+#define SUBPIX_VAR8XN(H) \
+ uint32_t vpx_sub_pixel_variance8x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint8_t temp2[8 * H]; \
+ var_filter_block2d_bil_8x(src_ptr, src_stride, x_offset, y_offset, temp2, \
+ (H - 2) / 2); \
+ \
+ return vpx_variance8x##H##_mmi(temp2, 8, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR8XN(16)
SUBPIX_VAR8XN(8)
SUBPIX_VAR8XN(4)
-static inline void var_filter_block2d_bil_4x(const uint8_t *a, int a_stride,
- int xoffset, int yoffset,
- uint8_t *temp2, int counter) {
+static inline void var_filter_block2d_bil_4x(const uint8_t *src_ptr,
+ int src_stride, int x_offset,
+ int y_offset, uint8_t *temp2,
+ int counter) {
uint8_t *temp2_ptr = temp2;
mips_reg l_counter = counter;
double ftmp[7];
mips_reg tmp[2];
DECLARE_ALIGNED(8, const uint64_t, ff_ph_40) = { 0x0040004000400040ULL };
DECLARE_ALIGNED(8, const uint64_t, mask) = { 0x00ff00ff00ff00ffULL };
- const uint8_t *filter_x = bilinear_filters[xoffset];
- const uint8_t *filter_y = bilinear_filters[yoffset];
+ const uint8_t *filter_x = bilinear_filters[x_offset];
+ const uint8_t *filter_y = bilinear_filters[y_offset];
__asm__ volatile (
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
@@ -1193,26 +1204,26 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *a, int a_stride,
// fdata3: fdata3[0] ~ fdata3[3]
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_A
- // fdata3 +a_stride*1: fdata3[0] ~ fdata3[3]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*1: fdata3[0] ~ fdata3[3]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_B
// temp2: temp2[0] ~ temp2[7]
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_4_A
- // fdata3 +a_stride*2: fdata3[0] ~ fdata3[3]
- MMI_ADDU(%[a], %[a], %[a_stride])
+ // fdata3 +src_stride*2: fdata3[0] ~ fdata3[3]
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_A
// temp2+4*1: temp2[0] ~ temp2[7]
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x04)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_4_B
"1: \n\t"
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_B
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x04)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_4_A
- MMI_ADDU(%[a], %[a], %[a_stride])
+ MMI_ADDU(%[src_ptr], %[src_ptr], %[src_stride])
VAR_FILTER_BLOCK2D_BIL_FIRST_PASS_4_A
MMI_ADDIU(%[temp2_ptr], %[temp2_ptr], 0x04)
VAR_FILTER_BLOCK2D_BIL_SECOND_PASS_4_B
@@ -1220,49 +1231,49 @@ static inline void var_filter_block2d_bil_4x(const uint8_t *a, int a_stride,
"bnez %[counter], 1b \n\t"
: [ftmp0] "=&f"(ftmp[0]), [ftmp1] "=&f"(ftmp[1]), [ftmp2] "=&f"(ftmp[2]),
[ftmp3] "=&f"(ftmp[3]), [ftmp4] "=&f"(ftmp[4]), [ftmp5] "=&f"(ftmp[5]),
- [ftmp6] "=&f"(ftmp[6]), [tmp0] "=&r"(tmp[0]), [a] "+&r"(a),
+ [ftmp6] "=&f"(ftmp[6]), [tmp0] "=&r"(tmp[0]), [src_ptr] "+&r"(src_ptr),
[temp2_ptr] "+&r"(temp2_ptr), [counter]"+&r"(l_counter)
: [filter_x0] "f"((uint64_t)filter_x[0]),
[filter_x1] "f"((uint64_t)filter_x[1]),
[filter_y0] "f"((uint64_t)filter_y[0]),
[filter_y1] "f"((uint64_t)filter_y[1]),
- [a_stride] "r"((mips_reg)a_stride), [ff_ph_40] "f"(ff_ph_40),
+ [src_stride] "r"((mips_reg)src_stride), [ff_ph_40] "f"(ff_ph_40),
[mask] "f"(mask)
: "memory"
);
}
-#define SUBPIX_VAR4XN(H) \
- uint32_t vpx_sub_pixel_variance4x##H##_mmi( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint8_t temp2[4 * H]; \
- var_filter_block2d_bil_4x(a, a_stride, xoffset, yoffset, temp2, \
- (H - 2) / 2); \
- \
- return vpx_variance4x##H##_mmi(temp2, 4, b, b_stride, sse); \
+#define SUBPIX_VAR4XN(H) \
+ uint32_t vpx_sub_pixel_variance4x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint8_t temp2[4 * H]; \
+ var_filter_block2d_bil_4x(src_ptr, src_stride, x_offset, y_offset, temp2, \
+ (H - 2) / 2); \
+ \
+ return vpx_variance4x##H##_mmi(temp2, 4, ref_ptr, ref_stride, sse); \
}
SUBPIX_VAR4XN(8)
SUBPIX_VAR4XN(4)
-#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
- \
- return vpx_variance##W##x##H##_mmi(temp3, W, b, b_stride, sse); \
+#define SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_mmi( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
+ \
+ return vpx_variance##W##x##H##_mmi(temp3, W, ref_ptr, ref_stride, sse); \
}
SUBPIX_AVG_VAR(64, 64)
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
index 187a013421a..5b5a1cbc3a5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c
@@ -658,7 +658,7 @@ void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
filt_hor[cnt] = filter_x[cnt];
}
- if (((const int32_t *)filter_x)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2) {
switch (w) {
case 4:
common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c
index 5187cea21c9..ba816192a1f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c
@@ -538,8 +538,8 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
filt_ver[cnt] = filter_y[cnt];
}
- if (((const int32_t *)filter_x)[0] == 0 &&
- ((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2 &&
+ vpx_get_filter_taps(filter_y) == 2) {
switch (w) {
case 4:
common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
@@ -571,8 +571,8 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride,
x_step_q4, y0_q4, y_step_q4, w, h);
break;
}
- } else if (((const int32_t *)filter_x)[0] == 0 ||
- ((const int32_t *)filter_y)[0] == 0) {
+ } else if (vpx_get_filter_taps(filter_x) == 2 ||
+ vpx_get_filter_taps(filter_y) == 2) {
vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter, x0_q4,
x_step_q4, y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
index ef8c9011406..e6a790dfc6d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
@@ -625,7 +625,7 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
filt_ver[cnt] = filter_y[cnt];
}
- if (((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_y) == 2) {
switch (w) {
case 4:
common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
index 152dc26104e..792c0f709c4 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c
@@ -634,7 +634,7 @@ void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride,
filt_hor[cnt] = filter_x[cnt];
}
- if (((const int32_t *)filter_x)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2) {
switch (w) {
case 4:
common_hz_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c
index d35a5a7a639..c942167587b 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c
@@ -558,8 +558,8 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
filt_ver[cnt] = filter_y[cnt];
}
- if (((const int32_t *)filter_x)[0] == 0 &&
- ((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_x) == 2 &&
+ vpx_get_filter_taps(filter_y) == 2) {
switch (w) {
case 4:
common_hv_2ht_2vt_4w_msa(src, (int32_t)src_stride, dst,
@@ -591,8 +591,8 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
x_step_q4, y0_q4, y_step_q4, w, h);
break;
}
- } else if (((const int32_t *)filter_x)[0] == 0 ||
- ((const int32_t *)filter_y)[0] == 0) {
+ } else if (vpx_get_filter_taps(filter_x) == 2 ||
+ vpx_get_filter_taps(filter_y) == 2) {
vpx_convolve8_c(src, src_stride, dst, dst_stride, filter, x0_q4, x_step_q4,
y0_q4, y_step_q4, w, h);
} else {
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c
index 13fce0077c9..195228689e0 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c
@@ -641,7 +641,7 @@ void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
filt_ver[cnt] = filter_y[cnt];
}
- if (((const int32_t *)filter_y)[0] == 0) {
+ if (vpx_get_filter_taps(filter_y) == 2) {
switch (w) {
case 4:
common_vt_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c
index 50311d1b01c..8926160c24f 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/ppc/variance_vsx.c
@@ -14,14 +14,16 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"
-uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride) {
+uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride) {
int distortion;
- const int16x8_t a0 = unpack_to_s16_h(read4x2(a, a_stride));
- const int16x8_t a1 = unpack_to_s16_h(read4x2(a + a_stride * 2, a_stride));
- const int16x8_t b0 = unpack_to_s16_h(read4x2(b, b_stride));
- const int16x8_t b1 = unpack_to_s16_h(read4x2(b + b_stride * 2, b_stride));
+ const int16x8_t a0 = unpack_to_s16_h(read4x2(src_ptr, src_stride));
+ const int16x8_t a1 =
+ unpack_to_s16_h(read4x2(src_ptr + src_stride * 2, src_stride));
+ const int16x8_t b0 = unpack_to_s16_h(read4x2(ref_ptr, ref_stride));
+ const int16x8_t b1 =
+ unpack_to_s16_h(read4x2(ref_ptr + ref_stride * 2, ref_stride));
const int16x8_t d0 = vec_sub(a0, b0);
const int16x8_t d1 = vec_sub(a1, b1);
const int32x4_t ds = vec_msum(d1, d1, vec_msum(d0, d0, vec_splat_s32(0)));
@@ -33,12 +35,12 @@ uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *a, int a_stride, const uint8_t *b,
}
// TODO(lu_zero): Unroll
-uint32_t vpx_get_mb_ss_vsx(const int16_t *a) {
+uint32_t vpx_get_mb_ss_vsx(const int16_t *src_ptr) {
unsigned int i, sum = 0;
int32x4_t s = vec_splat_s32(0);
for (i = 0; i < 256; i += 8) {
- const int16x8_t v = vec_vsx_ld(0, a + i);
+ const int16x8_t v = vec_vsx_ld(0, src_ptr + i);
s = vec_msum(v, v, s);
}
@@ -96,15 +98,16 @@ void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,
}
}
-static INLINE void variance_inner_32(const uint8_t *a, const uint8_t *b,
+static INLINE void variance_inner_32(const uint8_t *src_ptr,
+ const uint8_t *ref_ptr,
int32x4_t *sum_squared, int32x4_t *sum) {
int32x4_t s = *sum;
int32x4_t ss = *sum_squared;
- const uint8x16_t va0 = vec_vsx_ld(0, a);
- const uint8x16_t vb0 = vec_vsx_ld(0, b);
- const uint8x16_t va1 = vec_vsx_ld(16, a);
- const uint8x16_t vb1 = vec_vsx_ld(16, b);
+ const uint8x16_t va0 = vec_vsx_ld(0, src_ptr);
+ const uint8x16_t vb0 = vec_vsx_ld(0, ref_ptr);
+ const uint8x16_t va1 = vec_vsx_ld(16, src_ptr);
+ const uint8x16_t vb1 = vec_vsx_ld(16, ref_ptr);
const int16x8_t a0 = unpack_to_s16_h(va0);
const int16x8_t b0 = unpack_to_s16_h(vb0);
@@ -131,9 +134,9 @@ static INLINE void variance_inner_32(const uint8_t *a, const uint8_t *b,
*sum_squared = ss;
}
-static INLINE void variance(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int w, int h, uint32_t *sse,
- int *sum) {
+static INLINE void variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride, int w,
+ int h, uint32_t *sse, int *sum) {
int i;
int32x4_t s = vec_splat_s32(0);
@@ -142,31 +145,31 @@ static INLINE void variance(const uint8_t *a, int a_stride, const uint8_t *b,
switch (w) {
case 4:
for (i = 0; i < h / 2; ++i) {
- const int16x8_t a0 = unpack_to_s16_h(read4x2(a, a_stride));
- const int16x8_t b0 = unpack_to_s16_h(read4x2(b, b_stride));
+ const int16x8_t a0 = unpack_to_s16_h(read4x2(src_ptr, src_stride));
+ const int16x8_t b0 = unpack_to_s16_h(read4x2(ref_ptr, ref_stride));
const int16x8_t d = vec_sub(a0, b0);
s = vec_sum4s(d, s);
ss = vec_msum(d, d, ss);
- a += a_stride * 2;
- b += b_stride * 2;
+ src_ptr += src_stride * 2;
+ ref_ptr += ref_stride * 2;
}
break;
case 8:
for (i = 0; i < h; ++i) {
- const int16x8_t a0 = unpack_to_s16_h(vec_vsx_ld(0, a));
- const int16x8_t b0 = unpack_to_s16_h(vec_vsx_ld(0, b));
+ const int16x8_t a0 = unpack_to_s16_h(vec_vsx_ld(0, src_ptr));
+ const int16x8_t b0 = unpack_to_s16_h(vec_vsx_ld(0, ref_ptr));
const int16x8_t d = vec_sub(a0, b0);
s = vec_sum4s(d, s);
ss = vec_msum(d, d, ss);
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
break;
case 16:
for (i = 0; i < h; ++i) {
- const uint8x16_t va = vec_vsx_ld(0, a);
- const uint8x16_t vb = vec_vsx_ld(0, b);
+ const uint8x16_t va = vec_vsx_ld(0, src_ptr);
+ const uint8x16_t vb = vec_vsx_ld(0, ref_ptr);
const int16x8_t a0 = unpack_to_s16_h(va);
const int16x8_t b0 = unpack_to_s16_h(vb);
const int16x8_t a1 = unpack_to_s16_l(va);
@@ -179,24 +182,24 @@ static INLINE void variance(const uint8_t *a, int a_stride, const uint8_t *b,
s = vec_sum4s(d1, s);
ss = vec_msum(d1, d1, ss);
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
break;
case 32:
for (i = 0; i < h; ++i) {
- variance_inner_32(a, b, &ss, &s);
- a += a_stride;
- b += b_stride;
+ variance_inner_32(src_ptr, ref_ptr, &ss, &s);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
break;
case 64:
for (i = 0; i < h; ++i) {
- variance_inner_32(a, b, &ss, &s);
- variance_inner_32(a + 32, b + 32, &ss, &s);
+ variance_inner_32(src_ptr, ref_ptr, &ss, &s);
+ variance_inner_32(src_ptr + 32, ref_ptr + 32, &ss, &s);
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
break;
}
@@ -214,33 +217,33 @@ static INLINE void variance(const uint8_t *a, int a_stride, const uint8_t *b,
* and returns that value using pass-by-reference instead of returning
* sse - sum^2 / w*h
*/
-#define GET_VAR(W, H) \
- void vpx_get##W##x##H##var_vsx(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse, int *sum) { \
- variance(a, a_stride, b, b_stride, W, H, sse, sum); \
+#define GET_VAR(W, H) \
+ void vpx_get##W##x##H##var_vsx(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse, int *sum) { \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, sum); \
}
/* Identical to the variance call except it does not calculate the
* sse - sum^2 / w*h and returns sse in addtion to modifying the passed in
* variable.
*/
-#define MSE(W, H) \
- uint32_t vpx_mse##W##x##H##_vsx(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse; \
+#define MSE(W, H) \
+ uint32_t vpx_mse##W##x##H##_vsx(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ int sum; \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
+ return *sse; \
}
-#define VAR(W, H) \
- uint32_t vpx_variance##W##x##H##_vsx(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+#define VAR(W, H) \
+ uint32_t vpx_variance##W##x##H##_vsx(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ int sum; \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
}
#define VARIANCES(W, H) VAR(W, H)
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.c
index e37ca92ad45..82a65959236 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.c
@@ -17,7 +17,7 @@
void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr, uint16_t *eob_ptr) {
+ const int16_t dequant, uint16_t *eob_ptr) {
const int rc = 0;
const int coeff = coeff_ptr[rc];
const int coeff_sign = (coeff >> 31);
@@ -31,7 +31,7 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
tmp = (tmp * quant) >> 16;
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
- dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr;
+ dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant;
if (tmp) eob = 0;
}
*eob_ptr = eob + 1;
@@ -41,7 +41,7 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t dequant,
uint16_t *eob_ptr) {
int eob = -1;
@@ -55,7 +55,7 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
const int64_t tmp = abs_coeff + round_ptr[0];
const int abs_qcoeff = (int)((tmp * quant) >> 16);
qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
- dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr;
+ dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant;
if (abs_qcoeff) eob = 0;
}
*eob_ptr = eob + 1;
@@ -65,7 +65,7 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr, uint16_t *eob_ptr) {
+ const int16_t dequant, uint16_t *eob_ptr) {
const int n_coeffs = 1024;
const int rc = 0;
const int coeff = coeff_ptr[rc];
@@ -81,7 +81,7 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
INT16_MIN, INT16_MAX);
tmp = (tmp * quant) >> 15;
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
- dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr / 2;
+ dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant / 2;
if (tmp) eob = 0;
}
*eob_ptr = eob + 1;
@@ -92,8 +92,7 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr,
- uint16_t *eob_ptr) {
+ const int16_t dequant, uint16_t *eob_ptr) {
const int n_coeffs = 1024;
int eob = -1;
@@ -107,7 +106,7 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
const int64_t tmp = abs_coeff + ROUND_POWER_OF_TWO(round_ptr[0], 1);
const int abs_qcoeff = (int)((tmp * quant) >> 15);
qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
- dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr / 2;
+ dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant / 2;
if (abs_qcoeff) eob = 0;
}
*eob_ptr = eob + 1;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.h
index 94c8206d9c3..7cac140e9de 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/quantize.h
@@ -19,26 +19,25 @@ extern "C" {
#endif
void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
- const int16_t *round_ptr, const int16_t quant_ptr,
+ const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr, uint16_t *eob_ptr);
+ const int16_t dequant, uint16_t *eob_ptr);
void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
- const int16_t *round_ptr, const int16_t quant_ptr,
+ const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr, uint16_t *eob_ptr);
+ const int16_t dequant, uint16_t *eob_ptr);
#if CONFIG_VP9_HIGHBITDEPTH
void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
- const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+ const int16_t quant, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t dequant,
uint16_t *eob_ptr);
void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
- const int16_t *round_ptr,
- const int16_t quant_ptr,
+ const int16_t *round_ptr, const int16_t quant,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
- const int16_t dequant_ptr, uint16_t *eob_ptr);
+ const int16_t dequant, uint16_t *eob_ptr);
#endif
#ifdef __cplusplus
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/sad.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/sad.c
index d4a5329688b..873ddca0938 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/sad.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/sad.c
@@ -17,54 +17,55 @@
#include "vpx_ports/mem.h"
/* Sum the difference between every corresponding element of the buffers. */
-static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int width, int height) {
+static INLINE unsigned int sad(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ int width, int height) {
int y, x;
unsigned int sad = 0;
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
+ for (x = 0; x < width; x++) sad += abs(src_ptr[x] - ref_ptr[x]);
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
return sad;
}
-#define sadMxN(m, n) \
- unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride) { \
- return sad(src, src_stride, ref, ref_stride, m, n); \
- } \
- unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
- DECLARE_ALIGNED(16, uint8_t, comp_pred[m * n]); \
- vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
- return sad(src, src_stride, comp_pred, m, m, n); \
+#define sadMxN(m, n) \
+ unsigned int vpx_sad##m##x##n##_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ return sad(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \
+ } \
+ unsigned int vpx_sad##m##x##n##_avg_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ DECLARE_ALIGNED(16, uint8_t, comp_pred[m * n]); \
+ vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref_ptr, ref_stride); \
+ return sad(src_ptr, src_stride, comp_pred, m, m, n); \
}
// depending on call sites, pass **ref_array to avoid & in subsequent call and
// de-dup with 4D below.
-#define sadMxNxK(m, n, k) \
- void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref_array, int ref_stride, \
- uint32_t *sad_array) { \
- int i; \
- for (i = 0; i < k; ++i) \
- sad_array[i] = \
- vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \
+#define sadMxNxK(m, n, k) \
+ void vpx_sad##m##x##n##x##k##_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sad_array) { \
+ int i; \
+ for (i = 0; i < k; ++i) \
+ sad_array[i] = \
+ vpx_sad##m##x##n##_c(src_ptr, src_stride, &ref_ptr[i], ref_stride); \
}
// This appears to be equivalent to the above when k == 4 and refs is const
-#define sadMxNx4D(m, n) \
- void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
- const uint8_t *const ref_array[], \
- int ref_stride, uint32_t *sad_array) { \
- int i; \
- for (i = 0; i < 4; ++i) \
- sad_array[i] = \
- vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \
+#define sadMxNx4D(m, n) \
+ void vpx_sad##m##x##n##x4d_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *const ref_array[], \
+ int ref_stride, uint32_t *sad_array) { \
+ int i; \
+ for (i = 0; i < 4; ++i) \
+ sad_array[i] = \
+ vpx_sad##m##x##n##_c(src_ptr, src_stride, ref_array[i], ref_stride); \
}
/* clang-format off */
@@ -133,60 +134,61 @@ sadMxNx4D(4, 4)
#if CONFIG_VP9_HIGHBITDEPTH
static INLINE
- unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
- int b_stride, int width, int height) {
+ unsigned int highbd_sad(const uint8_t *src8_ptr, int src_stride,
+ const uint8_t *ref8_ptr, int ref_stride, int width,
+ int height) {
int y, x;
unsigned int sad = 0;
- const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
- const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8_ptr);
+ const uint16_t *ref_ptr = CONVERT_TO_SHORTPTR(ref8_ptr);
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
+ for (x = 0; x < width; x++) sad += abs(src[x] - ref_ptr[x]);
- a += a_stride;
- b += b_stride;
+ src += src_stride;
+ ref_ptr += ref_stride;
}
return sad;
}
-static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride,
- const uint16_t *b, int b_stride,
+static INLINE unsigned int highbd_sadb(const uint8_t *src8_ptr, int src_stride,
+ const uint16_t *ref_ptr, int ref_stride,
int width, int height) {
int y, x;
unsigned int sad = 0;
- const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8_ptr);
for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) sad += abs(a[x] - b[x]);
+ for (x = 0; x < width; x++) sad += abs(src[x] - ref_ptr[x]);
- a += a_stride;
- b += b_stride;
+ src += src_stride;
+ ref_ptr += ref_stride;
}
return sad;
}
#define highbd_sadMxN(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, \
- int ref_stride) { \
- return highbd_sad(src, src_stride, ref, ref_stride, m, n); \
+ unsigned int vpx_highbd_sad##m##x##n##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ return highbd_sad(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \
} \
unsigned int vpx_highbd_sad##m##x##n##_avg_c( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred) { \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
DECLARE_ALIGNED(16, uint16_t, comp_pred[m * n]); \
vpx_highbd_comp_avg_pred_c(comp_pred, CONVERT_TO_SHORTPTR(second_pred), m, \
- n, CONVERT_TO_SHORTPTR(ref), ref_stride); \
- return highbd_sadb(src, src_stride, comp_pred, m, m, n); \
+ n, CONVERT_TO_SHORTPTR(ref_ptr), ref_stride); \
+ return highbd_sadb(src_ptr, src_stride, comp_pred, m, m, n); \
}
-#define highbd_sadMxNx4D(m, n) \
- void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
- const uint8_t *const ref_array[], \
- int ref_stride, uint32_t *sad_array) { \
- int i; \
- for (i = 0; i < 4; ++i) { \
- sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \
- ref_array[i], ref_stride); \
- } \
+#define highbd_sadMxNx4D(m, n) \
+ void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *const ref_array[], \
+ int ref_stride, uint32_t *sad_array) { \
+ int i; \
+ for (i = 0; i < 4; ++i) { \
+ sad_array[i] = vpx_highbd_sad##m##x##n##_c(src_ptr, src_stride, \
+ ref_array[i], ref_stride); \
+ } \
}
/* clang-format off */
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/subtract.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/subtract.c
index 95e7071b27e..45c819e67a7 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/subtract.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/subtract.c
@@ -16,37 +16,37 @@
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
-void vpx_subtract_block_c(int rows, int cols, int16_t *diff,
- ptrdiff_t diff_stride, const uint8_t *src,
- ptrdiff_t src_stride, const uint8_t *pred,
+void vpx_subtract_block_c(int rows, int cols, int16_t *diff_ptr,
+ ptrdiff_t diff_stride, const uint8_t *src_ptr,
+ ptrdiff_t src_stride, const uint8_t *pred_ptr,
ptrdiff_t pred_stride) {
int r, c;
for (r = 0; r < rows; r++) {
- for (c = 0; c < cols; c++) diff[c] = src[c] - pred[c];
+ for (c = 0; c < cols; c++) diff_ptr[c] = src_ptr[c] - pred_ptr[c];
- diff += diff_stride;
- pred += pred_stride;
- src += src_stride;
+ diff_ptr += diff_stride;
+ pred_ptr += pred_stride;
+ src_ptr += src_stride;
}
}
#if CONFIG_VP9_HIGHBITDEPTH
-void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
- ptrdiff_t diff_stride, const uint8_t *src8,
- ptrdiff_t src_stride, const uint8_t *pred8,
+void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff_ptr,
+ ptrdiff_t diff_stride, const uint8_t *src8_ptr,
+ ptrdiff_t src_stride, const uint8_t *pred8_ptr,
ptrdiff_t pred_stride, int bd) {
int r, c;
- uint16_t *src = CONVERT_TO_SHORTPTR(src8);
- uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ uint16_t *src = CONVERT_TO_SHORTPTR(src8_ptr);
+ uint16_t *pred = CONVERT_TO_SHORTPTR(pred8_ptr);
(void)bd;
for (r = 0; r < rows; r++) {
for (c = 0; c < cols; c++) {
- diff[c] = src[c] - pred[c];
+ diff_ptr[c] = src[c] - pred[c];
}
- diff += diff_stride;
+ diff_ptr += diff_stride;
pred += pred_stride;
src += src_stride;
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.c
index a7eef0d0b99..30b55dcb407 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.c
@@ -21,36 +21,37 @@ static const uint8_t bilinear_filters[8][2] = {
{ 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 },
};
-uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride) {
+uint32_t vpx_get4x4sse_cs_c(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride) {
int distortion = 0;
int r, c;
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
- int diff = a[c] - b[c];
+ int diff = src_ptr[c] - ref_ptr[c];
distortion += diff * diff;
}
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
return distortion;
}
-uint32_t vpx_get_mb_ss_c(const int16_t *a) {
+uint32_t vpx_get_mb_ss_c(const int16_t *src_ptr) {
unsigned int i, sum = 0;
for (i = 0; i < 256; ++i) {
- sum += a[i] * a[i];
+ sum += src_ptr[i] * src_ptr[i];
}
return sum;
}
-static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
- int b_stride, int w, int h, uint32_t *sse, int *sum) {
+static void variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride, int w, int h,
+ uint32_t *sse, int *sum) {
int i, j;
*sum = 0;
@@ -58,13 +59,13 @@ static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int diff = a[j] - b[j];
+ const int diff = src_ptr[j] - ref_ptr[j];
*sum += diff;
*sse += diff * diff;
}
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
@@ -76,24 +77,23 @@ static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
// taps should sum to FILTER_WEIGHT. pixel_step defines whether the filter is
// applied horizontally (pixel_step = 1) or vertically (pixel_step = stride).
// It defines the offset required to move from one input to the next.
-static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
- unsigned int src_pixels_per_line,
- int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_first_pass(
+ const uint8_t *src_ptr, uint16_t *ref_ptr, unsigned int src_pixels_per_line,
+ int pixel_step, unsigned int output_height, unsigned int output_width,
+ const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
- b[j] = ROUND_POWER_OF_TWO(
- (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
+ ref_ptr[j] = ROUND_POWER_OF_TWO(
+ (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+ FILTER_BITS);
- ++a;
+ ++src_ptr;
}
- a += src_pixels_per_line - output_width;
- b += output_width;
+ src_ptr += src_pixels_per_line - output_width;
+ ref_ptr += output_width;
}
}
@@ -106,91 +106,90 @@ static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
// filter is applied horizontally (pixel_step = 1) or vertically
// (pixel_step = stride). It defines the offset required to move from one input
// to the next. Output is 8-bit.
-static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
- unsigned int src_pixels_per_line,
- unsigned int pixel_step,
- unsigned int output_height,
- unsigned int output_width,
- const uint8_t *filter) {
+static void var_filter_block2d_bil_second_pass(
+ const uint16_t *src_ptr, uint8_t *ref_ptr, unsigned int src_pixels_per_line,
+ unsigned int pixel_step, unsigned int output_height,
+ unsigned int output_width, const uint8_t *filter) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
- b[j] = ROUND_POWER_OF_TWO(
- (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
- ++a;
+ ref_ptr[j] = ROUND_POWER_OF_TWO(
+ (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
+ FILTER_BITS);
+ ++src_ptr;
}
- a += src_pixels_per_line - output_width;
- b += output_width;
+ src_ptr += src_pixels_per_line - output_width;
+ ref_ptr += output_width;
}
}
-#define VAR(W, H) \
- uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+#define VAR(W, H) \
+ uint32_t vpx_variance##W##x##H##_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ int sum; \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
}
-#define SUBPIX_VAR(W, H) \
- uint32_t vpx_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- \
- var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
+#define SUBPIX_VAR(W, H) \
+ uint32_t vpx_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_variance##W##x##H##_c(temp2, W, ref_ptr, ref_stride, sse); \
}
-#define SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c( \
- const uint8_t *a, int a_stride, int xoffset, int yoffset, \
- const uint8_t *b, int b_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
- bilinear_filters[xoffset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
- \
- return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
+#define SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
+ \
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_stride, 1, H + 1, \
+ W, bilinear_filters[x_offset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_comp_avg_pred_c(temp3, second_pred, W, H, temp2, W); \
+ \
+ return vpx_variance##W##x##H##_c(temp3, W, ref_ptr, ref_stride, sse); \
}
/* Identical to the variance call except it takes an additional parameter, sum,
* and returns that value using pass-by-reference instead of returning
* sse - sum^2 / w*h
*/
-#define GET_VAR(W, H) \
- void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, uint32_t *sse, \
- int *sum) { \
- variance(a, a_stride, b, b_stride, W, H, sse, sum); \
+#define GET_VAR(W, H) \
+ void vpx_get##W##x##H##var_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse, int *sum) { \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, sum); \
}
/* Identical to the variance call except it does not calculate the
* sse - sum^2 / w*h and returns sse in addtion to modifying the passed in
* variable.
*/
-#define MSE(W, H) \
- uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse; \
+#define MSE(W, H) \
+ uint32_t vpx_mse##W##x##H##_c(const uint8_t *src_ptr, int src_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ uint32_t *sse) { \
+ int sum; \
+ variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, &sum); \
+ return *sse; \
}
/* All three forms of the variance are available in the same sizes. */
@@ -237,128 +236,140 @@ void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
}
#if CONFIG_VP9_HIGHBITDEPTH
-static void highbd_variance64(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride, int w, int h,
- uint64_t *sse, int64_t *sum) {
+static void highbd_variance64(const uint8_t *src8_ptr, int src_stride,
+ const uint8_t *ref8_ptr, int ref_stride, int w,
+ int h, uint64_t *sse, int64_t *sum) {
int i, j;
- uint16_t *a = CONVERT_TO_SHORTPTR(a8);
- uint16_t *b = CONVERT_TO_SHORTPTR(b8);
+ uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8_ptr);
+ uint16_t *ref_ptr = CONVERT_TO_SHORTPTR(ref8_ptr);
*sum = 0;
*sse = 0;
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
- const int diff = a[j] - b[j];
+ const int diff = src_ptr[j] - ref_ptr[j];
*sum += diff;
*sse += diff * diff;
}
- a += a_stride;
- b += b_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
-static void highbd_8_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride, int w, int h,
- uint32_t *sse, int *sum) {
+static void highbd_8_variance(const uint8_t *src8_ptr, int src_stride,
+ const uint8_t *ref8_ptr, int ref_stride, int w,
+ int h, uint32_t *sse, int *sum) {
uint64_t sse_long = 0;
int64_t sum_long = 0;
- highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
+ highbd_variance64(src8_ptr, src_stride, ref8_ptr, ref_stride, w, h, &sse_long,
+ &sum_long);
*sse = (uint32_t)sse_long;
*sum = (int)sum_long;
}
-static void highbd_10_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride, int w, int h,
- uint32_t *sse, int *sum) {
+static void highbd_10_variance(const uint8_t *src8_ptr, int src_stride,
+ const uint8_t *ref8_ptr, int ref_stride, int w,
+ int h, uint32_t *sse, int *sum) {
uint64_t sse_long = 0;
int64_t sum_long = 0;
- highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
+ highbd_variance64(src8_ptr, src_stride, ref8_ptr, ref_stride, w, h, &sse_long,
+ &sum_long);
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4);
*sum = (int)ROUND_POWER_OF_TWO(sum_long, 2);
}
-static void highbd_12_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride, int w, int h,
- uint32_t *sse, int *sum) {
+static void highbd_12_variance(const uint8_t *src8_ptr, int src_stride,
+ const uint8_t *ref8_ptr, int ref_stride, int w,
+ int h, uint32_t *sse, int *sum) {
uint64_t sse_long = 0;
int64_t sum_long = 0;
- highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
+ highbd_variance64(src8_ptr, src_stride, ref8_ptr, ref_stride, w, h, &sse_long,
+ &sum_long);
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8);
*sum = (int)ROUND_POWER_OF_TWO(sum_long, 4);
}
-#define HIGHBD_VAR(W, H) \
- uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
- } \
- \
- uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- int64_t var; \
- highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
- return (var >= 0) ? (uint32_t)var : 0; \
- } \
- \
- uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
- const uint8_t *b, int b_stride, \
- uint32_t *sse) { \
- int sum; \
- int64_t var; \
- highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
- return (var >= 0) ? (uint32_t)var : 0; \
+#define HIGHBD_VAR(W, H) \
+ uint32_t vpx_highbd_8_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ highbd_8_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ } \
+ \
+ uint32_t vpx_highbd_10_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ highbd_10_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
+ } \
+ \
+ uint32_t vpx_highbd_12_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ highbd_12_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
-#define HIGHBD_GET_VAR(S) \
- void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse, int *sum) { \
- highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
- } \
- \
- void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse, int *sum) { \
- highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
- } \
- \
- void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse, int *sum) { \
- highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \
+#define HIGHBD_GET_VAR(S) \
+ void vpx_highbd_8_get##S##x##S##var_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse, int *sum) { \
+ highbd_8_variance(src_ptr, src_stride, ref_ptr, ref_stride, S, S, sse, \
+ sum); \
+ } \
+ \
+ void vpx_highbd_10_get##S##x##S##var_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse, int *sum) { \
+ highbd_10_variance(src_ptr, src_stride, ref_ptr, ref_stride, S, S, sse, \
+ sum); \
+ } \
+ \
+ void vpx_highbd_12_get##S##x##S##var_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse, int *sum) { \
+ highbd_12_variance(src_ptr, src_stride, ref_ptr, ref_stride, S, S, sse, \
+ sum); \
}
-#define HIGHBD_MSE(W, H) \
- uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse) { \
- int sum; \
- highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
- return *sse; \
- } \
- \
- uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse) { \
- int sum; \
- highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
- return *sse; \
- } \
- \
- uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
- const uint8_t *ref, int ref_stride, \
- uint32_t *sse) { \
- int sum; \
- highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \
- return *sse; \
+#define HIGHBD_MSE(W, H) \
+ uint32_t vpx_highbd_8_mse##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ highbd_8_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ return *sse; \
+ } \
+ \
+ uint32_t vpx_highbd_10_mse##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ highbd_10_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ return *sse; \
+ } \
+ \
+ uint32_t vpx_highbd_12_mse##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ highbd_12_variance(src_ptr, src_stride, ref_ptr, ref_stride, W, H, sse, \
+ &sum); \
+ return *sse; \
}
static void highbd_var_filter_block2d_bil_first_pass(
@@ -403,111 +414,111 @@ static void highbd_var_filter_block2d_bil_second_pass(
}
}
-#define HIGHBD_SUBPIX_VAR(W, H) \
- uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
- dst, dst_stride, sse); \
- } \
- \
- uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
- dst, dst_stride, sse); \
- } \
- \
- uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
- dst, dst_stride, sse); \
+#define HIGHBD_SUBPIX_VAR(W, H) \
+ uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ ref_ptr, ref_stride, sse); \
+ } \
+ \
+ uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ ref_ptr, ref_stride, sse); \
+ } \
+ \
+ uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
+ ref_ptr, ref_stride, sse); \
}
-#define HIGHBD_SUBPIX_AVG_VAR(W, H) \
- uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
- temp2, W); \
- \
- return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
- dst, dst_stride, sse); \
- } \
- \
- uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
- temp2, W); \
- \
- return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
- dst, dst_stride, sse); \
- } \
- \
- uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, uint32_t *sse, \
- const uint8_t *second_pred) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint16_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
- \
- highbd_var_filter_block2d_bil_first_pass( \
- src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \
- highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters[yoffset]); \
- \
- vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
- temp2, W); \
- \
- return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
- dst, dst_stride, sse); \
+#define HIGHBD_SUBPIX_AVG_VAR(W, H) \
+ uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
+ temp2, W); \
+ \
+ return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref_ptr, ref_stride, sse); \
+ } \
+ \
+ uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
+ temp2, W); \
+ \
+ return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref_ptr, ref_stride, sse); \
+ } \
+ \
+ uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, \
+ const uint8_t *second_pred) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint16_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
+ \
+ highbd_var_filter_block2d_bil_first_pass( \
+ src_ptr, fdata3, src_stride, 1, H + 1, W, bilinear_filters[x_offset]); \
+ highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters[y_offset]); \
+ \
+ vpx_highbd_comp_avg_pred_c(temp3, CONVERT_TO_SHORTPTR(second_pred), W, H, \
+ temp2, W); \
+ \
+ return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
+ ref_ptr, ref_stride, sse); \
}
/* All three forms of the variance are available in the same sizes. */
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.h
index b67e9297e1d..6d0e1b8a6b3 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/variance.h
@@ -22,37 +22,38 @@ extern "C" {
#define FILTER_BITS 7
#define FILTER_WEIGHT 128
-typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride,
- const uint8_t *b_ptr, int b_stride);
+typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride);
-typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride,
- const uint8_t *b_ptr, int b_stride,
+typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
const uint8_t *second_pred);
-typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b,
- int b_stride, int n);
+typedef void (*vp8_copy32xn_fn_t)(const uint8_t *src_ptr, int src_stride,
+ uint8_t *ref_ptr, int ref_stride, int n);
-typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
+typedef void (*vpx_sad_multi_fn_t)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sad_array);
-typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride,
+typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *src_ptr, int src_stride,
const uint8_t *const b_array[],
- int b_stride, unsigned int *sad_array);
+ int ref_stride, unsigned int *sad_array);
-typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- unsigned int *sse);
+typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, unsigned int *sse);
-typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride,
- int xoffset, int yoffset,
- const uint8_t *b, int b_stride,
- unsigned int *sse);
+typedef unsigned int (*vpx_subpixvariance_fn_t)(
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse);
typedef unsigned int (*vpx_subp_avg_variance_fn_t)(
- const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset,
- const uint8_t *b_ptr, int b_stride, unsigned int *sse,
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
const uint8_t *second_pred);
+
#if CONFIG_VP8
typedef struct variance_vtable {
vpx_sad_fn_t sdf;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp.mk b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp.mk
index 4b48f62a977..5ee3bfec729 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp.mk
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp.mk
@@ -69,6 +69,7 @@ DSP_SRCS-$(HAVE_MSA) += mips/deblock_msa.c
DSP_SRCS-$(HAVE_NEON) += arm/deblock_neon.c
DSP_SRCS-$(HAVE_SSE2) += x86/add_noise_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/deblock_sse2.asm
+DSP_SRCS-$(HAVE_SSE2) += x86/post_proc_sse2.c
DSP_SRCS-$(HAVE_VSX) += ppc/deblock_vsx.c
endif # CONFIG_POSTPROC
@@ -89,9 +90,12 @@ DSP_SRCS-yes += vpx_filter.h
DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/convolve.h
DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/vpx_asm_stubs.c
+
+DSP_SRCS-$(HAVE_SSE2) += x86/convolve_sse2.h
DSP_SRCS-$(HAVE_SSSE3) += x86/convolve_ssse3.h
DSP_SRCS-$(HAVE_AVX2) += x86/convolve_avx2.h
DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_8t_sse2.asm
+DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_4t_intrin_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_bilinear_sse2.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_bilinear_ssse3.asm
@@ -292,8 +296,8 @@ ifeq ($(CONFIG_VP9_ENCODER),yes)
DSP_SRCS-yes += quantize.c
DSP_SRCS-yes += quantize.h
-DSP_SRCS-$(HAVE_SSE2) += x86/quantize_x86.h
DSP_SRCS-$(HAVE_SSE2) += x86/quantize_sse2.c
+DSP_SRCS-$(HAVE_SSE2) += x86/quantize_sse2.h
DSP_SRCS-$(HAVE_SSSE3) += x86/quantize_ssse3.c
DSP_SRCS-$(HAVE_AVX) += x86/quantize_avx.c
DSP_SRCS-$(HAVE_NEON) += arm/quantize_neon.c
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 2350bc6e870..182503810f5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -37,322 +37,322 @@ if ($opts{arch} eq "x86_64") {
# Intra prediction
#
-add_proto qw/void vpx_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d207_predictor_4x4 sse2/;
-add_proto qw/void vpx_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d45_predictor_4x4 neon sse2/;
-add_proto qw/void vpx_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d63_predictor_4x4 ssse3/;
-add_proto qw/void vpx_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_h_predictor_4x4 neon dspr2 msa sse2 vsx/;
-add_proto qw/void vpx_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d117_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d135_predictor_4x4 neon/;
-add_proto qw/void vpx_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d153_predictor_4x4 ssse3/;
-add_proto qw/void vpx_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_v_predictor_4x4 neon msa sse2/;
-add_proto qw/void vpx_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_tm_predictor_4x4 neon dspr2 msa sse2 vsx/;
-add_proto qw/void vpx_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_predictor_4x4 dspr2 msa neon sse2/;
-add_proto qw/void vpx_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_top_predictor_4x4 msa neon sse2/;
-add_proto qw/void vpx_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_left_predictor_4x4 msa neon sse2/;
-add_proto qw/void vpx_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_128_predictor_4x4 msa neon sse2/;
-add_proto qw/void vpx_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d207_predictor_8x8 ssse3/;
-add_proto qw/void vpx_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d45_predictor_8x8 neon sse2 vsx/;
-add_proto qw/void vpx_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d63_predictor_8x8 ssse3 vsx/;
-add_proto qw/void vpx_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_h_predictor_8x8 neon dspr2 msa sse2 vsx/;
-add_proto qw/void vpx_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d135_predictor_8x8 neon/;
-add_proto qw/void vpx_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d153_predictor_8x8 ssse3/;
-add_proto qw/void vpx_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_v_predictor_8x8 neon msa sse2/;
-add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2 vsx/;
-add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_top_predictor_8x8 neon msa sse2/;
-add_proto qw/void vpx_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_left_predictor_8x8 neon msa sse2/;
-add_proto qw/void vpx_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_128_predictor_8x8 neon msa sse2/;
-add_proto qw/void vpx_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d207_predictor_16x16 ssse3/;
-add_proto qw/void vpx_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d45_predictor_16x16 neon ssse3 vsx/;
-add_proto qw/void vpx_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d63_predictor_16x16 ssse3 vsx/;
-add_proto qw/void vpx_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_h_predictor_16x16 neon dspr2 msa sse2 vsx/;
-add_proto qw/void vpx_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d135_predictor_16x16 neon/;
-add_proto qw/void vpx_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d153_predictor_16x16 ssse3/;
-add_proto qw/void vpx_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_v_predictor_16x16 neon msa sse2 vsx/;
-add_proto qw/void vpx_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_tm_predictor_16x16 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_predictor_16x16 dspr2 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_top_predictor_16x16 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_left_predictor_16x16 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_128_predictor_16x16 neon msa sse2 vsx/;
-add_proto qw/void vpx_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d207_predictor_32x32 ssse3/;
-add_proto qw/void vpx_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d45_predictor_32x32 neon ssse3 vsx/;
-add_proto qw/void vpx_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d63_predictor_32x32 ssse3 vsx/;
-add_proto qw/void vpx_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_h_predictor_32x32 neon msa sse2 vsx/;
-add_proto qw/void vpx_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
-add_proto qw/void vpx_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d135_predictor_32x32 neon/;
-add_proto qw/void vpx_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_d153_predictor_32x32 ssse3/;
-add_proto qw/void vpx_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_v_predictor_32x32 neon msa sse2 vsx/;
-add_proto qw/void vpx_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_tm_predictor_32x32 neon msa sse2 vsx/;
-add_proto qw/void vpx_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_predictor_32x32 msa neon sse2 vsx/;
-add_proto qw/void vpx_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_top_predictor_32x32 msa neon sse2 vsx/;
-add_proto qw/void vpx_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_left_predictor_32x32 msa neon sse2 vsx/;
-add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
+add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2 vsx/;
# High bitdepth functions
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void vpx_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d207_predictor_4x4 sse2/;
- add_proto qw/void vpx_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d45_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d45_predictor_4x4 neon ssse3/;
- add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d63_predictor_4x4 sse2/;
- add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d117_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d117_predictor_4x4 sse2/;
- add_proto qw/void vpx_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d135_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d135_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d153_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d153_predictor_4x4 sse2/;
- add_proto qw/void vpx_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_v_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_tm_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_top_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_left_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_left_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_128_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_128_predictor_4x4 neon sse2/;
- add_proto qw/void vpx_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d207_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d207_predictor_8x8 ssse3/;
- add_proto qw/void vpx_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d45_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d45_predictor_8x8 neon ssse3/;
- add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d63_predictor_8x8 ssse3/;
- add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d117_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d117_predictor_8x8 ssse3/;
- add_proto qw/void vpx_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d135_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d135_predictor_8x8 neon ssse3/;
- add_proto qw/void vpx_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d153_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d153_predictor_8x8 ssse3/;
- add_proto qw/void vpx_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_v_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_tm_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_top_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_left_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_left_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_128_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_128_predictor_8x8 neon sse2/;
- add_proto qw/void vpx_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d207_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d207_predictor_16x16 ssse3/;
- add_proto qw/void vpx_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d45_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d45_predictor_16x16 neon ssse3/;
- add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d63_predictor_16x16 ssse3/;
- add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d117_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d117_predictor_16x16 ssse3/;
- add_proto qw/void vpx_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d135_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d135_predictor_16x16 neon ssse3/;
- add_proto qw/void vpx_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d153_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d153_predictor_16x16 ssse3/;
- add_proto qw/void vpx_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_v_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_tm_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_top_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_left_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_left_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_128_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_128_predictor_16x16 neon sse2/;
- add_proto qw/void vpx_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d207_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d207_predictor_32x32 ssse3/;
- add_proto qw/void vpx_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d45_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d45_predictor_32x32 neon ssse3/;
- add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d63_predictor_32x32 ssse3/;
- add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d117_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d117_predictor_32x32 ssse3/;
- add_proto qw/void vpx_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d135_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d135_predictor_32x32 neon ssse3/;
- add_proto qw/void vpx_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_d153_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_d153_predictor_32x32 ssse3/;
- add_proto qw/void vpx_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_v_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_tm_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_top_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_left_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_left_predictor_32x32 neon sse2/;
- add_proto qw/void vpx_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ add_proto qw/void vpx_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_dc_128_predictor_32x32 neon sse2/;
} # CONFIG_VP9_HIGHBITDEPTH
@@ -400,28 +400,28 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
# Sub Pixel Filters
#
- add_proto qw/void vpx_highbd_convolve_copy/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve_copy/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve_copy sse2 avx2 neon/;
- add_proto qw/void vpx_highbd_convolve_avg/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve_avg/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve_avg sse2 avx2 neon/;
- add_proto qw/void vpx_highbd_convolve8/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8 avx2 neon/, "$sse2_x86_64";
- add_proto qw/void vpx_highbd_convolve8_horiz/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8_horiz/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8_horiz avx2 neon/, "$sse2_x86_64";
- add_proto qw/void vpx_highbd_convolve8_vert/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8_vert/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8_vert avx2 neon/, "$sse2_x86_64";
- add_proto qw/void vpx_highbd_convolve8_avg/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8_avg/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8_avg avx2 neon/, "$sse2_x86_64";
- add_proto qw/void vpx_highbd_convolve8_avg_horiz/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8_avg_horiz/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8_avg_horiz avx2 neon/, "$sse2_x86_64";
- add_proto qw/void vpx_highbd_convolve8_avg_vert/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bps";
+ add_proto qw/void vpx_highbd_convolve8_avg_vert/, "const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd";
specialize qw/vpx_highbd_convolve8_avg_vert avx2 neon/, "$sse2_x86_64";
} # CONFIG_VP9_HIGHBITDEPTH
@@ -785,6 +785,15 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
add_proto qw/void vpx_hadamard_32x32/, "const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff";
specialize qw/vpx_hadamard_32x32 sse2 avx2/;
+ add_proto qw/void vpx_highbd_hadamard_8x8/, "const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff";
+ specialize qw/vpx_highbd_hadamard_8x8/;
+
+ add_proto qw/void vpx_highbd_hadamard_16x16/, "const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff";
+ specialize qw/vpx_highbd_hadamard_16x16/;
+
+ add_proto qw/void vpx_highbd_hadamard_32x32/, "const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff";
+ specialize qw/vpx_highbd_hadamard_32x32/;
+
add_proto qw/int vpx_satd/, "const tran_low_t *coeff, int length";
specialize qw/vpx_satd avx2 sse2 neon/;
} else {
@@ -888,43 +897,43 @@ specialize qw/vpx_sad4x4x8 sse4_1 msa mmi/;
#
# Multi-block SAD, comparing a reference to N independent blocks
#
-add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad64x64x4d avx512 avx2 neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad64x32x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad32x64x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad32x32x4d avx2 neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad32x16x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad16x32x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad16x16x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad16x8x4d neon msa sse2 vsx mmi/;
-add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad8x16x4d neon msa sse2 mmi/;
-add_proto qw/void vpx_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad8x8x4d neon msa sse2 mmi/;
-add_proto qw/void vpx_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad8x4x4d neon msa sse2 mmi/;
-add_proto qw/void vpx_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad4x8x4d neon msa sse2 mmi/;
-add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
+add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_sad4x4x4d neon msa sse2 mmi/;
add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
@@ -945,7 +954,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
# Block subtraction
#
- add_proto qw/void vpx_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
+ add_proto qw/void vpx_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src8_ptr, ptrdiff_t src_stride, const uint8_t *pred8_ptr, ptrdiff_t pred_stride, int bd";
#
# Single block SAD
@@ -990,13 +999,13 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
# Avg
#
- add_proto qw/unsigned int vpx_highbd_avg_8x8/, "const uint8_t *, int p";
+ add_proto qw/unsigned int vpx_highbd_avg_8x8/, "const uint8_t *s8, int p";
specialize qw/vpx_highbd_avg_8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_avg_4x4/, "const uint8_t *, int p";
+ add_proto qw/unsigned int vpx_highbd_avg_4x4/, "const uint8_t *s8, int p";
specialize qw/vpx_highbd_avg_4x4 sse2/;
- add_proto qw/void vpx_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
+ add_proto qw/void vpx_highbd_minmax_8x8/, "const uint8_t *s8, int p, const uint8_t *d8, int dp, int *min, int *max";
add_proto qw/unsigned int vpx_highbd_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
specialize qw/vpx_highbd_sad64x64_avg sse2/;
@@ -1038,43 +1047,43 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
# Multi-block SAD, comparing a reference to N independent blocks
#
- add_proto qw/void vpx_highbd_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad64x64x4d sse2/;
- add_proto qw/void vpx_highbd_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad64x32x4d sse2/;
- add_proto qw/void vpx_highbd_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad32x64x4d sse2/;
- add_proto qw/void vpx_highbd_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad32x32x4d sse2/;
- add_proto qw/void vpx_highbd_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad32x16x4d sse2/;
- add_proto qw/void vpx_highbd_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad16x32x4d sse2/;
- add_proto qw/void vpx_highbd_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad16x16x4d sse2/;
- add_proto qw/void vpx_highbd_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad16x8x4d sse2/;
- add_proto qw/void vpx_highbd_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad8x16x4d sse2/;
- add_proto qw/void vpx_highbd_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad8x8x4d sse2/;
- add_proto qw/void vpx_highbd_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad8x4x4d sse2/;
- add_proto qw/void vpx_highbd_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad4x8x4d sse2/;
- add_proto qw/void vpx_highbd_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array";
+ add_proto qw/void vpx_highbd_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_array[], int ref_stride, uint32_t *sad_array";
specialize qw/vpx_highbd_sad4x4x4d sse2/;
#
@@ -1091,70 +1100,70 @@ if (vpx_config("CONFIG_ENCODERS") eq "yes" || vpx_config("CONFIG_POSTPROC") eq "
#
# Variance
#
-add_proto qw/unsigned int vpx_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance64x64 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance64x32 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance32x64 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance32x32 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance32x16 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance16x32 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance16x16 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance16x8 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance8x16 sse2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance8x8 sse2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance8x4 sse2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance4x8 sse2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_variance4x4 sse2 neon msa mmi vsx/;
#
# Specialty Variance
#
-add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
specialize qw/vpx_get16x16var sse2 avx2 neon msa vsx/;
-add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
specialize qw/vpx_get8x8var sse2 neon msa vsx/;
-add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_mse16x16 sse2 avx2 neon msa mmi vsx/;
-add_proto qw/unsigned int vpx_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_mse16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_mse16x8 sse2 avx2 msa mmi vsx/;
-add_proto qw/unsigned int vpx_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_mse8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_mse8x16 sse2 msa mmi vsx/;
-add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_mse8x8 sse2 msa mmi vsx/;
add_proto qw/unsigned int vpx_get_mb_ss/, "const int16_t *";
specialize qw/vpx_get_mb_ss sse2 msa vsx/;
-add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride";
+add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride";
specialize qw/vpx_get4x4sse_cs neon msa vsx/;
add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
@@ -1163,218 +1172,218 @@ add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred,
#
# Subpixel Variance
#
-add_proto qw/uint32_t vpx_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance64x64 avx2 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance64x32 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance32x64 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance32x32 avx2 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance32x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance16x32 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance16x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance16x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x4 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance4x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+add_proto qw/uint32_t vpx_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance4x4 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance64x64 neon avx2 msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance64x32 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance32x64 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance32x32 neon avx2 msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance32x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance16x32 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance16x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance16x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance8x16 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance8x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance8x4 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance4x8 neon msa mmi sse2 ssse3/;
-add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance4x4 neon msa mmi sse2 ssse3/;
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
- add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance64x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance64x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance32x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance32x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance32x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance16x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance16x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance8x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_variance8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_variance4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance64x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance64x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance32x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance32x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance32x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance16x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance16x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance8x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_variance8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_variance4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance64x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance64x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance32x64 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance32x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance32x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance16x32 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance16x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance8x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_variance8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_variance4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- add_proto qw/void vpx_highbd_8_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_8_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_8_get16x16var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_8_get8x8var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_10_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_10_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_10_get16x16var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_10_get8x8var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_12_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/void vpx_highbd_12_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_12_get16x16var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
+ add_proto qw/void vpx_highbd_12_get8x8var/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
- add_proto qw/unsigned int vpx_highbd_8_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_mse16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_mse16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_8_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_8_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_mse16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_mse8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_8_mse8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_8_mse8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_mse16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_mse16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_10_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_10_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_mse16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_mse8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_10_mse8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_10_mse8x8 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_mse16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_mse16x16 sse2/;
- add_proto qw/unsigned int vpx_highbd_12_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
- add_proto qw/unsigned int vpx_highbd_12_mse8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_mse16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_mse8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ add_proto qw/unsigned int vpx_highbd_12_mse8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vpx_highbd_12_mse8x8 sse2/;
add_proto qw/void vpx_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint16_t *pred, int width, int height, const uint16_t *ref, int ref_stride";
@@ -1382,221 +1391,221 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
# Subpixel Variance
#
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_12_sub_pixel_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_10_sub_pixel_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_highbd_8_sub_pixel_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x64 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x32 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x16 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x8 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x4 sse2/;
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
- add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
+ add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
} # CONFIG_VP9_HIGHBITDEPTH
@@ -1610,7 +1619,7 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes" || vpx_config("CONFIG_VP9_POSTPROC")
add_proto qw/void vpx_mbpost_proc_down/, "unsigned char *dst, int pitch, int rows, int cols,int flimit";
specialize qw/vpx_mbpost_proc_down sse2 neon msa vsx/;
- add_proto qw/void vpx_mbpost_proc_across_ip/, "unsigned char *dst, int pitch, int rows, int cols,int flimit";
+ add_proto qw/void vpx_mbpost_proc_across_ip/, "unsigned char *src, int pitch, int rows, int cols,int flimit";
specialize qw/vpx_mbpost_proc_across_ip sse2 neon msa vsx/;
add_proto qw/void vpx_post_proc_down_and_across_mb_row/, "unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size";
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_filter.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_filter.h
index 05eb572651b..54357ee6cae 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_filter.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/vpx_filter.h
@@ -11,6 +11,7 @@
#ifndef VPX_VPX_DSP_VPX_FILTER_H_
#define VPX_VPX_DSP_VPX_FILTER_H_
+#include <assert.h>
#include "vpx/vpx_integer.h"
#ifdef __cplusplus
@@ -26,6 +27,14 @@ extern "C" {
typedef int16_t InterpKernel[SUBPEL_TAPS];
+static INLINE int vpx_get_filter_taps(const int16_t *const filter) {
+ assert(filter[3] != 128);
+ if (!filter[0] && !filter[1] && !filter[2])
+ return 2;
+ else
+ return 8;
+}
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/avg_pred_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/avg_pred_sse2.c
index e7db75559ac..e4e1e0e7a2c 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/avg_pred_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/avg_pred_sse2.c
@@ -15,10 +15,10 @@
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/mem_sse2.h"
-void vpx_comp_avg_pred_sse2(uint8_t *comp, const uint8_t *pred, int width,
+void vpx_comp_avg_pred_sse2(uint8_t *comp_pred, const uint8_t *pred, int width,
int height, const uint8_t *ref, int ref_stride) {
- /* comp and pred must be 16 byte aligned. */
- assert(((intptr_t)comp & 0xf) == 0);
+ /* comp_pred and pred must be 16 byte aligned. */
+ assert(((intptr_t)comp_pred & 0xf) == 0);
assert(((intptr_t)pred & 0xf) == 0);
if (width > 8) {
int x, y;
@@ -27,17 +27,17 @@ void vpx_comp_avg_pred_sse2(uint8_t *comp, const uint8_t *pred, int width,
const __m128i p = _mm_load_si128((const __m128i *)(pred + x));
const __m128i r = _mm_loadu_si128((const __m128i *)(ref + x));
const __m128i avg = _mm_avg_epu8(p, r);
- _mm_store_si128((__m128i *)(comp + x), avg);
+ _mm_store_si128((__m128i *)(comp_pred + x), avg);
}
- comp += width;
+ comp_pred += width;
pred += width;
ref += ref_stride;
}
} else { // width must be 4 or 8.
int i;
- // Process 16 elements at a time. comp and pred have width == stride and
- // therefore live in contigious memory. 4*4, 4*8, 8*4, 8*8, and 8*16 are all
- // divisible by 16 so just ref needs to be massaged when loading.
+ // Process 16 elements at a time. comp_pred and pred have width == stride
+ // and therefore live in contigious memory. 4*4, 4*8, 8*4, 8*8, and 8*16 are
+ // all divisible by 16 so just ref needs to be massaged when loading.
for (i = 0; i < width * height; i += 16) {
const __m128i p = _mm_load_si128((const __m128i *)pred);
__m128i r;
@@ -60,10 +60,10 @@ void vpx_comp_avg_pred_sse2(uint8_t *comp, const uint8_t *pred, int width,
ref += 2 * ref_stride;
}
avg = _mm_avg_epu8(p, r);
- _mm_store_si128((__m128i *)comp, avg);
+ _mm_store_si128((__m128i *)comp_pred, avg);
pred += 16;
- comp += 16;
+ comp_pred += 16;
}
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h
index bb427c8a38f..8398ec3c116 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve.h
@@ -23,44 +23,59 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
#define FUN_CONV_1D(name, offset, step_q4, dir, src_start, avg, opt) \
void vpx_convolve8_##name##_##opt( \
const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
- ptrdiff_t dst_stride, const InterpKernel *filter_kernel, int x0_q4, \
+ ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, \
int x_step_q4, int y0_q4, int y_step_q4, int w, int h) { \
- const int16_t *filter = filter_kernel[offset]; \
+ const int16_t *filter_row = filter[offset]; \
(void)x0_q4; \
(void)x_step_q4; \
(void)y0_q4; \
(void)y_step_q4; \
- assert(filter[3] != 128); \
+ assert(filter_row[3] != 128); \
assert(step_q4 == 16); \
- if (filter[0] | filter[1] | filter[2]) { \
+ if (filter_row[0] | filter_row[1] | filter_row[6] | filter_row[7]) { \
while (w >= 16) { \
vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
src += 16; \
dst += 16; \
w -= 16; \
} \
if (w == 8) { \
vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
} else if (w == 4) { \
vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
+ } \
+ } else if (filter_row[2] | filter_row[5]) { \
+ while (w >= 16) { \
+ vpx_filter_block1d16_##dir##4_##avg##opt(src_start, src_stride, dst, \
+ dst_stride, h, filter_row); \
+ src += 16; \
+ dst += 16; \
+ w -= 16; \
+ } \
+ if (w == 8) { \
+ vpx_filter_block1d8_##dir##4_##avg##opt(src_start, src_stride, dst, \
+ dst_stride, h, filter_row); \
+ } else if (w == 4) { \
+ vpx_filter_block1d4_##dir##4_##avg##opt(src_start, src_stride, dst, \
+ dst_stride, h, filter_row); \
} \
} else { \
while (w >= 16) { \
vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
src += 16; \
dst += 16; \
w -= 16; \
} \
if (w == 8) { \
vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
} else if (w == 4) { \
vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \
- dst_stride, h, filter); \
+ dst_stride, h, filter_row); \
} \
} \
}
@@ -106,64 +121,86 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
unsigned int output_height,
const int16_t *filter, int bd);
-#define HIGH_FUN_CONV_1D(name, offset, step_q4, dir, src_start, avg, opt) \
- void vpx_highbd_convolve8_##name##_##opt( \
- const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, \
- ptrdiff_t dst_stride, const InterpKernel *filter_kernel, int x0_q4, \
- int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd) { \
- const int16_t *filter = filter_kernel[offset]; \
- if (step_q4 == 16 && filter[3] != 128) { \
- if (filter[0] | filter[1] | filter[2]) { \
- while (w >= 16) { \
- vpx_highbd_filter_block1d16_##dir##8_##avg##opt( \
- src_start, src_stride, dst, dst_stride, h, filter, bd); \
- src += 16; \
- dst += 16; \
- w -= 16; \
- } \
- while (w >= 8) { \
- vpx_highbd_filter_block1d8_##dir##8_##avg##opt( \
- src_start, src_stride, dst, dst_stride, h, filter, bd); \
- src += 8; \
- dst += 8; \
- w -= 8; \
- } \
- while (w >= 4) { \
- vpx_highbd_filter_block1d4_##dir##8_##avg##opt( \
- src_start, src_stride, dst, dst_stride, h, filter, bd); \
- src += 4; \
- dst += 4; \
- w -= 4; \
- } \
- } else { \
- while (w >= 16) { \
- vpx_highbd_filter_block1d16_##dir##2_##avg##opt( \
- src, src_stride, dst, dst_stride, h, filter, bd); \
- src += 16; \
- dst += 16; \
- w -= 16; \
- } \
- while (w >= 8) { \
- vpx_highbd_filter_block1d8_##dir##2_##avg##opt( \
- src, src_stride, dst, dst_stride, h, filter, bd); \
- src += 8; \
- dst += 8; \
- w -= 8; \
- } \
- while (w >= 4) { \
- vpx_highbd_filter_block1d4_##dir##2_##avg##opt( \
- src, src_stride, dst, dst_stride, h, filter, bd); \
- src += 4; \
- dst += 4; \
- w -= 4; \
- } \
- } \
- } \
- if (w) { \
- vpx_highbd_convolve8_##name##_c(src, src_stride, dst, dst_stride, \
- filter_kernel, x0_q4, x_step_q4, y0_q4, \
- y_step_q4, w, h, bd); \
- } \
+#define HIGH_FUN_CONV_1D(name, offset, step_q4, dir, src_start, avg, opt) \
+ void vpx_highbd_convolve8_##name##_##opt( \
+ const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst, \
+ ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, \
+ int x_step_q4, int y0_q4, int y_step_q4, int w, int h, int bd) { \
+ const int16_t *filter_row = filter[offset]; \
+ if (step_q4 == 16 && filter_row[3] != 128) { \
+ if (filter_row[0] | filter_row[1] | filter_row[6] | filter_row[7]) { \
+ while (w >= 16) { \
+ vpx_highbd_filter_block1d16_##dir##8_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 16; \
+ dst += 16; \
+ w -= 16; \
+ } \
+ while (w >= 8) { \
+ vpx_highbd_filter_block1d8_##dir##8_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 8; \
+ dst += 8; \
+ w -= 8; \
+ } \
+ while (w >= 4) { \
+ vpx_highbd_filter_block1d4_##dir##8_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 4; \
+ dst += 4; \
+ w -= 4; \
+ } \
+ } else if (filter_row[2] | filter_row[5]) { \
+ while (w >= 16) { \
+ vpx_highbd_filter_block1d16_##dir##4_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 16; \
+ dst += 16; \
+ w -= 16; \
+ } \
+ while (w >= 8) { \
+ vpx_highbd_filter_block1d8_##dir##4_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 8; \
+ dst += 8; \
+ w -= 8; \
+ } \
+ while (w >= 4) { \
+ vpx_highbd_filter_block1d4_##dir##4_##avg##opt( \
+ src_start, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 4; \
+ dst += 4; \
+ w -= 4; \
+ } \
+ } else { \
+ while (w >= 16) { \
+ vpx_highbd_filter_block1d16_##dir##2_##avg##opt( \
+ src, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 16; \
+ dst += 16; \
+ w -= 16; \
+ } \
+ while (w >= 8) { \
+ vpx_highbd_filter_block1d8_##dir##2_##avg##opt( \
+ src, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 8; \
+ dst += 8; \
+ w -= 8; \
+ } \
+ while (w >= 4) { \
+ vpx_highbd_filter_block1d4_##dir##2_##avg##opt( \
+ src, src_stride, dst, dst_stride, h, filter_row, bd); \
+ src += 4; \
+ dst += 4; \
+ w -= 4; \
+ } \
+ } \
+ } \
+ if (w) { \
+ vpx_highbd_convolve8_##name##_c(src, src_stride, dst, dst_stride, \
+ filter, x0_q4, x_step_q4, y0_q4, \
+ y_step_q4, w, h, bd); \
+ } \
}
#define HIGH_FUN_CONV_2D(avg, opt) \
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h
index 343af9fd0d0..99bc9637fcb 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_avx2.h
@@ -100,6 +100,63 @@ static INLINE __m128i convolve8_8_avx2(const __m256i *const s,
return sum1;
}
+static INLINE __m256i mm256_loadu2_si128(const void *lo, const void *hi) {
+ const __m256i tmp =
+ _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)lo));
+ return _mm256_inserti128_si256(tmp, _mm_loadu_si128((const __m128i *)hi), 1);
+}
+
+static INLINE __m256i mm256_loadu2_epi64(const void *lo, const void *hi) {
+ const __m256i tmp =
+ _mm256_castsi128_si256(_mm_loadl_epi64((const __m128i *)lo));
+ return _mm256_inserti128_si256(tmp, _mm_loadl_epi64((const __m128i *)hi), 1);
+}
+
+static INLINE void mm256_store2_si128(__m128i *const dst_ptr_1,
+ __m128i *const dst_ptr_2,
+ const __m256i *const src) {
+ _mm_store_si128(dst_ptr_1, _mm256_castsi256_si128(*src));
+ _mm_store_si128(dst_ptr_2, _mm256_extractf128_si256(*src, 1));
+}
+
+static INLINE void mm256_storeu2_epi64(__m128i *const dst_ptr_1,
+ __m128i *const dst_ptr_2,
+ const __m256i *const src) {
+ _mm_storel_epi64(dst_ptr_1, _mm256_castsi256_si128(*src));
+ _mm_storel_epi64(dst_ptr_2, _mm256_extractf128_si256(*src, 1));
+}
+
+static INLINE void mm256_storeu2_epi32(__m128i *const dst_ptr_1,
+ __m128i *const dst_ptr_2,
+ const __m256i *const src) {
+ *((uint32_t *)(dst_ptr_1)) = _mm_cvtsi128_si32(_mm256_castsi256_si128(*src));
+ *((uint32_t *)(dst_ptr_2)) =
+ _mm_cvtsi128_si32(_mm256_extractf128_si256(*src, 1));
+}
+
+static INLINE __m256i mm256_round_epi32(const __m256i *const src,
+ const __m256i *const half_depth,
+ const int depth) {
+ const __m256i nearest_src = _mm256_add_epi32(*src, *half_depth);
+ return _mm256_srai_epi32(nearest_src, depth);
+}
+
+static INLINE __m256i mm256_round_epi16(const __m256i *const src,
+ const __m256i *const half_depth,
+ const int depth) {
+ const __m256i nearest_src = _mm256_adds_epi16(*src, *half_depth);
+ return _mm256_srai_epi16(nearest_src, depth);
+}
+
+static INLINE __m256i mm256_madd_add_epi32(const __m256i *const src_0,
+ const __m256i *const src_1,
+ const __m256i *const ker_0,
+ const __m256i *const ker_1) {
+ const __m256i tmp_0 = _mm256_madd_epi16(*src_0, *ker_0);
+ const __m256i tmp_1 = _mm256_madd_epi16(*src_1, *ker_1);
+ return _mm256_add_epi32(tmp_0, tmp_1);
+}
+
#undef MM256_BROADCASTSI128_SI256
#endif // VPX_VPX_DSP_X86_CONVOLVE_AVX2_H_
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h
new file mode 100644
index 00000000000..84435463949
--- /dev/null
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/convolve_sse2.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_VPX_DSP_X86_CONVOLVE_SSE2_H_
+#define VPX_VPX_DSP_X86_CONVOLVE_SSE2_H_
+
+#include <emmintrin.h> // SSE2
+
+#include "./vpx_config.h"
+
+// Interprets the input register as 16-bit words 7 6 5 4 3 2 1 0, then returns
+// values at index 2 and 3 to return 3 2 3 2 3 2 3 2 as 16-bit words
+static INLINE __m128i extract_quarter_2_epi16_sse2(const __m128i *const reg) {
+ __m128i tmp = _mm_unpacklo_epi32(*reg, *reg);
+ return _mm_unpackhi_epi64(tmp, tmp);
+}
+
+// Interprets the input register as 16-bit words 7 6 5 4 3 2 1 0, then returns
+// values at index 2 and 3 to return 5 4 5 4 5 4 5 4 as 16-bit words.
+static INLINE __m128i extract_quarter_3_epi16_sse2(const __m128i *const reg) {
+ __m128i tmp = _mm_unpackhi_epi32(*reg, *reg);
+ return _mm_unpacklo_epi64(tmp, tmp);
+}
+
+// Interprets src as 8-bit words, zero extends to form 16-bit words, then
+// multiplies with ker and add the adjacent results to form 32-bit words.
+// Finally adds the result from 1 and 2 together.
+static INLINE __m128i mm_madd_add_epi8_sse2(const __m128i *const src_1,
+ const __m128i *const src_2,
+ const __m128i *const ker_1,
+ const __m128i *const ker_2) {
+ const __m128i src_1_half = _mm_unpacklo_epi8(*src_1, _mm_setzero_si128());
+ const __m128i src_2_half = _mm_unpacklo_epi8(*src_2, _mm_setzero_si128());
+ const __m128i madd_1 = _mm_madd_epi16(src_1_half, *ker_1);
+ const __m128i madd_2 = _mm_madd_epi16(src_2_half, *ker_2);
+ return _mm_add_epi32(madd_1, madd_2);
+}
+
+// Interprets src as 16-bit words, then multiplies with ker and add the
+// adjacent results to form 32-bit words. Finally adds the result from 1 and 2
+// together.
+static INLINE __m128i mm_madd_add_epi16_sse2(const __m128i *const src_1,
+ const __m128i *const src_2,
+ const __m128i *const ker_1,
+ const __m128i *const ker_2) {
+ const __m128i madd_1 = _mm_madd_epi16(*src_1, *ker_1);
+ const __m128i madd_2 = _mm_madd_epi16(*src_2, *ker_2);
+ return _mm_add_epi32(madd_1, madd_2);
+}
+
+static INLINE __m128i mm_madd_packs_epi16_sse2(const __m128i *const src_0,
+ const __m128i *const src_1,
+ const __m128i *const ker) {
+ const __m128i madd_1 = _mm_madd_epi16(*src_0, *ker);
+ const __m128i madd_2 = _mm_madd_epi16(*src_1, *ker);
+ return _mm_packs_epi32(madd_1, madd_2);
+}
+
+// Interleaves src_1 and src_2
+static INLINE __m128i mm_zip_epi32_sse2(const __m128i *const src_1,
+ const __m128i *const src_2) {
+ const __m128i tmp_1 = _mm_unpacklo_epi32(*src_1, *src_2);
+ const __m128i tmp_2 = _mm_unpackhi_epi32(*src_1, *src_2);
+ return _mm_packs_epi32(tmp_1, tmp_2);
+}
+
+static INLINE __m128i mm_round_epi32_sse2(const __m128i *const src,
+ const __m128i *const half_depth,
+ const int depth) {
+ const __m128i nearest_src = _mm_add_epi32(*src, *half_depth);
+ return _mm_srai_epi32(nearest_src, depth);
+}
+
+static INLINE __m128i mm_round_epi16_sse2(const __m128i *const src,
+ const __m128i *const half_depth,
+ const int depth) {
+ const __m128i nearest_src = _mm_adds_epi16(*src, *half_depth);
+ return _mm_srai_epi16(nearest_src, depth);
+}
+
+#endif // VPX_VPX_DSP_X86_CONVOLVE_SSE2_H_
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/deblock_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/deblock_sse2.asm
index 97cb43b6711..9d8e5e3e090 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/deblock_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/deblock_sse2.asm
@@ -232,237 +232,6 @@ sym(vpx_post_proc_down_and_across_mb_row_sse2):
ret
%undef flimit
-;void vpx_mbpost_proc_down_sse2(unsigned char *dst,
-; int pitch, int rows, int cols,int flimit)
-extern sym(vpx_rv)
-global sym(vpx_mbpost_proc_down_sse2) PRIVATE
-sym(vpx_mbpost_proc_down_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 5
- SAVE_XMM 7
- GET_GOT rbx
- push rsi
- push rdi
- ; end prolog
-
- ALIGN_STACK 16, rax
- sub rsp, 128+16
-
- ; unsigned char d[16][8] at [rsp]
- ; create flimit2 at [rsp+128]
- mov eax, dword ptr arg(4) ;flimit
- mov [rsp+128], eax
- mov [rsp+128+4], eax
- mov [rsp+128+8], eax
- mov [rsp+128+12], eax
-%define flimit4 [rsp+128]
-
-%if ABI_IS_32BIT=0
- lea r8, [GLOBAL(sym(vpx_rv))]
-%endif
-
- ;rows +=8;
- add dword arg(2), 8
-
- ;for(c=0; c<cols; c+=8)
-.loop_col:
- mov rsi, arg(0) ; s
- pxor xmm0, xmm0 ;
-
- movsxd rax, dword ptr arg(1) ;pitch ;
-
- ; this copies the last row down into the border 8 rows
- mov rdi, rsi
- mov rdx, arg(2)
- sub rdx, 9
- imul rdx, rax
- lea rdi, [rdi+rdx]
- movq xmm1, QWORD ptr[rdi] ; first row
- mov rcx, 8
-.init_borderd: ; initialize borders
- lea rdi, [rdi + rax]
- movq [rdi], xmm1
-
- dec rcx
- jne .init_borderd
-
- neg rax ; rax = -pitch
-
- ; this copies the first row up into the border 8 rows
- mov rdi, rsi
- movq xmm1, QWORD ptr[rdi] ; first row
- mov rcx, 8
-.init_border: ; initialize borders
- lea rdi, [rdi + rax]
- movq [rdi], xmm1
-
- dec rcx
- jne .init_border
-
-
-
- lea rsi, [rsi + rax*8]; ; rdi = s[-pitch*8]
- neg rax
-
- pxor xmm5, xmm5
- pxor xmm6, xmm6 ;
-
- pxor xmm7, xmm7 ;
- mov rdi, rsi
-
- mov rcx, 15 ;
-
-.loop_initvar:
- movq xmm1, QWORD PTR [rdi];
- punpcklbw xmm1, xmm0 ;
-
- paddw xmm5, xmm1 ;
- pmullw xmm1, xmm1 ;
-
- movdqa xmm2, xmm1 ;
- punpcklwd xmm1, xmm0 ;
-
- punpckhwd xmm2, xmm0 ;
- paddd xmm6, xmm1 ;
-
- paddd xmm7, xmm2 ;
- lea rdi, [rdi+rax] ;
-
- dec rcx
- jne .loop_initvar
- ;save the var and sum
- xor rdx, rdx
-.loop_row:
- movq xmm1, QWORD PTR [rsi] ; [s-pitch*8]
- movq xmm2, QWORD PTR [rdi] ; [s+pitch*7]
-
- punpcklbw xmm1, xmm0
- punpcklbw xmm2, xmm0
-
- paddw xmm5, xmm2
- psubw xmm5, xmm1
-
- pmullw xmm2, xmm2
- movdqa xmm4, xmm2
-
- punpcklwd xmm2, xmm0
- punpckhwd xmm4, xmm0
-
- paddd xmm6, xmm2
- paddd xmm7, xmm4
-
- pmullw xmm1, xmm1
- movdqa xmm2, xmm1
-
- punpcklwd xmm1, xmm0
- psubd xmm6, xmm1
-
- punpckhwd xmm2, xmm0
- psubd xmm7, xmm2
-
-
- movdqa xmm3, xmm6
- pslld xmm3, 4
-
- psubd xmm3, xmm6
- movdqa xmm1, xmm5
-
- movdqa xmm4, xmm5
- pmullw xmm1, xmm1
-
- pmulhw xmm4, xmm4
- movdqa xmm2, xmm1
-
- punpcklwd xmm1, xmm4
- punpckhwd xmm2, xmm4
-
- movdqa xmm4, xmm7
- pslld xmm4, 4
-
- psubd xmm4, xmm7
-
- psubd xmm3, xmm1
- psubd xmm4, xmm2
-
- psubd xmm3, flimit4
- psubd xmm4, flimit4
-
- psrad xmm3, 31
- psrad xmm4, 31
-
- packssdw xmm3, xmm4
- packsswb xmm3, xmm0
-
- movq xmm1, QWORD PTR [rsi+rax*8]
-
- movq xmm2, xmm1
- punpcklbw xmm1, xmm0
-
- paddw xmm1, xmm5
- mov rcx, rdx
-
- and rcx, 127
-%if ABI_IS_32BIT=1 && CONFIG_PIC=1
- push rax
- lea rax, [GLOBAL(sym(vpx_rv))]
- movdqu xmm4, [rax + rcx*2] ;vpx_rv[rcx*2]
- pop rax
-%elif ABI_IS_32BIT=0
- movdqu xmm4, [r8 + rcx*2] ;vpx_rv[rcx*2]
-%else
- movdqu xmm4, [sym(vpx_rv) + rcx*2]
-%endif
-
- paddw xmm1, xmm4
- ;paddw xmm1, eight8s
- psraw xmm1, 4
-
- packuswb xmm1, xmm0
- pand xmm1, xmm3
-
- pandn xmm3, xmm2
- por xmm1, xmm3
-
- and rcx, 15
- movq QWORD PTR [rsp + rcx*8], xmm1 ;d[rcx*8]
-
- cmp edx, 8
- jl .skip_assignment
-
- mov rcx, rdx
- sub rcx, 8
- and rcx, 15
- movq mm0, [rsp + rcx*8] ;d[rcx*8]
- movq [rsi], mm0
-
-.skip_assignment:
- lea rsi, [rsi+rax]
-
- lea rdi, [rdi+rax]
- add rdx, 1
-
- cmp edx, dword arg(2) ;rows
- jl .loop_row
-
- add dword arg(0), 8 ; s += 8
- sub dword arg(3), 8 ; cols -= 8
- cmp dword arg(3), 0
- jg .loop_col
-
- add rsp, 128+16
- pop rsp
-
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_GOT
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
-%undef flimit4
-
;void vpx_mbpost_proc_across_ip_sse2(unsigned char *src,
; int pitch, int rows, int cols,int flimit)
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_convolve_avx2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_convolve_avx2.c
index ef94522a3b5..0ffa7f2d412 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_convolve_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_convolve_avx2.c
@@ -9,9 +9,9 @@
*/
#include <immintrin.h>
-
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/convolve.h"
+#include "vpx_dsp/x86/convolve_avx2.h"
// -----------------------------------------------------------------------------
// Copy and average
@@ -20,7 +20,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
uint16_t *dst, ptrdiff_t dst_stride,
const InterpKernel *filter, int x0_q4,
int x_step_q4, int y0_q4, int y_step_q4,
- int width, int h, int bd) {
+ int w, int h, int bd) {
(void)filter;
(void)x0_q4;
(void)x_step_q4;
@@ -28,8 +28,8 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
(void)y_step_q4;
(void)bd;
- assert(width % 4 == 0);
- if (width > 32) { // width = 64
+ assert(w % 4 == 0);
+ if (w > 32) { // w = 64
do {
const __m256i p0 = _mm256_loadu_si256((const __m256i *)src);
const __m256i p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
@@ -43,7 +43,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h--;
} while (h > 0);
- } else if (width > 16) { // width = 32
+ } else if (w > 16) { // w = 32
do {
const __m256i p0 = _mm256_loadu_si256((const __m256i *)src);
const __m256i p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
@@ -53,7 +53,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h--;
} while (h > 0);
- } else if (width > 8) { // width = 16
+ } else if (w > 8) { // w = 16
__m256i p0, p1;
do {
p0 = _mm256_loadu_si256((const __m256i *)src);
@@ -67,7 +67,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h -= 2;
} while (h > 0);
- } else if (width > 4) { // width = 8
+ } else if (w > 4) { // w = 8
__m128i p0, p1;
do {
p0 = _mm_loadu_si128((const __m128i *)src);
@@ -81,7 +81,7 @@ void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h -= 2;
} while (h > 0);
- } else { // width = 4
+ } else { // w = 4
__m128i p0, p1;
do {
p0 = _mm_loadl_epi64((const __m128i *)src);
@@ -102,7 +102,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
uint16_t *dst, ptrdiff_t dst_stride,
const InterpKernel *filter, int x0_q4,
int x_step_q4, int y0_q4, int y_step_q4,
- int width, int h, int bd) {
+ int w, int h, int bd) {
(void)filter;
(void)x0_q4;
(void)x_step_q4;
@@ -110,8 +110,8 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
(void)y_step_q4;
(void)bd;
- assert(width % 4 == 0);
- if (width > 32) { // width = 64
+ assert(w % 4 == 0);
+ if (w > 32) { // w = 64
__m256i p0, p1, p2, p3, u0, u1, u2, u3;
do {
p0 = _mm256_loadu_si256((const __m256i *)src);
@@ -130,7 +130,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h--;
} while (h > 0);
- } else if (width > 16) { // width = 32
+ } else if (w > 16) { // w = 32
__m256i p0, p1, u0, u1;
do {
p0 = _mm256_loadu_si256((const __m256i *)src);
@@ -143,7 +143,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride;
h--;
} while (h > 0);
- } else if (width > 8) { // width = 16
+ } else if (w > 8) { // w = 16
__m256i p0, p1, u0, u1;
do {
p0 = _mm256_loadu_si256((const __m256i *)src);
@@ -158,7 +158,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride << 1;
h -= 2;
} while (h > 0);
- } else if (width > 4) { // width = 8
+ } else if (w > 4) { // w = 8
__m128i p0, p1, u0, u1;
do {
p0 = _mm_loadu_si128((const __m128i *)src);
@@ -172,7 +172,7 @@ void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
dst += dst_stride << 1;
h -= 2;
} while (h > 0);
- } else { // width = 4
+ } else { // w = 4
__m128i p0, p1, u0, u1;
do {
p0 = _mm_loadl_epi64((const __m128i *)src);
@@ -209,6 +209,7 @@ static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };
#define CONV8_ROUNDING_BITS (7)
+#define CONV8_ROUNDING_NUM (1 << (CONV8_ROUNDING_BITS - 1))
// -----------------------------------------------------------------------------
// Horizontal Filtering
@@ -923,6 +924,200 @@ static void vpx_highbd_filter_block1d16_h8_avg_avx2(
} while (height > 0);
}
+static void vpx_highbd_filter_block1d4_h4_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
+ // We extract the middle four elements of the kernel into two registers in
+ // the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum. Calling add on the two
+ // halves gives us the output. Since avx2 allows us to use 256-bit buffer, we
+ // can do this two rows at a time.
+
+ __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m256i res_reg;
+ __m256i idx_shift_0 =
+ _mm256_setr_epi8(0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9, 0, 1, 2,
+ 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9);
+ __m256i idx_shift_2 =
+ _mm256_setr_epi8(4, 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 4,
+ 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13);
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+ const __m256i reg_round =
+ _mm256_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
+ const ptrdiff_t unrolled_src_stride = src_stride << 1;
+ const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
+ int h;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);
+ kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);
+
+ for (h = height; h >= 2; h -= 2) {
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Get the output
+ res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Round the result
+ res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Finally combine to get the final dst
+ res_reg = _mm256_packus_epi32(res_reg, res_reg);
+ res_reg = _mm256_min_epi16(res_reg, reg_max);
+ mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ src_ptr += unrolled_src_stride;
+ dst_ptr += unrolled_dst_stride;
+ }
+
+ // Repeat for the last row if needed
+ if (h > 0) {
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr, src_ptr + 4);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Get the output
+ res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Round the result
+ res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Finally combine to get the final dst
+ res_reg = _mm256_packus_epi32(res_reg, res_reg);
+ res_reg = _mm256_min_epi16(res_reg, reg_max);
+ _mm_storel_epi64((__m128i *)dst_ptr, _mm256_castsi256_si128(res_reg));
+ }
+}
+
+void vpx_highbd_filter_block1d8_h4_avx2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will extract the middle four elements of the kernel into two registers
+ // in the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum of the first half.
+ // Calling add gives us first half of the output. Repat again to get the whole
+ // output. Since avx2 allows us to use 256-bit buffer, we can do this two rows
+ // at a time.
+
+ __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m256i res_reg, res_first, res_last;
+ __m256i idx_shift_0 =
+ _mm256_setr_epi8(0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9, 0, 1, 2,
+ 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9);
+ __m256i idx_shift_2 =
+ _mm256_setr_epi8(4, 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 4,
+ 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13);
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+ const __m256i reg_round =
+ _mm256_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
+ const ptrdiff_t unrolled_src_stride = src_stride << 1;
+ const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
+ int h;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);
+ kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);
+
+ for (h = height; h >= 2; h -= 2) {
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Result for first half
+ res_first = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Do again to get the second half of dst
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr + 4, src_ptr + src_stride + 4);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Result for second half
+ res_last = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Round each result
+ res_first = mm256_round_epi32(&res_first, &reg_round, CONV8_ROUNDING_BITS);
+ res_last = mm256_round_epi32(&res_last, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Finally combine to get the final dst
+ res_reg = _mm256_packus_epi32(res_first, res_last);
+ res_reg = _mm256_min_epi16(res_reg, reg_max);
+ mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ src_ptr += unrolled_src_stride;
+ dst_ptr += unrolled_dst_stride;
+ }
+
+ // Repeat for the last row if needed
+ if (h > 0) {
+ src_reg = _mm256_loadu_si256((const __m256i *)src_ptr);
+ // Reorder into 2 1 1 2
+ src_reg = _mm256_permute4x64_epi64(src_reg, 0x94);
+
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
+ &kernel_reg_23, &kernel_reg_45);
+
+ res_reg = mm256_round_epi32(&res_first, &reg_round, CONV8_ROUNDING_BITS);
+
+ res_reg = _mm256_packus_epi32(res_reg, res_reg);
+ res_reg = _mm256_permute4x64_epi64(res_reg, 0x8);
+
+ _mm_store_si128((__m128i *)dst_ptr, _mm256_castsi256_si128(res_reg));
+ }
+}
+
+static void vpx_highbd_filter_block1d16_h4_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
+ vpx_highbd_filter_block1d8_h4_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
+ height, kernel, bd);
+ vpx_highbd_filter_block1d8_h4_avx2(src_ptr + 8, src_stride, dst_ptr + 8,
+ dst_stride, height, kernel, bd);
+}
+
static void vpx_highbd_filter_block1d8_v8_avg_avx2(
const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
@@ -1058,39 +1253,239 @@ static void vpx_highbd_filter_block1d8_v2_avg_avx2(
} while (height > 0);
}
-void vpx_highbd_filter_block1d4_h8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
- ptrdiff_t, uint32_t, const int16_t *,
- int);
-void vpx_highbd_filter_block1d4_h2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
- ptrdiff_t, uint32_t, const int16_t *,
- int);
-void vpx_highbd_filter_block1d4_v8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
- ptrdiff_t, uint32_t, const int16_t *,
- int);
-void vpx_highbd_filter_block1d4_v2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
- ptrdiff_t, uint32_t, const int16_t *,
- int);
+void vpx_highbd_filter_block1d4_v4_avx2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load two rows of pixels and rearrange them into the form
+ // ... s[1,0] s[0,0] s[0,0] s[-1,0]
+  // so that we can call multiply and add with the kernel to get partial
+  // output. Then
+ // we can call add with another row to get the output.
+
+ // Register for source s[-1:3, :]
+ __m256i src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
+ __m256i src_reg_m1001, src_reg_1223;
+
+ // Result after multiply and add
+ __m256i res_reg;
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23, kernel_reg_45; // Segments of kernel used
+
+ const __m256i reg_round =
+ _mm256_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+  // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);
+ kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);
+
+ // Row -1 to row 0
+ src_reg_m10 = mm256_loadu2_epi64((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+
+ // Row 0 to row 1
+ src_reg_1 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
+ src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);
+
+ // First three rows
+ src_reg_m1001 = _mm256_unpacklo_epi16(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3)));
+
+ src_reg_12 = _mm256_inserti128_si256(src_reg_1,
+ _mm256_castsi256_si128(src_reg_2), 1);
+
+ src_reg_3 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4)));
+
+ src_reg_23 = _mm256_inserti128_si256(src_reg_2,
+ _mm256_castsi256_si128(src_reg_3), 1);
+
+ // Last three rows
+ src_reg_1223 = _mm256_unpacklo_epi16(src_reg_12, src_reg_23);
+
+ // Output
+ res_reg = mm256_madd_add_epi32(&src_reg_m1001, &src_reg_1223,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Round the words
+ res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Combine to get the result
+ res_reg = _mm256_packus_epi32(res_reg, res_reg);
+ res_reg = _mm256_min_epi16(res_reg, reg_max);
+
+ // Save the result
+ mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001 = src_reg_1223;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_highbd_filter_block1d8_v4_avx2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load two rows of pixels and rearrange them into the form
+ // ... s[1,0] s[0,0] s[0,0] s[-1,0]
+  // so that we can call multiply and add with the kernel to get partial
+  // output. Then
+ // we can call add with another row to get the output.
+
+ // Register for source s[-1:3, :]
+ __m256i src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
+ __m256i src_reg_m1001_lo, src_reg_m1001_hi, src_reg_1223_lo, src_reg_1223_hi;
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23, kernel_reg_45; // Segments of kernel
+
+ // Result after multiply and add
+ __m256i res_reg, res_reg_lo, res_reg_hi;
+
+ const __m256i reg_round =
+ _mm256_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+  // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);
+ kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);
+
+ // Row -1 to row 0
+ src_reg_m10 = mm256_loadu2_si128((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+
+ // Row 0 to row 1
+ src_reg_1 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
+ src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);
+
+ // First three rows
+ src_reg_m1001_lo = _mm256_unpacklo_epi16(src_reg_m10, src_reg_01);
+ src_reg_m1001_hi = _mm256_unpackhi_epi16(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3)));
+
+ src_reg_12 = _mm256_inserti128_si256(src_reg_1,
+ _mm256_castsi256_si128(src_reg_2), 1);
+
+ src_reg_3 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4)));
+
+ src_reg_23 = _mm256_inserti128_si256(src_reg_2,
+ _mm256_castsi256_si128(src_reg_3), 1);
+
+ // Last three rows
+ src_reg_1223_lo = _mm256_unpacklo_epi16(src_reg_12, src_reg_23);
+ src_reg_1223_hi = _mm256_unpackhi_epi16(src_reg_12, src_reg_23);
+
+ // Output from first half
+ res_reg_lo = mm256_madd_add_epi32(&src_reg_m1001_lo, &src_reg_1223_lo,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Output from second half
+ res_reg_hi = mm256_madd_add_epi32(&src_reg_m1001_hi, &src_reg_1223_hi,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Round the words
+ res_reg_lo =
+ mm256_round_epi32(&res_reg_lo, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg_hi =
+ mm256_round_epi32(&res_reg_hi, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Combine to get the result
+ res_reg = _mm256_packus_epi32(res_reg_lo, res_reg_hi);
+ res_reg = _mm256_min_epi16(res_reg, reg_max);
+
+ // Save the result
+ mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001_lo = src_reg_1223_lo;
+ src_reg_m1001_hi = src_reg_1223_hi;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_highbd_filter_block1d16_v4_avx2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride,
+ uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ vpx_highbd_filter_block1d8_v4_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
+ height, kernel, bd);
+ vpx_highbd_filter_block1d8_v4_avx2(src_ptr + 8, src_stride, dst_ptr + 8,
+ dst_stride, height, kernel, bd);
+}
+
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_sse2;
+
#define vpx_highbd_filter_block1d4_h8_avx2 vpx_highbd_filter_block1d4_h8_sse2
#define vpx_highbd_filter_block1d4_h2_avx2 vpx_highbd_filter_block1d4_h2_sse2
#define vpx_highbd_filter_block1d4_v8_avx2 vpx_highbd_filter_block1d4_v8_sse2
#define vpx_highbd_filter_block1d4_v2_avx2 vpx_highbd_filter_block1d4_v2_sse2
+#define vpx_highbd_filter_block1d16_v4_avg_avx2 \
+ vpx_highbd_filter_block1d16_v8_avg_avx2
+#define vpx_highbd_filter_block1d16_h4_avg_avx2 \
+ vpx_highbd_filter_block1d16_h8_avg_avx2
+#define vpx_highbd_filter_block1d8_v4_avg_avx2 \
+ vpx_highbd_filter_block1d8_v8_avg_avx2
+#define vpx_highbd_filter_block1d8_h4_avg_avx2 \
+ vpx_highbd_filter_block1d8_h8_avg_avx2
+#define vpx_highbd_filter_block1d4_v4_avg_avx2 \
+ vpx_highbd_filter_block1d4_v8_avg_avx2
+#define vpx_highbd_filter_block1d4_h4_avg_avx2 \
+ vpx_highbd_filter_block1d4_h8_avg_avx2
+
HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2);
HIGH_FUN_CONV_1D(vert, y0_q4, y_step_q4, v, src - src_stride * 3, , avx2);
HIGH_FUN_CONV_2D(, avx2);
-void vpx_highbd_filter_block1d4_h8_avg_sse2(const uint16_t *, ptrdiff_t,
- uint16_t *, ptrdiff_t, uint32_t,
- const int16_t *, int);
-void vpx_highbd_filter_block1d4_h2_avg_sse2(const uint16_t *, ptrdiff_t,
- uint16_t *, ptrdiff_t, uint32_t,
- const int16_t *, int);
-void vpx_highbd_filter_block1d4_v8_avg_sse2(const uint16_t *, ptrdiff_t,
- uint16_t *, ptrdiff_t, uint32_t,
- const int16_t *, int);
-void vpx_highbd_filter_block1d4_v2_avg_sse2(const uint16_t *, ptrdiff_t,
- uint16_t *, ptrdiff_t, uint32_t,
- const int16_t *, int);
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_avg_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_avg_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_avg_sse2;
+
#define vpx_highbd_filter_block1d4_h8_avg_avx2 \
vpx_highbd_filter_block1d4_h8_avg_sse2
#define vpx_highbd_filter_block1d4_h2_avg_avx2 \
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_intrapred_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_intrapred_sse2.asm
index c61b62104f8..caf506ac07e 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_intrapred_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_intrapred_sse2.asm
@@ -256,7 +256,7 @@ cglobal highbd_v_predictor_32x32, 3, 4, 4, dst, stride, above
REP_RET
INIT_XMM sse2
-cglobal highbd_tm_predictor_4x4, 5, 5, 6, dst, stride, above, left, bps
+cglobal highbd_tm_predictor_4x4, 5, 5, 6, dst, stride, above, left, bd
movd m1, [aboveq-2]
movq m0, [aboveq]
pshuflw m1, m1, 0x0
@@ -264,7 +264,7 @@ cglobal highbd_tm_predictor_4x4, 5, 5, 6, dst, stride, above, left, bps
movlhps m1, m1 ; tl tl tl tl tl tl tl tl
; Get the values to compute the maximum value at this bit depth
pcmpeqw m3, m3
- movd m4, bpsd
+ movd m4, bdd
psubw m0, m1 ; t1-tl t2-tl t3-tl t4-tl
psllw m3, m4
pcmpeqw m2, m2
@@ -295,7 +295,7 @@ cglobal highbd_tm_predictor_4x4, 5, 5, 6, dst, stride, above, left, bps
RET
INIT_XMM sse2
-cglobal highbd_tm_predictor_8x8, 5, 6, 5, dst, stride, above, left, bps, one
+cglobal highbd_tm_predictor_8x8, 5, 6, 5, dst, stride, above, left, bd, one
movd m1, [aboveq-2]
mova m0, [aboveq]
pshuflw m1, m1, 0x0
@@ -304,7 +304,7 @@ cglobal highbd_tm_predictor_8x8, 5, 6, 5, dst, stride, above, left, bps, one
pxor m3, m3
pxor m4, m4
pinsrw m3, oned, 0
- pinsrw m4, bpsd, 0
+ pinsrw m4, bdd, 0
pshuflw m3, m3, 0x0
DEFINE_ARGS dst, stride, line, left
punpcklqdq m3, m3
@@ -339,14 +339,14 @@ cglobal highbd_tm_predictor_8x8, 5, 6, 5, dst, stride, above, left, bps, one
REP_RET
INIT_XMM sse2
-cglobal highbd_tm_predictor_16x16, 5, 5, 8, dst, stride, above, left, bps
+cglobal highbd_tm_predictor_16x16, 5, 5, 8, dst, stride, above, left, bd
movd m2, [aboveq-2]
mova m0, [aboveq]
mova m1, [aboveq+16]
pshuflw m2, m2, 0x0
; Get the values to compute the maximum value at this bit depth
pcmpeqw m3, m3
- movd m4, bpsd
+ movd m4, bdd
punpcklqdq m2, m2
psllw m3, m4
pcmpeqw m5, m5
@@ -386,7 +386,7 @@ cglobal highbd_tm_predictor_16x16, 5, 5, 8, dst, stride, above, left, bps
REP_RET
INIT_XMM sse2
-cglobal highbd_tm_predictor_32x32, 5, 5, 8, dst, stride, above, left, bps
+cglobal highbd_tm_predictor_32x32, 5, 5, 8, dst, stride, above, left, bd
movd m0, [aboveq-2]
mova m1, [aboveq]
mova m2, [aboveq+16]
@@ -395,7 +395,7 @@ cglobal highbd_tm_predictor_32x32, 5, 5, 8, dst, stride, above, left, bps
pshuflw m0, m0, 0x0
; Get the values to compute the maximum value at this bit depth
pcmpeqw m5, m5
- movd m6, bpsd
+ movd m6, bdd
psllw m5, m6
pcmpeqw m7, m7
pxor m6, m6 ; min possible value
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c
index ec22db9f4cd..f7fb40d5159 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c
@@ -47,13 +47,13 @@ static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) {
// TODO(debargha, peter): Break up large functions into smaller ones
// in this file.
-void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
- const uint8_t *_blimit,
- const uint8_t *_limit,
- const uint8_t *_thresh, int bd) {
+void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
const __m128i zero = _mm_set1_epi16(0);
const __m128i one = _mm_set1_epi16(1);
- __m128i blimit, limit, thresh;
+ __m128i blimit_v, limit_v, thresh_v;
__m128i q7, p7, q6, p6, q5, p5, q4, p4, q3, p3, q2, p2, q1, p1, q0, p0;
__m128i mask, hev, flat, flat2, abs_p1p0, abs_q1q0;
__m128i ps1, qs1, ps0, qs0;
@@ -70,35 +70,35 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
__m128i eight, four;
if (bd == 8) {
- blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
- limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
- thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
+ blimit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero);
+ limit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero);
+ thresh_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero);
} else if (bd == 10) {
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 2);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 2);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 2);
} else { // bd == 12
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 4);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 4);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 4);
}
- q4 = _mm_load_si128((__m128i *)(s + 4 * p));
- p4 = _mm_load_si128((__m128i *)(s - 5 * p));
- q3 = _mm_load_si128((__m128i *)(s + 3 * p));
- p3 = _mm_load_si128((__m128i *)(s - 4 * p));
- q2 = _mm_load_si128((__m128i *)(s + 2 * p));
- p2 = _mm_load_si128((__m128i *)(s - 3 * p));
- q1 = _mm_load_si128((__m128i *)(s + 1 * p));
- p1 = _mm_load_si128((__m128i *)(s - 2 * p));
- q0 = _mm_load_si128((__m128i *)(s + 0 * p));
- p0 = _mm_load_si128((__m128i *)(s - 1 * p));
+ q4 = _mm_load_si128((__m128i *)(s + 4 * pitch));
+ p4 = _mm_load_si128((__m128i *)(s - 5 * pitch));
+ q3 = _mm_load_si128((__m128i *)(s + 3 * pitch));
+ p3 = _mm_load_si128((__m128i *)(s - 4 * pitch));
+ q2 = _mm_load_si128((__m128i *)(s + 2 * pitch));
+ p2 = _mm_load_si128((__m128i *)(s - 3 * pitch));
+ q1 = _mm_load_si128((__m128i *)(s + 1 * pitch));
+ p1 = _mm_load_si128((__m128i *)(s - 2 * pitch));
+ q0 = _mm_load_si128((__m128i *)(s + 0 * pitch));
+ p0 = _mm_load_si128((__m128i *)(s - 1 * pitch));
// highbd_filter_mask
abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
@@ -111,14 +111,14 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
// highbd_hev_mask (in C code this is actually called from highbd_filter4)
flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu16(flat, thresh);
+ hev = _mm_subs_epu16(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0); // abs(p0 - q0) * 2
abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); // abs(p1 - q1) / 2
- mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
- mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
+ mask = _mm_and_si128(mask, _mm_adds_epu16(limit_v, one));
work = _mm_max_epi16(
_mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)),
_mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)));
@@ -132,7 +132,7 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
_mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
mask = _mm_max_epi16(work, mask);
- mask = _mm_subs_epu16(mask, limit);
+ mask = _mm_subs_epu16(mask, limit_v);
mask = _mm_cmpeq_epi16(mask, zero); // return ~mask
// lp filter
@@ -207,12 +207,12 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
// (because, in both vars, each block of 16 either all 1s or all 0s)
flat = _mm_and_si128(flat, mask);
- p5 = _mm_load_si128((__m128i *)(s - 6 * p));
- q5 = _mm_load_si128((__m128i *)(s + 5 * p));
- p6 = _mm_load_si128((__m128i *)(s - 7 * p));
- q6 = _mm_load_si128((__m128i *)(s + 6 * p));
- p7 = _mm_load_si128((__m128i *)(s - 8 * p));
- q7 = _mm_load_si128((__m128i *)(s + 7 * p));
+ p5 = _mm_load_si128((__m128i *)(s - 6 * pitch));
+ q5 = _mm_load_si128((__m128i *)(s + 5 * pitch));
+ p6 = _mm_load_si128((__m128i *)(s - 7 * pitch));
+ q6 = _mm_load_si128((__m128i *)(s + 6 * pitch));
+ p7 = _mm_load_si128((__m128i *)(s - 8 * pitch));
+ q7 = _mm_load_si128((__m128i *)(s + 7 * pitch));
// highbd_flat_mask5 (arguments passed in are p0, q0, p4-p7, q4-q7
// but referred to as p0-p4 & q0-q4 in fn)
@@ -389,8 +389,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q6 = _mm_and_si128(flat2, flat2_q6);
// get values for when (flat2 && flat && mask)
q6 = _mm_or_si128(q6, flat2_q6); // full list of q6 values
- _mm_store_si128((__m128i *)(s - 7 * p), p6);
- _mm_store_si128((__m128i *)(s + 6 * p), q6);
+ _mm_store_si128((__m128i *)(s - 7 * pitch), p6);
+ _mm_store_si128((__m128i *)(s + 6 * pitch), q6);
p5 = _mm_andnot_si128(flat2, p5);
// p5 remains unchanged if !(flat2 && flat && mask)
@@ -404,8 +404,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
// get values for when (flat2 && flat && mask)
q5 = _mm_or_si128(q5, flat2_q5);
// full list of q5 values
- _mm_store_si128((__m128i *)(s - 6 * p), p5);
- _mm_store_si128((__m128i *)(s + 5 * p), q5);
+ _mm_store_si128((__m128i *)(s - 6 * pitch), p5);
+ _mm_store_si128((__m128i *)(s + 5 * pitch), q5);
p4 = _mm_andnot_si128(flat2, p4);
// p4 remains unchanged if !(flat2 && flat && mask)
@@ -417,8 +417,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q4 = _mm_and_si128(flat2, flat2_q4);
// get values for when (flat2 && flat && mask)
q4 = _mm_or_si128(q4, flat2_q4); // full list of q4 values
- _mm_store_si128((__m128i *)(s - 5 * p), p4);
- _mm_store_si128((__m128i *)(s + 4 * p), q4);
+ _mm_store_si128((__m128i *)(s - 5 * pitch), p4);
+ _mm_store_si128((__m128i *)(s + 4 * pitch), q4);
p3 = _mm_andnot_si128(flat2, p3);
// p3 takes value from highbd_filter8 if !(flat2 && flat && mask)
@@ -430,8 +430,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q3 = _mm_and_si128(flat2, flat2_q3);
// get values for when (flat2 && flat && mask)
q3 = _mm_or_si128(q3, flat2_q3); // full list of q3 values
- _mm_store_si128((__m128i *)(s - 4 * p), p3);
- _mm_store_si128((__m128i *)(s + 3 * p), q3);
+ _mm_store_si128((__m128i *)(s - 4 * pitch), p3);
+ _mm_store_si128((__m128i *)(s + 3 * pitch), q3);
p2 = _mm_andnot_si128(flat2, p2);
// p2 takes value from highbd_filter8 if !(flat2 && flat && mask)
@@ -444,8 +444,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q2 = _mm_and_si128(flat2, flat2_q2);
// get values for when (flat2 && flat && mask)
q2 = _mm_or_si128(q2, flat2_q2); // full list of q2 values
- _mm_store_si128((__m128i *)(s - 3 * p), p2);
- _mm_store_si128((__m128i *)(s + 2 * p), q2);
+ _mm_store_si128((__m128i *)(s - 3 * pitch), p2);
+ _mm_store_si128((__m128i *)(s + 2 * pitch), q2);
p1 = _mm_andnot_si128(flat2, p1);
// p1 takes value from highbd_filter8 if !(flat2 && flat && mask)
@@ -457,8 +457,8 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q1 = _mm_and_si128(flat2, flat2_q1);
// get values for when (flat2 && flat && mask)
q1 = _mm_or_si128(q1, flat2_q1); // full list of q1 values
- _mm_store_si128((__m128i *)(s - 2 * p), p1);
- _mm_store_si128((__m128i *)(s + 1 * p), q1);
+ _mm_store_si128((__m128i *)(s - 2 * pitch), p1);
+ _mm_store_si128((__m128i *)(s + 1 * pitch), q1);
p0 = _mm_andnot_si128(flat2, p0);
// p0 takes value from highbd_filter8 if !(flat2 && flat && mask)
@@ -470,22 +470,22 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
flat2_q0 = _mm_and_si128(flat2, flat2_q0);
// get values for when (flat2 && flat && mask)
q0 = _mm_or_si128(q0, flat2_q0); // full list of q0 values
- _mm_store_si128((__m128i *)(s - 1 * p), p0);
- _mm_store_si128((__m128i *)(s - 0 * p), q0);
+ _mm_store_si128((__m128i *)(s - 1 * pitch), p0);
+ _mm_store_si128((__m128i *)(s - 0 * pitch), q0);
}
-void vpx_highbd_lpf_horizontal_16_dual_sse2(uint16_t *s, int p,
- const uint8_t *_blimit,
- const uint8_t *_limit,
- const uint8_t *_thresh, int bd) {
- vpx_highbd_lpf_horizontal_16_sse2(s, p, _blimit, _limit, _thresh, bd);
- vpx_highbd_lpf_horizontal_16_sse2(s + 8, p, _blimit, _limit, _thresh, bd);
+void vpx_highbd_lpf_horizontal_16_dual_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
+ vpx_highbd_lpf_horizontal_16_sse2(s, pitch, blimit, limit, thresh, bd);
+ vpx_highbd_lpf_horizontal_16_sse2(s + 8, pitch, blimit, limit, thresh, bd);
}
-void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
- const uint8_t *_blimit,
- const uint8_t *_limit,
- const uint8_t *_thresh, int bd) {
+void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, flat_op2[16]);
DECLARE_ALIGNED(16, uint16_t, flat_op1[16]);
DECLARE_ALIGNED(16, uint16_t, flat_op0[16]);
@@ -493,16 +493,16 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
DECLARE_ALIGNED(16, uint16_t, flat_oq1[16]);
DECLARE_ALIGNED(16, uint16_t, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
- __m128i blimit, limit, thresh;
+ __m128i blimit_v, limit_v, thresh_v;
__m128i mask, hev, flat;
- __m128i p3 = _mm_load_si128((__m128i *)(s - 4 * p));
- __m128i q3 = _mm_load_si128((__m128i *)(s + 3 * p));
- __m128i p2 = _mm_load_si128((__m128i *)(s - 3 * p));
- __m128i q2 = _mm_load_si128((__m128i *)(s + 2 * p));
- __m128i p1 = _mm_load_si128((__m128i *)(s - 2 * p));
- __m128i q1 = _mm_load_si128((__m128i *)(s + 1 * p));
- __m128i p0 = _mm_load_si128((__m128i *)(s - 1 * p));
- __m128i q0 = _mm_load_si128((__m128i *)(s + 0 * p));
+ __m128i p3 = _mm_load_si128((__m128i *)(s - 4 * pitch));
+ __m128i q3 = _mm_load_si128((__m128i *)(s + 3 * pitch));
+ __m128i p2 = _mm_load_si128((__m128i *)(s - 3 * pitch));
+ __m128i q2 = _mm_load_si128((__m128i *)(s + 2 * pitch));
+ __m128i p1 = _mm_load_si128((__m128i *)(s - 2 * pitch));
+ __m128i q1 = _mm_load_si128((__m128i *)(s + 1 * pitch));
+ __m128i p0 = _mm_load_si128((__m128i *)(s - 1 * pitch));
+ __m128i q0 = _mm_load_si128((__m128i *)(s + 0 * pitch));
const __m128i one = _mm_set1_epi16(1);
const __m128i ffff = _mm_cmpeq_epi16(one, one);
__m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work;
@@ -519,25 +519,25 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
__m128i filter1, filter2;
if (bd == 8) {
- blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
- limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
- thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
+ blimit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero);
+ limit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero);
+ thresh_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero);
t80 = _mm_set1_epi16(0x80);
} else if (bd == 10) {
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 2);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 2);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 2);
t80 = _mm_set1_epi16(0x200);
} else { // bd == 12
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 4);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 4);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 4);
t80 = _mm_set1_epi16(0x800);
}
@@ -553,16 +553,16 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0));
abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1));
flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu16(flat, thresh);
+ hev = _mm_subs_epu16(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
- mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
// So taking maximums continues to work:
- mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
+ mask = _mm_and_si128(mask, _mm_adds_epu16(limit_v, one));
mask = _mm_max_epi16(abs_p1p0, mask);
// mask |= (abs(p1 - p0) > limit) * -1;
mask = _mm_max_epi16(abs_q1q0, mask);
@@ -576,7 +576,7 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
_mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)),
_mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
mask = _mm_max_epi16(work, mask);
- mask = _mm_subs_epu16(mask, limit);
+ mask = _mm_subs_epu16(mask, limit_v);
mask = _mm_cmpeq_epi16(mask, zero);
// flat_mask4
@@ -674,7 +674,7 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
q1 = _mm_and_si128(flat, q1);
q1 = _mm_or_si128(work_a, q1);
- work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
q2 = _mm_load_si128((__m128i *)flat_oq2);
work_a = _mm_andnot_si128(flat, work_a);
q2 = _mm_and_si128(flat, q2);
@@ -694,43 +694,43 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
p1 = _mm_and_si128(flat, p1);
p1 = _mm_or_si128(work_a, p1);
- work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
p2 = _mm_load_si128((__m128i *)flat_op2);
work_a = _mm_andnot_si128(flat, work_a);
p2 = _mm_and_si128(flat, p2);
p2 = _mm_or_si128(work_a, p2);
- _mm_store_si128((__m128i *)(s - 3 * p), p2);
- _mm_store_si128((__m128i *)(s - 2 * p), p1);
- _mm_store_si128((__m128i *)(s - 1 * p), p0);
- _mm_store_si128((__m128i *)(s + 0 * p), q0);
- _mm_store_si128((__m128i *)(s + 1 * p), q1);
- _mm_store_si128((__m128i *)(s + 2 * p), q2);
+ _mm_store_si128((__m128i *)(s - 3 * pitch), p2);
+ _mm_store_si128((__m128i *)(s - 2 * pitch), p1);
+ _mm_store_si128((__m128i *)(s - 1 * pitch), p0);
+ _mm_store_si128((__m128i *)(s + 0 * pitch), q0);
+ _mm_store_si128((__m128i *)(s + 1 * pitch), q1);
+ _mm_store_si128((__m128i *)(s + 2 * pitch), q2);
}
void vpx_highbd_lpf_horizontal_8_dual_sse2(
- uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
- const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
- const uint8_t *_thresh1, int bd) {
- vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
- vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
+ const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1, int bd) {
+ vpx_highbd_lpf_horizontal_8_sse2(s, pitch, blimit0, limit0, thresh0, bd);
+ vpx_highbd_lpf_horizontal_8_sse2(s + 8, pitch, blimit1, limit1, thresh1, bd);
}
-void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
- const uint8_t *_blimit,
- const uint8_t *_limit,
- const uint8_t *_thresh, int bd) {
+void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
const __m128i zero = _mm_set1_epi16(0);
- __m128i blimit, limit, thresh;
+ __m128i blimit_v, limit_v, thresh_v;
__m128i mask, hev, flat;
- __m128i p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
- __m128i p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
- __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
- __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
- __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
- __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
- __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
- __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
+ __m128i p3 = _mm_loadu_si128((__m128i *)(s - 4 * pitch));
+ __m128i p2 = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
+ __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * pitch));
+ __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * pitch));
+ __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * pitch));
+ __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
+ __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
+ __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
const __m128i abs_p1p0 =
_mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
const __m128i abs_q1q0 =
@@ -760,33 +760,33 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
__m128i filter1, filter2;
if (bd == 8) {
- blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
- limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
- thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
+ blimit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero);
+ limit_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero);
+ thresh_v = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero);
t80 = _mm_set1_epi16(0x80);
tff80 = _mm_set1_epi16(0xff80);
tffe0 = _mm_set1_epi16(0xffe0);
t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 8);
t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 8);
} else if (bd == 10) {
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 2);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 2);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 2);
t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), 2);
tff80 = _mm_slli_epi16(_mm_set1_epi16(0xff80), 2);
tffe0 = _mm_slli_epi16(_mm_set1_epi16(0xffe0), 2);
t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 6);
t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 6);
} else { // bd == 12
- blimit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
- limit = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
- thresh = _mm_slli_epi16(
- _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
+ blimit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)blimit), zero), 4);
+ limit_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)limit), zero), 4);
+ thresh_v = _mm_slli_epi16(
+ _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)thresh), zero), 4);
t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), 4);
tff80 = _mm_slli_epi16(_mm_set1_epi16(0xff80), 4);
tffe0 = _mm_slli_epi16(_mm_set1_epi16(0xffe0), 4);
@@ -794,23 +794,23 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 4);
}
- ps1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
- ps0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
- qs0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
- qs1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
+ ps1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 2 * pitch)), t80);
+ ps0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 1 * pitch)), t80);
+ qs0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 0 * pitch)), t80);
+ qs1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 1 * pitch)), t80);
// filter_mask and hev_mask
flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu16(flat, thresh);
+ hev = _mm_subs_epu16(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
- mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
// So taking maximums continues to work:
- mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
+ mask = _mm_and_si128(mask, _mm_adds_epu16(limit_v, one));
mask = _mm_max_epi16(flat, mask);
// mask |= (abs(p1 - p0) > limit) * -1;
// mask |= (abs(q1 - q0) > limit) * -1;
@@ -822,7 +822,7 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
_mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2)),
_mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3)));
mask = _mm_max_epi16(work, mask);
- mask = _mm_subs_epu16(mask, limit);
+ mask = _mm_subs_epu16(mask, limit_v);
mask = _mm_cmpeq_epi16(mask, zero);
// filter4
@@ -872,18 +872,18 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
p1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
t80);
- _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
- _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
- _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
- _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+ _mm_storeu_si128((__m128i *)(s - 2 * pitch), p1);
+ _mm_storeu_si128((__m128i *)(s - 1 * pitch), p0);
+ _mm_storeu_si128((__m128i *)(s + 0 * pitch), q0);
+ _mm_storeu_si128((__m128i *)(s + 1 * pitch), q1);
}
void vpx_highbd_lpf_horizontal_4_dual_sse2(
- uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
- const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
- const uint8_t *_thresh1, int bd) {
- vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd);
- vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
+ const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1, int bd) {
+ vpx_highbd_lpf_horizontal_4_sse2(s, pitch, blimit0, limit0, thresh0, bd);
+ vpx_highbd_lpf_horizontal_4_sse2(s + 8, pitch, blimit1, limit1, thresh1, bd);
}
static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
@@ -998,9 +998,9 @@ static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p,
highbd_transpose(src1, in_p, dest1, out_p, 1);
}
-void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit, const uint8_t *thresh,
- int bd) {
+void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit, const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
uint16_t *src[1];
uint16_t *dst[1];
@@ -1009,7 +1009,7 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
src[0] = s - 4;
dst[0] = t_dst;
- highbd_transpose(src, p, dst, 8, 1);
+ highbd_transpose(src, pitch, dst, 8, 1);
// Loop filtering
vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
@@ -1018,11 +1018,11 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
dst[0] = s - 4;
// Transpose back
- highbd_transpose(src, 8, dst, p, 1);
+ highbd_transpose(src, 8, dst, pitch, 1);
}
void vpx_highbd_lpf_vertical_4_dual_sse2(
- uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
@@ -1030,7 +1030,7 @@ void vpx_highbd_lpf_vertical_4_dual_sse2(
uint16_t *dst[2];
// Transpose 8x16
- highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+ highbd_transpose8x16(s - 4, s - 4 + pitch * 8, pitch, t_dst, 16);
// Loop filtering
vpx_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
@@ -1038,15 +1038,15 @@ void vpx_highbd_lpf_vertical_4_dual_sse2(
src[0] = t_dst;
src[1] = t_dst + 8;
dst[0] = s - 4;
- dst[1] = s - 4 + p * 8;
+ dst[1] = s - 4 + pitch * 8;
// Transpose back
- highbd_transpose(src, 16, dst, p, 2);
+ highbd_transpose(src, 16, dst, pitch, 2);
}
-void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit, const uint8_t *thresh,
- int bd) {
+void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit, const uint8_t *limit,
+ const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
uint16_t *src[1];
uint16_t *dst[1];
@@ -1055,7 +1055,7 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
src[0] = s - 4;
dst[0] = t_dst;
- highbd_transpose(src, p, dst, 8, 1);
+ highbd_transpose(src, pitch, dst, 8, 1);
// Loop filtering
vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd);
@@ -1064,11 +1064,11 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
dst[0] = s - 4;
// Transpose back
- highbd_transpose(src, 8, dst, p, 1);
+ highbd_transpose(src, 8, dst, pitch, 1);
}
void vpx_highbd_lpf_vertical_8_dual_sse2(
- uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+ uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
@@ -1076,7 +1076,7 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(
uint16_t *dst[2];
// Transpose 8x16
- highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+ highbd_transpose8x16(s - 4, s - 4 + pitch * 8, pitch, t_dst, 16);
// Loop filtering
vpx_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
@@ -1085,13 +1085,14 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(
src[1] = t_dst + 8;
dst[0] = s - 4;
- dst[1] = s - 4 + p * 8;
+ dst[1] = s - 4 + pitch * 8;
// Transpose back
- highbd_transpose(src, 16, dst, p, 2);
+ highbd_transpose(src, 16, dst, pitch, 2);
}
-void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
+void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int pitch,
+ const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
@@ -1104,7 +1105,7 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
dst[1] = t_dst + 8 * 8;
// Transpose 16x8
- highbd_transpose(src, p, dst, 8, 2);
+ highbd_transpose(src, pitch, dst, 8, 2);
// Loop filtering
vpx_highbd_lpf_horizontal_16_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh,
@@ -1115,24 +1116,25 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
dst[1] = s;
// Transpose back
- highbd_transpose(src, 8, dst, p, 2);
+ highbd_transpose(src, 8, dst, pitch, 2);
}
-void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p,
+void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int pitch,
const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh, int bd) {
DECLARE_ALIGNED(16, uint16_t, t_dst[256]);
// Transpose 16x16
- highbd_transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);
- highbd_transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
+ highbd_transpose8x16(s - 8, s - 8 + 8 * pitch, pitch, t_dst, 16);
+ highbd_transpose8x16(s, s + 8 * pitch, pitch, t_dst + 8 * 16, 16);
// Loop filtering
vpx_highbd_lpf_horizontal_16_dual_sse2(t_dst + 8 * 16, 16, blimit, limit,
thresh, bd);
// Transpose back
- highbd_transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
- highbd_transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * p, p);
+ highbd_transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, pitch);
+ highbd_transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * pitch,
+ pitch);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_subpel_variance_impl_sse2.asm
index db4aaf4ea0a..cefde0f57db 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_subpel_variance_impl_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_subpel_variance_impl_sse2.asm
@@ -32,12 +32,12 @@ SECTION .text
; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
; int x_offset, int y_offset,
-; const uint8_t *dst, ptrdiff_t dst_stride,
+; const uint8_t *ref, ptrdiff_t ref_stride,
; int height, unsigned int *sse);
;
; This function returns the SE and stores SSE in the given pointer.
-%macro SUM_SSE 6 ; src1, dst1, src2, dst2, sum, sse
+%macro SUM_SSE 6 ; src1, ref1, src2, ref2, sum, sse
psubw %3, %4
psubw %1, %2
mova %4, %3 ; make copies to manipulate to calc sum
@@ -95,13 +95,13 @@ SECTION .text
%if %2 == 1 ; avg
cglobal highbd_sub_pixel_avg_variance%1xh, 9, 10, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, \
- sec, sec_stride, height, sse
- %define sec_str sec_strideq
+ ref, ref_stride, \
+ second_pred, second_stride, height, sse
+ %define second_str second_strideq
%else
cglobal highbd_sub_pixel_variance%1xh, 7, 8, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, height, sse
+ ref, ref_stride, height, sse
%endif
%define block_height heightd
%define bilin_filter sseq
@@ -110,14 +110,14 @@ SECTION .text
%if %2 == 1 ; avg
cglobal highbd_sub_pixel_avg_variance%1xh, 7, 7, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, \
- sec, sec_stride, height, sse
+ ref, ref_stride, \
+ second_pred, second_stride, height, sse
%define block_height dword heightm
- %define sec_str sec_stridemp
+ %define second_str second_stridemp
%else
cglobal highbd_sub_pixel_variance%1xh, 7, 7, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, height, sse
+ ref, ref_stride, height, sse
%define block_height heightd
%endif
@@ -142,14 +142,14 @@ SECTION .text
%if %2 == 1 ; avg
cglobal highbd_sub_pixel_avg_variance%1xh, 7, 7, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, \
- sec, sec_stride, height, sse
+ ref, ref_stride, \
+ second_pred, second_stride, height, sse
%define block_height dword heightm
- %define sec_str sec_stridemp
+ %define second_str second_stridemp
%else
cglobal highbd_sub_pixel_variance%1xh, 7, 7, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, height, sse
+ ref, ref_stride, height, sse
%define block_height heightd
%endif
@@ -165,7 +165,7 @@ SECTION .text
sar block_height, 1
%endif
%if %2 == 1 ; avg
- shl sec_str, 1
+ shl second_str, 1
%endif
; FIXME(rbultje) replace by jumptable?
@@ -180,35 +180,35 @@ SECTION .text
%if %1 == 16
movu m0, [srcq]
movu m2, [srcq + 16]
- mova m1, [dstq]
- mova m3, [dstq + 16]
+ mova m1, [refq]
+ mova m3, [refq + 16]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m2, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m2, [second_predq+16]
%endif
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
movu m2, [srcq + src_strideq*2]
- mova m1, [dstq]
- mova m3, [dstq + dst_strideq*2]
+ mova m1, [refq]
+ mova m3, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m2, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m2, [second_predq]
%endif
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -226,40 +226,40 @@ SECTION .text
movu m1, [srcq+16]
movu m4, [srcq+src_strideq*2]
movu m5, [srcq+src_strideq*2+16]
- mova m2, [dstq]
- mova m3, [dstq+16]
+ mova m2, [refq]
+ mova m3, [refq+16]
pavgw m0, m4
pavgw m1, m5
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
movu m1, [srcq+src_strideq*2]
movu m5, [srcq+src_strideq*4]
- mova m2, [dstq]
- mova m3, [dstq+dst_strideq*2]
+ mova m2, [refq]
+ mova m3, [refq+ref_strideq*2]
pavgw m0, m1
pavgw m1, m5
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m1, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m1, [second_predq]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -302,8 +302,8 @@ SECTION .text
movu m1, [srcq + 16]
movu m4, [srcq+src_strideq*2]
movu m5, [srcq+src_strideq*2+16]
- mova m2, [dstq]
- mova m3, [dstq+16]
+ mova m2, [refq]
+ mova m3, [refq+16]
; FIXME(rbultje) instead of out=((num-x)*in1+x*in2+rnd)>>log2(num), we can
; also do out=in1+(((num-x)*(in2-in1)+rnd)>>log2(num)). Total number of
; instructions is the same (5), but it is 1 mul instead of 2, so might be
@@ -320,23 +320,23 @@ SECTION .text
psrlw m1, 4
psrlw m0, 4
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
movu m1, [srcq+src_strideq*2]
movu m5, [srcq+src_strideq*4]
mova m4, m1
- mova m2, [dstq]
- mova m3, [dstq+dst_strideq*2]
+ mova m2, [refq]
+ mova m3, [refq+ref_strideq*2]
pmullw m1, filter_y_a
pmullw m5, filter_y_b
paddw m1, filter_rnd
@@ -348,16 +348,16 @@ SECTION .text
psrlw m1, 4
psrlw m0, 4
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m1, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m1, [second_predq]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -381,41 +381,41 @@ SECTION .text
movu m1, [srcq + 16]
movu m4, [srcq + 2]
movu m5, [srcq + 18]
- mova m2, [dstq]
- mova m3, [dstq + 16]
+ mova m2, [refq]
+ mova m3, [refq + 16]
pavgw m0, m4
pavgw m1, m5
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
movu m1, [srcq + src_strideq*2]
movu m4, [srcq + 2]
movu m5, [srcq + src_strideq*2 + 2]
- mova m2, [dstq]
- mova m3, [dstq + dst_strideq*2]
+ mova m2, [refq]
+ mova m3, [refq + ref_strideq*2]
pavgw m0, m4
pavgw m1, m5
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m1, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m1, [second_predq]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -444,20 +444,20 @@ SECTION .text
pavgw m3, m5
pavgw m0, m2
pavgw m1, m3
- mova m4, [dstq]
- mova m5, [dstq + 16]
+ mova m4, [refq]
+ mova m5, [refq + 16]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m4, m1, m5, m6, m7
mova m0, m2
mova m1, m3
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
@@ -473,20 +473,20 @@ SECTION .text
pavgw m3, m5
pavgw m0, m2
pavgw m2, m3
- mova m4, [dstq]
- mova m5, [dstq + dst_strideq*2]
+ mova m4, [refq]
+ mova m5, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m2, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m2, [second_predq]
%endif
SUM_SSE m0, m4, m2, m5, m6, m7
mova m0, m3
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -549,21 +549,21 @@ SECTION .text
paddw m0, filter_rnd
psrlw m1, 4
paddw m0, m2
- mova m2, [dstq]
+ mova m2, [refq]
psrlw m0, 4
- mova m3, [dstq+16]
+ mova m3, [refq+16]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
mova m0, m4
mova m1, m5
lea srcq, [srcq + src_strideq*2]
- lea dstq, [dstq + dst_strideq*2]
+ lea refq, [refq + ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
@@ -588,21 +588,21 @@ SECTION .text
paddw m0, filter_rnd
psrlw m4, 4
paddw m0, m2
- mova m2, [dstq]
+ mova m2, [refq]
psrlw m0, 4
- mova m3, [dstq+dst_strideq*2]
+ mova m3, [refq+ref_strideq*2]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m4, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m4, [second_predq]
%endif
SUM_SSE m0, m2, m4, m3, m6, m7
mova m0, m5
lea srcq, [srcq + src_strideq*4]
- lea dstq, [dstq + dst_strideq*4]
+ lea refq, [refq + ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -651,8 +651,8 @@ SECTION .text
movu m1, [srcq+16]
movu m2, [srcq+2]
movu m3, [srcq+18]
- mova m4, [dstq]
- mova m5, [dstq+16]
+ mova m4, [refq]
+ mova m5, [refq+16]
pmullw m1, filter_x_a
pmullw m3, filter_x_b
paddw m1, filter_rnd
@@ -664,23 +664,23 @@ SECTION .text
psrlw m1, 4
psrlw m0, 4
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m4, m1, m5, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
movu m1, [srcq+src_strideq*2]
movu m2, [srcq+2]
movu m3, [srcq+src_strideq*2+2]
- mova m4, [dstq]
- mova m5, [dstq+dst_strideq*2]
+ mova m4, [refq]
+ mova m5, [refq+ref_strideq*2]
pmullw m1, filter_x_a
pmullw m3, filter_x_b
paddw m1, filter_rnd
@@ -692,16 +692,16 @@ SECTION .text
psrlw m1, 4
psrlw m0, 4
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m1, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m1, [second_predq]
%endif
SUM_SSE m0, m4, m1, m5, m6, m7
lea srcq, [srcq+src_strideq*4]
- lea dstq, [dstq+dst_strideq*4]
+ lea refq, [refq+ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -773,24 +773,24 @@ SECTION .text
paddw m3, filter_rnd
paddw m2, m4
paddw m3, m5
- mova m4, [dstq]
- mova m5, [dstq+16]
+ mova m4, [refq]
+ mova m5, [refq+16]
psrlw m2, 4
psrlw m3, 4
pavgw m0, m2
pavgw m1, m3
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m4, m1, m5, m6, m7
mova m0, m2
mova m1, m3
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
@@ -814,24 +814,24 @@ SECTION .text
paddw m3, filter_rnd
paddw m2, m4
paddw m3, m5
- mova m4, [dstq]
- mova m5, [dstq+dst_strideq*2]
+ mova m4, [refq]
+ mova m5, [refq+ref_strideq*2]
psrlw m2, 4
psrlw m3, 4
pavgw m0, m2
pavgw m2, m3
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m2, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m2, [second_predq]
%endif
SUM_SSE m0, m4, m2, m5, m6, m7
mova m0, m3
lea srcq, [srcq+src_strideq*4]
- lea dstq, [dstq+dst_strideq*4]
+ lea refq, [refq+ref_strideq*4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
@@ -929,23 +929,23 @@ SECTION .text
pmullw m3, filter_y_b
paddw m0, m2
paddw m1, filter_rnd
- mova m2, [dstq]
+ mova m2, [refq]
paddw m1, m3
psrlw m0, 4
psrlw m1, 4
- mova m3, [dstq+16]
+ mova m3, [refq+16]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- pavgw m1, [secq+16]
+ pavgw m0, [second_predq]
+ pavgw m1, [second_predq+16]
%endif
SUM_SSE m0, m2, m1, m3, m6, m7
mova m0, m4
mova m1, m5
INC_SRC_BY_SRC_STRIDE
- lea dstq, [dstq + dst_strideq * 2]
+ lea refq, [refq + ref_strideq * 2]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%else ; %1 < 16
movu m0, [srcq]
@@ -983,23 +983,23 @@ SECTION .text
pmullw m3, filter_y_b
paddw m0, m2
paddw m4, filter_rnd
- mova m2, [dstq]
+ mova m2, [refq]
paddw m4, m3
psrlw m0, 4
psrlw m4, 4
- mova m3, [dstq+dst_strideq*2]
+ mova m3, [refq+ref_strideq*2]
%if %2 == 1 ; avg
- pavgw m0, [secq]
- add secq, sec_str
- pavgw m4, [secq]
+ pavgw m0, [second_predq]
+ add second_predq, second_str
+ pavgw m4, [second_predq]
%endif
SUM_SSE m0, m2, m4, m3, m6, m7
mova m0, m5
INC_SRC_BY_SRC_STRIDE
- lea dstq, [dstq + dst_strideq * 4]
+ lea refq, [refq + ref_strideq * 4]
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
%endif
dec block_height
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm
index e646767e190..a256a59ec0e 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm
@@ -16,9 +16,9 @@ SECTION .text
;unsigned int vpx_highbd_calc16x16var_sse2
;(
; unsigned char * src_ptr,
-; int source_stride,
+; int src_stride,
; unsigned char * ref_ptr,
-; int recon_stride,
+; int ref_stride,
; unsigned int * SSE,
; int * Sum
;)
@@ -36,8 +36,8 @@ sym(vpx_highbd_calc16x16var_sse2):
mov rsi, arg(0) ;[src_ptr]
mov rdi, arg(2) ;[ref_ptr]
- movsxd rax, DWORD PTR arg(1) ;[source_stride]
- movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+ movsxd rax, DWORD PTR arg(1) ;[src_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[ref_stride]
add rax, rax ; source stride in bytes
add rdx, rdx ; recon stride in bytes
@@ -169,9 +169,9 @@ sym(vpx_highbd_calc16x16var_sse2):
;unsigned int vpx_highbd_calc8x8var_sse2
;(
; unsigned char * src_ptr,
-; int source_stride,
+; int src_stride,
; unsigned char * ref_ptr,
-; int recon_stride,
+; int ref_stride,
; unsigned int * SSE,
; int * Sum
;)
@@ -189,8 +189,8 @@ sym(vpx_highbd_calc8x8var_sse2):
mov rsi, arg(0) ;[src_ptr]
mov rdi, arg(2) ;[ref_ptr]
- movsxd rax, DWORD PTR arg(1) ;[source_stride]
- movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+ movsxd rax, DWORD PTR arg(1) ;[src_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[ref_stride]
add rax, rax ; source stride in bytes
add rdx, rdx ; recon stride in bytes
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c
index a6f7c3d25df..d08da1c6766 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/highbd_variance_sse2.c
@@ -251,7 +251,7 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride,
#define DECL(w, opt) \
int vpx_highbd_sub_pixel_variance##w##xh_##opt( \
const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
- const uint16_t *dst, ptrdiff_t dst_stride, int height, \
+ const uint16_t *ref, ptrdiff_t ref_stride, int height, \
unsigned int *sse, void *unused0, void *unused);
#define DECLS(opt) \
DECL(8, opt); \
@@ -265,28 +265,28 @@ DECLS(sse2);
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr) { \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \
+ src, src_stride, x_offset, y_offset, ref, ref_stride, h, &sse, NULL, \
NULL); \
if (w > wf) { \
unsigned int sse2; \
int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
+ src + 16, src_stride, x_offset, y_offset, ref + 16, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
+ src + 32, src_stride, x_offset, y_offset, ref + 32, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
+ src + 48, src_stride, x_offset, y_offset, ref + 48, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
@@ -298,29 +298,29 @@ DECLS(sse2);
\
uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr) { \
int64_t var; \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \
+ src, src_stride, x_offset, y_offset, ref, ref_stride, h, &sse, NULL, \
NULL); \
if (w > wf) { \
uint32_t sse2; \
int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
+ src + 16, src_stride, x_offset, y_offset, ref + 16, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
+ src + 32, src_stride, x_offset, y_offset, ref + 32, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
+ src + 48, src_stride, x_offset, y_offset, ref + 48, ref_stride, h, \
&sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
@@ -335,40 +335,40 @@ DECLS(sse2);
\
uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr) { \
int start_row; \
uint32_t sse; \
int se = 0; \
int64_t var; \
uint64_t long_sse = 0; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
for (start_row = 0; start_row < h; start_row += 16) { \
uint32_t sse2; \
int height = h - start_row < 16 ? h - start_row : 16; \
int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
src + (start_row * src_stride), src_stride, x_offset, y_offset, \
- dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL, \
+ ref + (start_row * ref_stride), ref_stride, height, &sse2, NULL, \
NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf) { \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 16 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \
+ y_offset, ref + 16 + (start_row * ref_stride), ref_stride, height, \
&sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 32 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \
+ y_offset, ref + 32 + (start_row * ref_stride), ref_stride, \
height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \
src + 48 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \
+ y_offset, ref + 48 + (start_row * ref_stride), ref_stride, \
height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
@@ -404,8 +404,8 @@ FNS(sse2);
#define DECL(w, opt) \
int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt( \
const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
- const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \
- ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
+ const uint16_t *ref, ptrdiff_t ref_stride, const uint16_t *second, \
+ ptrdiff_t second_stride, int height, unsigned int *sse, void *unused0, \
void *unused);
#define DECLS(opt1) \
DECL(16, opt1) \
@@ -418,30 +418,30 @@ DECLS(sse2);
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
+ src, src_stride, x_offset, y_offset, ref, ref_stride, sec, w, h, &sse, \
NULL, NULL); \
if (w > wf) { \
uint32_t sse2; \
int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
+ src + 16, src_stride, x_offset, y_offset, ref + 16, ref_stride, \
sec + 16, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
+ src + 32, src_stride, x_offset, y_offset, ref + 32, ref_stride, \
sec + 32, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
+ src + 48, src_stride, x_offset, y_offset, ref + 48, ref_stride, \
sec + 48, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
@@ -453,31 +453,31 @@ DECLS(sse2);
\
uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
int64_t var; \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
+ src, src_stride, x_offset, y_offset, ref, ref_stride, sec, w, h, &sse, \
NULL, NULL); \
if (w > wf) { \
uint32_t sse2; \
int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
+ src + 16, src_stride, x_offset, y_offset, ref + 16, ref_stride, \
sec + 16, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
+ src + 32, src_stride, x_offset, y_offset, ref + 32, ref_stride, \
sec + 32, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
+ src + 48, src_stride, x_offset, y_offset, ref + 48, ref_stride, \
sec + 48, w, h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
@@ -492,7 +492,7 @@ DECLS(sse2);
\
uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
+ const uint8_t *ref8, int ref_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
int start_row; \
int64_t var; \
@@ -500,34 +500,34 @@ DECLS(sse2);
int se = 0; \
uint64_t long_sse = 0; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \
for (start_row = 0; start_row < h; start_row += 16) { \
uint32_t sse2; \
int height = h - start_row < 16 ? h - start_row : 16; \
int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + (start_row * src_stride), src_stride, x_offset, y_offset, \
- dst + (start_row * dst_stride), dst_stride, sec + (start_row * w), \
+ ref + (start_row * ref_stride), ref_stride, sec + (start_row * w), \
w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf) { \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 16 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 16 + (start_row * dst_stride), dst_stride, \
+ y_offset, ref + 16 + (start_row * ref_stride), ref_stride, \
sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
if (w > wf * 2) { \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 32 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \
+ y_offset, ref + 32 + (start_row * ref_stride), ref_stride, \
sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \
src + 48 + (start_row * src_stride), src_stride, x_offset, \
- y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \
+ y_offset, ref + 48 + (start_row * ref_stride), ref_stride, \
sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \
se += se2; \
long_sse += sse2; \
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_avx2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_avx2.c
index 6652a62dcfc..85a7314269d 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_avx2.c
@@ -13,38 +13,38 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
-void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
- const unsigned char *_blimit,
- const unsigned char *_limit,
- const unsigned char *_thresh) {
+void vpx_lpf_horizontal_16_avx2(unsigned char *s, int pitch,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
__m128i mask, hev, flat, flat2;
const __m128i zero = _mm_set1_epi16(0);
const __m128i one = _mm_set1_epi8(1);
__m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
__m128i abs_p1p0;
- const __m128i thresh =
- _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
- const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
- const __m128i blimit =
- _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));
+ const __m128i thresh_v =
+ _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)thresh[0]));
+ const __m128i limit_v = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)limit[0]));
+ const __m128i blimit_v =
+ _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)blimit[0]));
- q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
+ q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * pitch));
q4p4 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p)));
- q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * pitch)));
+ q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * pitch));
q3p3 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p)));
- q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * pitch)));
+ q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * pitch));
q2p2 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p)));
- q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * pitch)));
+ q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * pitch));
q1p1 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * pitch)));
p1q1 = _mm_shuffle_epi32(q1p1, 78);
- q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * pitch));
q0p0 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * pitch)));
p0q0 = _mm_shuffle_epi32(q0p0, 78);
{
@@ -59,12 +59,12 @@ void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
abs_p1q1 =
_mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1));
flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_subs_epu8(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
- mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = _mm_max_epu8(abs_p1p0, mask);
@@ -76,7 +76,7 @@ void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
_mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3)));
mask = _mm_max_epu8(work, mask);
mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
- mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_subs_epu8(mask, limit_v);
mask = _mm_cmpeq_epi8(mask, zero);
}
@@ -136,21 +136,21 @@ void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
flat = _mm_cmpeq_epi8(flat, zero);
flat = _mm_and_si128(flat, mask);
- q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
+ q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * pitch));
q5p5 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * pitch)));
- q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
+ q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * pitch));
q6p6 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * pitch)));
flat2 = _mm_max_epu8(
_mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)),
_mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5)));
- q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
+ q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * pitch));
q7p7 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * pitch)));
work = _mm_max_epu8(
_mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)),
@@ -321,44 +321,44 @@ void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p,
q6p6 = _mm_andnot_si128(flat2, q6p6);
flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
- _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6);
- _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6));
+ _mm_storel_epi64((__m128i *)(s - 7 * pitch), q6p6);
+ _mm_storeh_pi((__m64 *)(s + 6 * pitch), _mm_castsi128_ps(q6p6));
q5p5 = _mm_andnot_si128(flat2, q5p5);
flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
- _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5);
- _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5));
+ _mm_storel_epi64((__m128i *)(s - 6 * pitch), q5p5);
+ _mm_storeh_pi((__m64 *)(s + 5 * pitch), _mm_castsi128_ps(q5p5));
q4p4 = _mm_andnot_si128(flat2, q4p4);
flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
- _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4);
- _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4));
+ _mm_storel_epi64((__m128i *)(s - 5 * pitch), q4p4);
+ _mm_storeh_pi((__m64 *)(s + 4 * pitch), _mm_castsi128_ps(q4p4));
q3p3 = _mm_andnot_si128(flat2, q3p3);
flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
- _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3);
- _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3));
+ _mm_storel_epi64((__m128i *)(s - 4 * pitch), q3p3);
+ _mm_storeh_pi((__m64 *)(s + 3 * pitch), _mm_castsi128_ps(q3p3));
q2p2 = _mm_andnot_si128(flat2, q2p2);
flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
- _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2);
- _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2));
+ _mm_storel_epi64((__m128i *)(s - 3 * pitch), q2p2);
+ _mm_storeh_pi((__m64 *)(s + 2 * pitch), _mm_castsi128_ps(q2p2));
q1p1 = _mm_andnot_si128(flat2, q1p1);
flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
- _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1);
- _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1));
+ _mm_storel_epi64((__m128i *)(s - 2 * pitch), q1p1);
+ _mm_storeh_pi((__m64 *)(s + 1 * pitch), _mm_castsi128_ps(q1p1));
q0p0 = _mm_andnot_si128(flat2, q0p0);
flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
- _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
- _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
+ _mm_storel_epi64((__m128i *)(s - 1 * pitch), q0p0);
+ _mm_storeh_pi((__m64 *)(s - 0 * pitch), _mm_castsi128_ps(q0p0));
}
}
@@ -367,10 +367,10 @@ DECLARE_ALIGNED(32, static const uint8_t, filt_loopfilter_avx2[32]) = {
8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128
};
-void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
- const unsigned char *_blimit,
- const unsigned char *_limit,
- const unsigned char *_thresh) {
+void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int pitch,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
__m128i mask, hev, flat, flat2;
const __m128i zero = _mm_set1_epi16(0);
const __m128i one = _mm_set1_epi8(1);
@@ -380,32 +380,32 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
__m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, q256_4,
p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, p256_0, q256_0;
- const __m128i thresh =
- _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
- const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
- const __m128i blimit =
- _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));
-
- p256_4 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 5 * p)));
- p256_3 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 4 * p)));
- p256_2 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 3 * p)));
- p256_1 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 2 * p)));
- p256_0 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 1 * p)));
- q256_0 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 0 * p)));
- q256_1 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 1 * p)));
- q256_2 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 2 * p)));
- q256_3 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 3 * p)));
- q256_4 =
- _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 4 * p)));
+ const __m128i thresh_v =
+ _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)thresh[0]));
+ const __m128i limit_v = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)limit[0]));
+ const __m128i blimit_v =
+ _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)blimit[0]));
+
+ p256_4 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 5 * pitch)));
+ p256_3 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 4 * pitch)));
+ p256_2 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 3 * pitch)));
+ p256_1 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 2 * pitch)));
+ p256_0 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 1 * pitch)));
+ q256_0 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s - 0 * pitch)));
+ q256_1 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s + 1 * pitch)));
+ q256_2 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s + 2 * pitch)));
+ q256_3 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s + 3 * pitch)));
+ q256_4 = _mm256_castpd_si256(
+ _mm256_broadcast_pd((__m128d const *)(s + 4 * pitch)));
p4 = _mm256_castsi256_si128(p256_4);
p3 = _mm256_castsi256_si128(p256_3);
@@ -431,12 +431,12 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
_mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1));
__m128i work;
flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_subs_epu8(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
- mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = _mm_max_epu8(flat, mask);
@@ -450,7 +450,7 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
_mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
_mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
mask = _mm_max_epu8(work, mask);
- mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_subs_epu8(mask, limit_v);
mask = _mm_cmpeq_epi8(mask, zero);
}
@@ -532,9 +532,9 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
flat = _mm_and_si128(flat, mask);
p256_5 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s - 6 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s - 6 * pitch)));
q256_5 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s + 5 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s + 5 * pitch)));
p5 = _mm256_castsi256_si128(p256_5);
q5 = _mm256_castsi256_si128(q256_5);
flat2 = _mm_max_epu8(
@@ -543,9 +543,9 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
flat2 = _mm_max_epu8(work, flat2);
p256_6 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s - 7 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s - 7 * pitch)));
q256_6 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s + 6 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s + 6 * pitch)));
p6 = _mm256_castsi256_si128(p256_6);
q6 = _mm256_castsi256_si128(q256_6);
work = _mm_max_epu8(
@@ -555,9 +555,9 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
flat2 = _mm_max_epu8(work, flat2);
p256_7 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s - 8 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s - 8 * pitch)));
q256_7 = _mm256_castpd_si256(
- _mm256_broadcast_pd((__m128d const *)(s + 7 * p)));
+ _mm256_broadcast_pd((__m128d const *)(s + 7 * pitch)));
p7 = _mm256_castsi256_si128(p256_7);
q7 = _mm256_castsi256_si128(q256_7);
work = _mm_max_epu8(
@@ -843,71 +843,71 @@ void vpx_lpf_horizontal_16_dual_avx2(unsigned char *s, int p,
p6 = _mm_andnot_si128(flat2, p6);
flat2_p6 = _mm_and_si128(flat2, flat2_p6);
p6 = _mm_or_si128(flat2_p6, p6);
- _mm_storeu_si128((__m128i *)(s - 7 * p), p6);
+ _mm_storeu_si128((__m128i *)(s - 7 * pitch), p6);
p5 = _mm_andnot_si128(flat2, p5);
flat2_p5 = _mm_and_si128(flat2, flat2_p5);
p5 = _mm_or_si128(flat2_p5, p5);
- _mm_storeu_si128((__m128i *)(s - 6 * p), p5);
+ _mm_storeu_si128((__m128i *)(s - 6 * pitch), p5);
p4 = _mm_andnot_si128(flat2, p4);
flat2_p4 = _mm_and_si128(flat2, flat2_p4);
p4 = _mm_or_si128(flat2_p4, p4);
- _mm_storeu_si128((__m128i *)(s - 5 * p), p4);
+ _mm_storeu_si128((__m128i *)(s - 5 * pitch), p4);
p3 = _mm_andnot_si128(flat2, p3);
flat2_p3 = _mm_and_si128(flat2, flat2_p3);
p3 = _mm_or_si128(flat2_p3, p3);
- _mm_storeu_si128((__m128i *)(s - 4 * p), p3);
+ _mm_storeu_si128((__m128i *)(s - 4 * pitch), p3);
p2 = _mm_andnot_si128(flat2, p2);
flat2_p2 = _mm_and_si128(flat2, flat2_p2);
p2 = _mm_or_si128(flat2_p2, p2);
- _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
+ _mm_storeu_si128((__m128i *)(s - 3 * pitch), p2);
p1 = _mm_andnot_si128(flat2, p1);
flat2_p1 = _mm_and_si128(flat2, flat2_p1);
p1 = _mm_or_si128(flat2_p1, p1);
- _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
+ _mm_storeu_si128((__m128i *)(s - 2 * pitch), p1);
p0 = _mm_andnot_si128(flat2, p0);
flat2_p0 = _mm_and_si128(flat2, flat2_p0);
p0 = _mm_or_si128(flat2_p0, p0);
- _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
+ _mm_storeu_si128((__m128i *)(s - 1 * pitch), p0);
q0 = _mm_andnot_si128(flat2, q0);
flat2_q0 = _mm_and_si128(flat2, flat2_q0);
q0 = _mm_or_si128(flat2_q0, q0);
- _mm_storeu_si128((__m128i *)(s - 0 * p), q0);
+ _mm_storeu_si128((__m128i *)(s - 0 * pitch), q0);
q1 = _mm_andnot_si128(flat2, q1);
flat2_q1 = _mm_and_si128(flat2, flat2_q1);
q1 = _mm_or_si128(flat2_q1, q1);
- _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+ _mm_storeu_si128((__m128i *)(s + 1 * pitch), q1);
q2 = _mm_andnot_si128(flat2, q2);
flat2_q2 = _mm_and_si128(flat2, flat2_q2);
q2 = _mm_or_si128(flat2_q2, q2);
- _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
+ _mm_storeu_si128((__m128i *)(s + 2 * pitch), q2);
q3 = _mm_andnot_si128(flat2, q3);
flat2_q3 = _mm_and_si128(flat2, flat2_q3);
q3 = _mm_or_si128(flat2_q3, q3);
- _mm_storeu_si128((__m128i *)(s + 3 * p), q3);
+ _mm_storeu_si128((__m128i *)(s + 3 * pitch), q3);
q4 = _mm_andnot_si128(flat2, q4);
flat2_q4 = _mm_and_si128(flat2, flat2_q4);
q4 = _mm_or_si128(flat2_q4, q4);
- _mm_storeu_si128((__m128i *)(s + 4 * p), q4);
+ _mm_storeu_si128((__m128i *)(s + 4 * pitch), q4);
q5 = _mm_andnot_si128(flat2, q5);
flat2_q5 = _mm_and_si128(flat2, flat2_q5);
q5 = _mm_or_si128(flat2_q5, q5);
- _mm_storeu_si128((__m128i *)(s + 5 * p), q5);
+ _mm_storeu_si128((__m128i *)(s + 5 * pitch), q5);
q6 = _mm_andnot_si128(flat2, q6);
flat2_q6 = _mm_and_si128(flat2, flat2_q6);
q6 = _mm_or_si128(flat2_q6, q6);
- _mm_storeu_si128((__m128i *)(s + 6 * p), q6);
+ _mm_storeu_si128((__m128i *)(s + 6 * pitch), q6);
}
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c
index 853c4d270b1..20dcb0d225e 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/loopfilter_sse2.c
@@ -31,7 +31,7 @@ static INLINE __m128i abs_diff(__m128i a, __m128i b) {
/* const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1); */ \
hev = \
_mm_unpacklo_epi8(_mm_max_epu8(flat, _mm_srli_si128(flat, 8)), zero); \
- hev = _mm_cmpgt_epi16(hev, thresh); \
+ hev = _mm_cmpgt_epi16(hev, thresh_v); \
hev = _mm_packs_epi16(hev, hev); \
\
/* const int8_t mask = filter_mask(*limit, *blimit, */ \
@@ -52,7 +52,7 @@ static INLINE __m128i abs_diff(__m128i a, __m128i b) {
flat = _mm_max_epu8(work, flat); \
flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); \
mask = _mm_unpacklo_epi64(mask, flat); \
- mask = _mm_subs_epu8(mask, limit); \
+ mask = _mm_subs_epu8(mask, limit_v); \
mask = _mm_cmpeq_epi8(mask, zero); \
mask = _mm_and_si128(mask, _mm_srli_si128(mask, 8)); \
} while (0)
@@ -104,27 +104,26 @@ static INLINE __m128i abs_diff(__m128i a, __m128i b) {
ps1ps0 = _mm_xor_si128(ps1ps0, t80); /* ^ 0x80 */ \
} while (0)
-void vpx_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
- const uint8_t *_blimit, const uint8_t *_limit,
- const uint8_t *_thresh) {
+void vpx_lpf_horizontal_4_sse2(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
const __m128i zero = _mm_set1_epi16(0);
- const __m128i limit =
- _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)_blimit),
- _mm_loadl_epi64((const __m128i *)_limit));
- const __m128i thresh =
- _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)_thresh), zero);
+ const __m128i limit_v =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)blimit),
+ _mm_loadl_epi64((const __m128i *)limit));
+ const __m128i thresh_v =
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)thresh), zero);
const __m128i ff = _mm_cmpeq_epi8(zero, zero);
__m128i q1p1, q0p0, p3p2, p2p1, p1p0, q3q2, q2q1, q1q0, ps1ps0, qs1qs0;
__m128i mask, hev;
- p3p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * p)),
- _mm_loadl_epi64((__m128i *)(s - 4 * p)));
- q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * p)),
- _mm_loadl_epi64((__m128i *)(s + 1 * p)));
- q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * p)),
- _mm_loadl_epi64((__m128i *)(s + 0 * p)));
- q3q2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s + 2 * p)),
- _mm_loadl_epi64((__m128i *)(s + 3 * p)));
+ p3p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s - 4 * pitch)));
+ q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 1 * pitch)));
+ q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 0 * pitch)));
+ q3q2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s + 2 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 3 * pitch)));
p1p0 = _mm_unpacklo_epi64(q0p0, q1p1);
p2p1 = _mm_unpacklo_epi64(q1p1, p3p2);
q1q0 = _mm_unpackhi_epi64(q0p0, q1p1);
@@ -133,41 +132,40 @@ void vpx_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */,
FILTER_HEV_MASK;
FILTER4;
- _mm_storeh_pi((__m64 *)(s - 2 * p), _mm_castsi128_ps(ps1ps0)); // *op1
- _mm_storel_epi64((__m128i *)(s - 1 * p), ps1ps0); // *op0
- _mm_storel_epi64((__m128i *)(s + 0 * p), qs1qs0); // *oq0
- _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(qs1qs0)); // *oq1
+ _mm_storeh_pi((__m64 *)(s - 2 * pitch), _mm_castsi128_ps(ps1ps0)); // *op1
+ _mm_storel_epi64((__m128i *)(s - 1 * pitch), ps1ps0); // *op0
+ _mm_storel_epi64((__m128i *)(s + 0 * pitch), qs1qs0); // *oq0
+ _mm_storeh_pi((__m64 *)(s + 1 * pitch), _mm_castsi128_ps(qs1qs0)); // *oq1
}
-void vpx_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
- const uint8_t *_blimit, const uint8_t *_limit,
- const uint8_t *_thresh) {
+void vpx_lpf_vertical_4_sse2(uint8_t *s, int pitch, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
const __m128i zero = _mm_set1_epi16(0);
- const __m128i limit =
- _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)_blimit),
- _mm_loadl_epi64((const __m128i *)_limit));
- const __m128i thresh =
- _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)_thresh), zero);
+ const __m128i limit_v =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)blimit),
+ _mm_loadl_epi64((const __m128i *)limit));
+ const __m128i thresh_v =
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)thresh), zero);
const __m128i ff = _mm_cmpeq_epi8(zero, zero);
__m128i x0, x1, x2, x3;
__m128i q1p1, q0p0, p3p2, p2p1, p1p0, q3q2, q2q1, q1q0, ps1ps0, qs1qs0;
__m128i mask, hev;
// 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
- q1q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 0 * p - 4)),
- _mm_loadl_epi64((__m128i *)(s + 1 * p - 4)));
+ q1q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 0 * pitch - 4)),
+ _mm_loadl_epi64((__m128i *)(s + 1 * pitch - 4)));
// 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
- x1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 2 * p - 4)),
- _mm_loadl_epi64((__m128i *)(s + 3 * p - 4)));
+ x1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 2 * pitch - 4)),
+ _mm_loadl_epi64((__m128i *)(s + 3 * pitch - 4)));
// 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
- x2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 4 * p - 4)),
- _mm_loadl_epi64((__m128i *)(s + 5 * p - 4)));
+ x2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 4 * pitch - 4)),
+ _mm_loadl_epi64((__m128i *)(s + 5 * pitch - 4)));
// 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
- x3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 6 * p - 4)),
- _mm_loadl_epi64((__m128i *)(s + 7 * p - 4)));
+ x3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 6 * pitch - 4)),
+ _mm_loadl_epi64((__m128i *)(s + 7 * pitch - 4)));
// Transpose 8x8
// 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
@@ -213,52 +211,52 @@ void vpx_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */,
// 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
ps1ps0 = _mm_unpacklo_epi8(ps1ps0, x0);
- storeu_uint32(s + 0 * p - 2, _mm_cvtsi128_si32(ps1ps0));
+ storeu_uint32(s + 0 * pitch - 2, _mm_cvtsi128_si32(ps1ps0));
ps1ps0 = _mm_srli_si128(ps1ps0, 4);
- storeu_uint32(s + 1 * p - 2, _mm_cvtsi128_si32(ps1ps0));
+ storeu_uint32(s + 1 * pitch - 2, _mm_cvtsi128_si32(ps1ps0));
ps1ps0 = _mm_srli_si128(ps1ps0, 4);
- storeu_uint32(s + 2 * p - 2, _mm_cvtsi128_si32(ps1ps0));
+ storeu_uint32(s + 2 * pitch - 2, _mm_cvtsi128_si32(ps1ps0));
ps1ps0 = _mm_srli_si128(ps1ps0, 4);
- storeu_uint32(s + 3 * p - 2, _mm_cvtsi128_si32(ps1ps0));
+ storeu_uint32(s + 3 * pitch - 2, _mm_cvtsi128_si32(ps1ps0));
- storeu_uint32(s + 4 * p - 2, _mm_cvtsi128_si32(qs1qs0));
+ storeu_uint32(s + 4 * pitch - 2, _mm_cvtsi128_si32(qs1qs0));
qs1qs0 = _mm_srli_si128(qs1qs0, 4);
- storeu_uint32(s + 5 * p - 2, _mm_cvtsi128_si32(qs1qs0));
+ storeu_uint32(s + 5 * pitch - 2, _mm_cvtsi128_si32(qs1qs0));
qs1qs0 = _mm_srli_si128(qs1qs0, 4);
- storeu_uint32(s + 6 * p - 2, _mm_cvtsi128_si32(qs1qs0));
+ storeu_uint32(s + 6 * pitch - 2, _mm_cvtsi128_si32(qs1qs0));
qs1qs0 = _mm_srli_si128(qs1qs0, 4);
- storeu_uint32(s + 7 * p - 2, _mm_cvtsi128_si32(qs1qs0));
+ storeu_uint32(s + 7 * pitch - 2, _mm_cvtsi128_si32(qs1qs0));
}
-void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p,
- const unsigned char *_blimit,
- const unsigned char *_limit,
- const unsigned char *_thresh) {
+void vpx_lpf_horizontal_16_sse2(unsigned char *s, int pitch,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
const __m128i zero = _mm_set1_epi16(0);
const __m128i one = _mm_set1_epi8(1);
- const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);
- const __m128i limit = _mm_load_si128((const __m128i *)_limit);
- const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);
+ const __m128i blimit_v = _mm_load_si128((const __m128i *)blimit);
+ const __m128i limit_v = _mm_load_si128((const __m128i *)limit);
+ const __m128i thresh_v = _mm_load_si128((const __m128i *)thresh);
__m128i mask, hev, flat, flat2;
__m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
__m128i abs_p1p0;
- q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
+ q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * pitch));
q4p4 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p)));
- q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * pitch)));
+ q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * pitch));
q3p3 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p)));
- q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * pitch)));
+ q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * pitch));
q2p2 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p)));
- q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * pitch)));
+ q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * pitch));
q1p1 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * pitch)));
p1q1 = _mm_shuffle_epi32(q1p1, 78);
- q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * pitch));
q0p0 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * pitch)));
p0q0 = _mm_shuffle_epi32(q0p0, 78);
{
@@ -270,12 +268,12 @@ void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p,
abs_p0q0 = abs_diff(q0p0, p0q0);
abs_p1q1 = abs_diff(q1p1, p1q1);
flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_subs_epu8(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
- mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = _mm_max_epu8(abs_p1p0, mask);
@@ -285,7 +283,7 @@ void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p,
work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2));
mask = _mm_max_epu8(work, mask);
mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
- mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_subs_epu8(mask, limit_v);
mask = _mm_cmpeq_epi8(mask, zero);
}
@@ -343,18 +341,18 @@ void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p,
flat = _mm_cmpeq_epi8(flat, zero);
flat = _mm_and_si128(flat, mask);
- q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
+ q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * pitch));
q5p5 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * pitch)));
- q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
+ q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * pitch));
q6p6 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * pitch)));
flat2 = _mm_max_epu8(abs_diff(q4p4, q0p0), abs_diff(q5p5, q0p0));
- q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
+ q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * pitch));
q7p7 = _mm_castps_si128(
- _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p)));
+ _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * pitch)));
work = _mm_max_epu8(abs_diff(q6p6, q0p0), abs_diff(q7p7, q0p0));
flat2 = _mm_max_epu8(work, flat2);
flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
@@ -521,44 +519,44 @@ void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p,
q6p6 = _mm_andnot_si128(flat2, q6p6);
flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
- _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6);
- _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6));
+ _mm_storel_epi64((__m128i *)(s - 7 * pitch), q6p6);
+ _mm_storeh_pi((__m64 *)(s + 6 * pitch), _mm_castsi128_ps(q6p6));
q5p5 = _mm_andnot_si128(flat2, q5p5);
flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
- _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5);
- _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5));
+ _mm_storel_epi64((__m128i *)(s - 6 * pitch), q5p5);
+ _mm_storeh_pi((__m64 *)(s + 5 * pitch), _mm_castsi128_ps(q5p5));
q4p4 = _mm_andnot_si128(flat2, q4p4);
flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
- _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4);
- _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4));
+ _mm_storel_epi64((__m128i *)(s - 5 * pitch), q4p4);
+ _mm_storeh_pi((__m64 *)(s + 4 * pitch), _mm_castsi128_ps(q4p4));
q3p3 = _mm_andnot_si128(flat2, q3p3);
flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
- _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3);
- _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3));
+ _mm_storel_epi64((__m128i *)(s - 4 * pitch), q3p3);
+ _mm_storeh_pi((__m64 *)(s + 3 * pitch), _mm_castsi128_ps(q3p3));
q2p2 = _mm_andnot_si128(flat2, q2p2);
flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
- _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2);
- _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2));
+ _mm_storel_epi64((__m128i *)(s - 3 * pitch), q2p2);
+ _mm_storeh_pi((__m64 *)(s + 2 * pitch), _mm_castsi128_ps(q2p2));
q1p1 = _mm_andnot_si128(flat2, q1p1);
flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
- _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1);
- _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1));
+ _mm_storel_epi64((__m128i *)(s - 2 * pitch), q1p1);
+ _mm_storeh_pi((__m64 *)(s + 1 * pitch), _mm_castsi128_ps(q1p1));
q0p0 = _mm_andnot_si128(flat2, q0p0);
flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
- _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
- _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
+ _mm_storel_epi64((__m128i *)(s - 1 * pitch), q0p0);
+ _mm_storeh_pi((__m64 *)(s - 0 * pitch), _mm_castsi128_ps(q0p0));
}
}
@@ -592,15 +590,15 @@ static INLINE __m128i filter16_mask(const __m128i *const flat,
return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result);
}
-void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
- const unsigned char *_blimit,
- const unsigned char *_limit,
- const unsigned char *_thresh) {
+void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int pitch,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
const __m128i zero = _mm_set1_epi16(0);
const __m128i one = _mm_set1_epi8(1);
- const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);
- const __m128i limit = _mm_load_si128((const __m128i *)_limit);
- const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);
+ const __m128i blimit_v = _mm_load_si128((const __m128i *)blimit);
+ const __m128i limit_v = _mm_load_si128((const __m128i *)limit);
+ const __m128i thresh_v = _mm_load_si128((const __m128i *)thresh);
__m128i mask, hev, flat, flat2;
__m128i p7, p6, p5;
__m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
@@ -610,22 +608,22 @@ void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
__m128i max_abs_p1p0q1q0;
- p7 = _mm_loadu_si128((__m128i *)(s - 8 * p));
- p6 = _mm_loadu_si128((__m128i *)(s - 7 * p));
- p5 = _mm_loadu_si128((__m128i *)(s - 6 * p));
- p4 = _mm_loadu_si128((__m128i *)(s - 5 * p));
- p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
- p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
- p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
- p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
- q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
- q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
- q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
- q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
- q4 = _mm_loadu_si128((__m128i *)(s + 4 * p));
- q5 = _mm_loadu_si128((__m128i *)(s + 5 * p));
- q6 = _mm_loadu_si128((__m128i *)(s + 6 * p));
- q7 = _mm_loadu_si128((__m128i *)(s + 7 * p));
+ p7 = _mm_loadu_si128((__m128i *)(s - 8 * pitch));
+ p6 = _mm_loadu_si128((__m128i *)(s - 7 * pitch));
+ p5 = _mm_loadu_si128((__m128i *)(s - 6 * pitch));
+ p4 = _mm_loadu_si128((__m128i *)(s - 5 * pitch));
+ p3 = _mm_loadu_si128((__m128i *)(s - 4 * pitch));
+ p2 = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
+ p1 = _mm_loadu_si128((__m128i *)(s - 2 * pitch));
+ p0 = _mm_loadu_si128((__m128i *)(s - 1 * pitch));
+ q0 = _mm_loadu_si128((__m128i *)(s - 0 * pitch));
+ q1 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
+ q2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
+ q3 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
+ q4 = _mm_loadu_si128((__m128i *)(s + 4 * pitch));
+ q5 = _mm_loadu_si128((__m128i *)(s + 5 * pitch));
+ q6 = _mm_loadu_si128((__m128i *)(s + 6 * pitch));
+ q7 = _mm_loadu_si128((__m128i *)(s + 7 * pitch));
{
const __m128i abs_p1p0 = abs_diff(p1, p0);
@@ -639,7 +637,7 @@ void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
- mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = _mm_max_epu8(max_abs_p1p0q1q0, mask);
@@ -649,7 +647,7 @@ void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
mask = _mm_max_epu8(work, mask);
work = _mm_max_epu8(abs_diff(q2, q1), abs_diff(q3, q2));
mask = _mm_max_epu8(work, mask);
- mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_subs_epu8(mask, limit_v);
mask = _mm_cmpeq_epi8(mask, zero);
}
@@ -695,7 +693,7 @@ void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
oq0 = _mm_xor_si128(q0, t80);
oq1 = _mm_xor_si128(q1, t80);
- hev = _mm_subs_epu8(max_abs_p1p0q1q0, thresh);
+ hev = _mm_subs_epu8(max_abs_p1p0q1q0, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
filt = _mm_and_si128(_mm_subs_epi8(op1, oq1), hev);
@@ -852,82 +850,82 @@ void vpx_lpf_horizontal_16_dual_sse2(unsigned char *s, int p,
f_hi = _mm_add_epi16(_mm_add_epi16(p5_hi, eight), f_hi);
p6 = filter16_mask(&flat2, &p6, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 7 * p), p6);
+ _mm_storeu_si128((__m128i *)(s - 7 * pitch), p6);
f_lo = filter_add2_sub2(&f_lo, &q1_lo, &p5_lo, &p6_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q1_hi, &p5_hi, &p6_hi, &p7_hi);
p5 = filter16_mask(&flat2, &p5, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 6 * p), p5);
+ _mm_storeu_si128((__m128i *)(s - 6 * pitch), p5);
f_lo = filter_add2_sub2(&f_lo, &q2_lo, &p4_lo, &p5_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q2_hi, &p4_hi, &p5_hi, &p7_hi);
p4 = filter16_mask(&flat2, &p4, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 5 * p), p4);
+ _mm_storeu_si128((__m128i *)(s - 5 * pitch), p4);
f_lo = filter_add2_sub2(&f_lo, &q3_lo, &p3_lo, &p4_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q3_hi, &p3_hi, &p4_hi, &p7_hi);
p3 = filter16_mask(&flat2, &p3, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 4 * p), p3);
+ _mm_storeu_si128((__m128i *)(s - 4 * pitch), p3);
f_lo = filter_add2_sub2(&f_lo, &q4_lo, &p2_lo, &p3_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q4_hi, &p2_hi, &p3_hi, &p7_hi);
op2 = filter16_mask(&flat2, &op2, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 3 * p), op2);
+ _mm_storeu_si128((__m128i *)(s - 3 * pitch), op2);
f_lo = filter_add2_sub2(&f_lo, &q5_lo, &p1_lo, &p2_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q5_hi, &p1_hi, &p2_hi, &p7_hi);
op1 = filter16_mask(&flat2, &op1, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 2 * p), op1);
+ _mm_storeu_si128((__m128i *)(s - 2 * pitch), op1);
f_lo = filter_add2_sub2(&f_lo, &q6_lo, &p0_lo, &p1_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q6_hi, &p0_hi, &p1_hi, &p7_hi);
op0 = filter16_mask(&flat2, &op0, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 1 * p), op0);
+ _mm_storeu_si128((__m128i *)(s - 1 * pitch), op0);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q0_lo, &p0_lo, &p7_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q0_hi, &p0_hi, &p7_hi);
oq0 = filter16_mask(&flat2, &oq0, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s - 0 * p), oq0);
+ _mm_storeu_si128((__m128i *)(s - 0 * pitch), oq0);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q1_lo, &p6_lo, &q0_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q1_hi, &p6_hi, &q0_hi);
oq1 = filter16_mask(&flat2, &oq1, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 1 * p), oq1);
+ _mm_storeu_si128((__m128i *)(s + 1 * pitch), oq1);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q2_lo, &p5_lo, &q1_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q2_hi, &p5_hi, &q1_hi);
oq2 = filter16_mask(&flat2, &oq2, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 2 * p), oq2);
+ _mm_storeu_si128((__m128i *)(s + 2 * pitch), oq2);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q3_lo, &p4_lo, &q2_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q3_hi, &p4_hi, &q2_hi);
q3 = filter16_mask(&flat2, &q3, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 3 * p), q3);
+ _mm_storeu_si128((__m128i *)(s + 3 * pitch), q3);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q4_lo, &p3_lo, &q3_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q4_hi, &p3_hi, &q3_hi);
q4 = filter16_mask(&flat2, &q4, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 4 * p), q4);
+ _mm_storeu_si128((__m128i *)(s + 4 * pitch), q4);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q5_lo, &p2_lo, &q4_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q5_hi, &p2_hi, &q4_hi);
q5 = filter16_mask(&flat2, &q5, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 5 * p), q5);
+ _mm_storeu_si128((__m128i *)(s + 5 * pitch), q5);
f_lo = filter_add2_sub2(&f_lo, &q7_lo, &q6_lo, &p1_lo, &q5_lo);
f_hi = filter_add2_sub2(&f_hi, &q7_hi, &q6_hi, &p1_hi, &q5_hi);
q6 = filter16_mask(&flat2, &q6, &f_lo, &f_hi);
- _mm_storeu_si128((__m128i *)(s + 6 * p), q6);
+ _mm_storeu_si128((__m128i *)(s + 6 * pitch), q6);
}
// wide flat
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
}
-void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
- const unsigned char *_blimit,
- const unsigned char *_limit,
- const unsigned char *_thresh) {
+void vpx_lpf_horizontal_8_sse2(unsigned char *s, int pitch,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
@@ -935,21 +933,21 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
- const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);
- const __m128i limit = _mm_load_si128((const __m128i *)_limit);
- const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);
+ const __m128i blimit_v = _mm_load_si128((const __m128i *)blimit);
+ const __m128i limit_v = _mm_load_si128((const __m128i *)limit);
+ const __m128i thresh_v = _mm_load_si128((const __m128i *)thresh);
__m128i mask, hev, flat;
__m128i p3, p2, p1, p0, q0, q1, q2, q3;
__m128i q3p3, q2p2, q1p1, q0p0, p1q1, p0q0;
- q3p3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 4 * p)),
- _mm_loadl_epi64((__m128i *)(s + 3 * p)));
- q2p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * p)),
- _mm_loadl_epi64((__m128i *)(s + 2 * p)));
- q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * p)),
- _mm_loadl_epi64((__m128i *)(s + 1 * p)));
- q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * p)),
- _mm_loadl_epi64((__m128i *)(s - 0 * p)));
+ q3p3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 4 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 3 * pitch)));
+ q2p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 2 * pitch)));
+ q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s + 1 * pitch)));
+ q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * pitch)),
+ _mm_loadl_epi64((__m128i *)(s - 0 * pitch)));
p1q1 = _mm_shuffle_epi32(q1p1, 78);
p0q0 = _mm_shuffle_epi32(q0p0, 78);
@@ -965,12 +963,12 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
abs_p0q0 = abs_diff(q0p0, p0q0);
abs_p1q1 = abs_diff(q1p1, p1q1);
flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
- hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_subs_epu8(flat, thresh_v);
hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
- mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit_v);
mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
// mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
mask = _mm_max_epu8(abs_p1p0, mask);
@@ -980,7 +978,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2));
mask = _mm_max_epu8(work, mask);
mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
- mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_subs_epu8(mask, limit_v);
mask = _mm_cmpeq_epi8(mask, zero);
// flat_mask4
@@ -998,14 +996,22 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
unsigned char *src = s;
{
__m128i workp_a, workp_b, workp_shft;
- p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
- p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
- p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
- p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
- q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
- q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
- q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
- q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * pitch)),
+ zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * pitch)),
+ zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * pitch)),
+ zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * pitch)),
+ zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * pitch)),
+ zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * pitch)),
+ zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * pitch)),
+ zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * pitch)),
+ zero);
workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
@@ -1051,13 +1057,13 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
const __m128i t80 = _mm_set1_epi8(0x80);
const __m128i t1 = _mm_set1_epi8(0x1);
const __m128i ps1 =
- _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), t80);
+ _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * pitch)), t80);
const __m128i ps0 =
- _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), t80);
+ _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * pitch)), t80);
const __m128i qs0 =
- _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), t80);
+ _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * pitch)), t80);
const __m128i qs1 =
- _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), t80);
+ _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * pitch)), t80);
__m128i filt;
__m128i work_a;
__m128i filter1, filter2;
@@ -1103,7 +1109,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
q1 = _mm_and_si128(flat, q1);
q1 = _mm_or_si128(work_a, q1);
- work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
q2 = _mm_loadl_epi64((__m128i *)flat_oq2);
work_a = _mm_andnot_si128(flat, work_a);
q2 = _mm_and_si128(flat, q2);
@@ -1121,27 +1127,25 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p,
p1 = _mm_and_si128(flat, p1);
p1 = _mm_or_si128(work_a, p1);
- work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
p2 = _mm_loadl_epi64((__m128i *)flat_op2);
work_a = _mm_andnot_si128(flat, work_a);
p2 = _mm_and_si128(flat, p2);
p2 = _mm_or_si128(work_a, p2);
- _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
- _mm_storel_epi64((__m128i *)(s - 2 * p), p1);
- _mm_storel_epi64((__m128i *)(s - 1 * p), p0);
- _mm_storel_epi64((__m128i *)(s + 0 * p), q0);
- _mm_storel_epi64((__m128i *)(s + 1 * p), q1);
- _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
+ _mm_storel_epi64((__m128i *)(s - 3 * pitch), p2);
+ _mm_storel_epi64((__m128i *)(s - 2 * pitch), p1);
+ _mm_storel_epi64((__m128i *)(s - 1 * pitch), p0);
+ _mm_storel_epi64((__m128i *)(s + 0 * pitch), q0);
+ _mm_storel_epi64((__m128i *)(s + 1 * pitch), q1);
+ _mm_storel_epi64((__m128i *)(s + 2 * pitch), q2);
}
}
-void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
- const uint8_t *_limit0,
- const uint8_t *_thresh0,
- const uint8_t *_blimit1,
- const uint8_t *_limit1,
- const uint8_t *_thresh1) {
+void vpx_lpf_horizontal_8_dual_sse2(
+ uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
+ const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+ const uint8_t *thresh1) {
DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
@@ -1150,26 +1154,26 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
const __m128i zero = _mm_set1_epi16(0);
const __m128i blimit =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0),
- _mm_load_si128((const __m128i *)_blimit1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)blimit0),
+ _mm_load_si128((const __m128i *)blimit1));
const __m128i limit =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_limit0),
- _mm_load_si128((const __m128i *)_limit1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)limit0),
+ _mm_load_si128((const __m128i *)limit1));
const __m128i thresh =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_thresh0),
- _mm_load_si128((const __m128i *)_thresh1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)thresh0),
+ _mm_load_si128((const __m128i *)thresh1));
__m128i mask, hev, flat;
__m128i p3, p2, p1, p0, q0, q1, q2, q3;
- p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
- p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
- p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
- p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
- q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
- q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
- q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
- q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
+ p3 = _mm_loadu_si128((__m128i *)(s - 4 * pitch));
+ p2 = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
+ p1 = _mm_loadu_si128((__m128i *)(s - 2 * pitch));
+ p0 = _mm_loadu_si128((__m128i *)(s - 1 * pitch));
+ q0 = _mm_loadu_si128((__m128i *)(s - 0 * pitch));
+ q1 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
+ q2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
+ q3 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
{
const __m128i abs_p1p0 =
_mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1));
@@ -1228,14 +1232,22 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
do {
__m128i workp_a, workp_b, workp_shft;
- p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
- p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
- p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
- p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
- q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
- q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
- q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
- q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * pitch)),
+ zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * pitch)),
+ zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * pitch)),
+ zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * pitch)),
+ zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * pitch)),
+ zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * pitch)),
+ zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * pitch)),
+ zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * pitch)),
+ zero);
workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
@@ -1287,13 +1299,13 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
const __m128i t7f = _mm_set1_epi8(0x7f);
const __m128i ps1 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * pitch)), t80);
const __m128i ps0 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * pitch)), t80);
const __m128i qs0 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * pitch)), t80);
const __m128i qs1 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * pitch)), t80);
__m128i filt;
__m128i work_a;
__m128i filter1, filter2;
@@ -1345,7 +1357,7 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
q1 = _mm_and_si128(flat, q1);
q1 = _mm_or_si128(work_a, q1);
- work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
q2 = _mm_load_si128((__m128i *)flat_oq2);
work_a = _mm_andnot_si128(flat, work_a);
q2 = _mm_and_si128(flat, q2);
@@ -1363,49 +1375,49 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0,
p1 = _mm_and_si128(flat, p1);
p1 = _mm_or_si128(work_a, p1);
- work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ work_a = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
p2 = _mm_load_si128((__m128i *)flat_op2);
work_a = _mm_andnot_si128(flat, work_a);
p2 = _mm_and_si128(flat, p2);
p2 = _mm_or_si128(work_a, p2);
- _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
- _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
- _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
- _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
- _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
- _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
+ _mm_storeu_si128((__m128i *)(s - 3 * pitch), p2);
+ _mm_storeu_si128((__m128i *)(s - 2 * pitch), p1);
+ _mm_storeu_si128((__m128i *)(s - 1 * pitch), p0);
+ _mm_storeu_si128((__m128i *)(s + 0 * pitch), q0);
+ _mm_storeu_si128((__m128i *)(s + 1 * pitch), q1);
+ _mm_storeu_si128((__m128i *)(s + 2 * pitch), q2);
}
}
-void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
- const unsigned char *_blimit0,
- const unsigned char *_limit0,
- const unsigned char *_thresh0,
- const unsigned char *_blimit1,
- const unsigned char *_limit1,
- const unsigned char *_thresh1) {
+void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int pitch,
+ const unsigned char *blimit0,
+ const unsigned char *limit0,
+ const unsigned char *thresh0,
+ const unsigned char *blimit1,
+ const unsigned char *limit1,
+ const unsigned char *thresh1) {
const __m128i blimit =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0),
- _mm_load_si128((const __m128i *)_blimit1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)blimit0),
+ _mm_load_si128((const __m128i *)blimit1));
const __m128i limit =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_limit0),
- _mm_load_si128((const __m128i *)_limit1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)limit0),
+ _mm_load_si128((const __m128i *)limit1));
const __m128i thresh =
- _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_thresh0),
- _mm_load_si128((const __m128i *)_thresh1));
+ _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)thresh0),
+ _mm_load_si128((const __m128i *)thresh1));
const __m128i zero = _mm_set1_epi16(0);
__m128i p3, p2, p1, p0, q0, q1, q2, q3;
__m128i mask, hev, flat;
- p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
- p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
- p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
- p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
- q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
- q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
- q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
- q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
+ p3 = _mm_loadu_si128((__m128i *)(s - 4 * pitch));
+ p2 = _mm_loadu_si128((__m128i *)(s - 3 * pitch));
+ p1 = _mm_loadu_si128((__m128i *)(s - 2 * pitch));
+ p0 = _mm_loadu_si128((__m128i *)(s - 1 * pitch));
+ q0 = _mm_loadu_si128((__m128i *)(s - 0 * pitch));
+ q1 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
+ q2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
+ q3 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
// filter_mask and hev_mask
{
@@ -1456,13 +1468,13 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
const __m128i t7f = _mm_set1_epi8(0x7f);
const __m128i ps1 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * pitch)), t80);
const __m128i ps0 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * pitch)), t80);
const __m128i qs0 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * pitch)), t80);
const __m128i qs1 =
- _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
+ _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * pitch)), t80);
__m128i filt;
__m128i work_a;
__m128i filter1, filter2;
@@ -1507,10 +1519,10 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p,
p0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
p1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
- _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
- _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
- _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
- _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+ _mm_storeu_si128((__m128i *)(s - 2 * pitch), p1);
+ _mm_storeu_si128((__m128i *)(s - 1 * pitch), p0);
+ _mm_storeu_si128((__m128i *)(s + 0 * pitch), q0);
+ _mm_storeu_si128((__m128i *)(s + 1 * pitch), q1);
}
}
@@ -1650,7 +1662,7 @@ static INLINE void transpose(unsigned char *src[], int in_p,
} while (++idx8x8 < num_8x8_to_transpose);
}
-void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
@@ -1659,7 +1671,7 @@ void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
unsigned char *dst[2];
// Transpose 8x16
- transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+ transpose8x16(s - 4, s - 4 + pitch * 8, pitch, t_dst, 16);
// Loop filtering
vpx_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
@@ -1667,13 +1679,13 @@ void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
src[0] = t_dst;
src[1] = t_dst + 8;
dst[0] = s - 4;
- dst[1] = s - 4 + p * 8;
+ dst[1] = s - 4 + pitch * 8;
// Transpose back
- transpose(src, 16, dst, p, 2);
+ transpose(src, 16, dst, pitch, 2);
}
-void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
+void vpx_lpf_vertical_8_sse2(unsigned char *s, int pitch,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh) {
@@ -1685,7 +1697,7 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
src[0] = s - 4;
dst[0] = t_dst;
- transpose(src, p, dst, 8, 1);
+ transpose(src, pitch, dst, 8, 1);
// Loop filtering
vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh);
@@ -1694,10 +1706,10 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p,
dst[0] = s - 4;
// Transpose back
- transpose(src, 8, dst, p, 1);
+ transpose(src, 8, dst, pitch, 1);
}
-void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
+void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
const uint8_t *thresh1) {
@@ -1706,7 +1718,7 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
unsigned char *dst[2];
// Transpose 8x16
- transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+ transpose8x16(s - 4, s - 4 + pitch * 8, pitch, t_dst, 16);
// Loop filtering
vpx_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0,
@@ -1715,13 +1727,13 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0,
src[1] = t_dst + 8;
dst[0] = s - 4;
- dst[1] = s - 4 + p * 8;
+ dst[1] = s - 4 + pitch * 8;
// Transpose back
- transpose(src, 16, dst, p, 2);
+ transpose(src, 16, dst, pitch, 2);
}
-void vpx_lpf_vertical_16_sse2(unsigned char *s, int p,
+void vpx_lpf_vertical_16_sse2(unsigned char *s, int pitch,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh) {
@@ -1735,7 +1747,7 @@ void vpx_lpf_vertical_16_sse2(unsigned char *s, int p,
dst[1] = t_dst + 8 * 8;
// Transpose 16x8
- transpose(src, p, dst, 8, 2);
+ transpose(src, pitch, dst, 8, 2);
// Loop filtering
vpx_lpf_horizontal_16_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh);
@@ -1746,22 +1758,22 @@ void vpx_lpf_vertical_16_sse2(unsigned char *s, int p,
dst[1] = s;
// Transpose back
- transpose(src, 8, dst, p, 2);
+ transpose(src, 8, dst, pitch, 2);
}
-void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p,
+void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int pitch,
const uint8_t *blimit, const uint8_t *limit,
const uint8_t *thresh) {
DECLARE_ALIGNED(16, unsigned char, t_dst[256]);
// Transpose 16x16
- transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);
- transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
+ transpose8x16(s - 8, s - 8 + 8 * pitch, pitch, t_dst, 16);
+ transpose8x16(s, s + 8 * pitch, pitch, t_dst + 8 * 16, 16);
// Loop filtering
vpx_lpf_horizontal_16_dual_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh);
// Transpose back
- transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
- transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * p, p);
+ transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, pitch);
+ transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * pitch, pitch);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h
index 5209a062882..258ab38e606 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/mem_sse2.h
@@ -26,6 +26,17 @@ static INLINE uint32_t loadu_uint32(const void *src) {
return v;
}
+static INLINE __m128i load_unaligned_u32(const void *a) {
+ uint32_t val;
+ memcpy(&val, a, sizeof(val));
+ return _mm_cvtsi32_si128(val);
+}
+
+static INLINE void store_unaligned_u32(void *const a, const __m128i v) {
+ const uint32_t val = _mm_cvtsi128_si32(v);
+ memcpy(a, &val, sizeof(val));
+}
+
#define mm_storelu(dst, v) memcpy((dst), (const char *)&(v), 8)
#define mm_storehu(dst, v) memcpy((dst), (const char *)&(v) + 8, 8)
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c
new file mode 100644
index 00000000000..d1029afc4fe
--- /dev/null
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/post_proc_sse2.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/x86/mem_sse2.h"
+
+extern const int16_t vpx_rv[];
+
+void vpx_mbpost_proc_down_sse2(unsigned char *dst, int pitch, int rows,
+ int cols, int flimit) {
+ int col;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i f = _mm_set1_epi32(flimit);
+ DECLARE_ALIGNED(16, int16_t, above_context[8 * 8]);
+
+ // 8 columns are processed at a time.
+ // If rows is less than 8 the bottom border extension fails.
+ assert(cols % 8 == 0);
+ assert(rows >= 8);
+
+ for (col = 0; col < cols; col += 8) {
+ int row, i;
+ __m128i s = _mm_loadl_epi64((__m128i *)dst);
+ __m128i sum, sumsq_0, sumsq_1;
+ __m128i tmp_0, tmp_1;
+ __m128i below_context;
+
+ s = _mm_unpacklo_epi8(s, zero);
+
+ for (i = 0; i < 8; ++i) {
+ _mm_store_si128((__m128i *)above_context + i, s);
+ }
+
+ // sum *= 9
+ sum = _mm_slli_epi16(s, 3);
+ sum = _mm_add_epi16(s, sum);
+
+ // sum^2 * 9 == (sum * 9) * sum
+ tmp_0 = _mm_mullo_epi16(sum, s);
+ tmp_1 = _mm_mulhi_epi16(sum, s);
+
+ sumsq_0 = _mm_unpacklo_epi16(tmp_0, tmp_1);
+ sumsq_1 = _mm_unpackhi_epi16(tmp_0, tmp_1);
+
+ // Prime sum/sumsq
+ for (i = 1; i <= 6; ++i) {
+ __m128i a = _mm_loadl_epi64((__m128i *)(dst + i * pitch));
+ a = _mm_unpacklo_epi8(a, zero);
+ sum = _mm_add_epi16(sum, a);
+ a = _mm_mullo_epi16(a, a);
+ sumsq_0 = _mm_add_epi32(sumsq_0, _mm_unpacklo_epi16(a, zero));
+ sumsq_1 = _mm_add_epi32(sumsq_1, _mm_unpackhi_epi16(a, zero));
+ }
+
+ for (row = 0; row < rows + 8; row++) {
+ const __m128i above =
+ _mm_load_si128((__m128i *)above_context + (row & 7));
+ __m128i this_row = _mm_loadl_epi64((__m128i *)(dst + row * pitch));
+ __m128i above_sq, below_sq;
+ __m128i mask_0, mask_1;
+ __m128i multmp_0, multmp_1;
+ __m128i rv;
+ __m128i out;
+
+ this_row = _mm_unpacklo_epi8(this_row, zero);
+
+ if (row + 7 < rows) {
+ // Instead of copying the end context we just stop loading when we get
+ // to the last one.
+ below_context = _mm_loadl_epi64((__m128i *)(dst + (row + 7) * pitch));
+ below_context = _mm_unpacklo_epi8(below_context, zero);
+ }
+
+ sum = _mm_sub_epi16(sum, above);
+ sum = _mm_add_epi16(sum, below_context);
+
+ // context^2 fits in 16 bits. Don't need to mulhi and combine. Just zero
+ // extend. Unfortunately we can't do below_sq - above_sq in 16 bits
+ // because x86 does not have unpack with sign extension.
+ above_sq = _mm_mullo_epi16(above, above);
+ sumsq_0 = _mm_sub_epi32(sumsq_0, _mm_unpacklo_epi16(above_sq, zero));
+ sumsq_1 = _mm_sub_epi32(sumsq_1, _mm_unpackhi_epi16(above_sq, zero));
+
+ below_sq = _mm_mullo_epi16(below_context, below_context);
+ sumsq_0 = _mm_add_epi32(sumsq_0, _mm_unpacklo_epi16(below_sq, zero));
+ sumsq_1 = _mm_add_epi32(sumsq_1, _mm_unpackhi_epi16(below_sq, zero));
+
+ // sumsq * 16 - sumsq == sumsq * 15
+ mask_0 = _mm_slli_epi32(sumsq_0, 4);
+ mask_0 = _mm_sub_epi32(mask_0, sumsq_0);
+ mask_1 = _mm_slli_epi32(sumsq_1, 4);
+ mask_1 = _mm_sub_epi32(mask_1, sumsq_1);
+
+ multmp_0 = _mm_mullo_epi16(sum, sum);
+ multmp_1 = _mm_mulhi_epi16(sum, sum);
+
+ mask_0 = _mm_sub_epi32(mask_0, _mm_unpacklo_epi16(multmp_0, multmp_1));
+ mask_1 = _mm_sub_epi32(mask_1, _mm_unpackhi_epi16(multmp_0, multmp_1));
+
+ // mask - f gives a negative value when mask < f
+ mask_0 = _mm_sub_epi32(mask_0, f);
+ mask_1 = _mm_sub_epi32(mask_1, f);
+
+ // Shift the sign bit down to create a mask
+ mask_0 = _mm_srai_epi32(mask_0, 31);
+ mask_1 = _mm_srai_epi32(mask_1, 31);
+
+ mask_0 = _mm_packs_epi32(mask_0, mask_1);
+
+ rv = _mm_loadu_si128((__m128i const *)(vpx_rv + (row & 127)));
+
+ mask_1 = _mm_add_epi16(rv, sum);
+ mask_1 = _mm_add_epi16(mask_1, this_row);
+ mask_1 = _mm_srai_epi16(mask_1, 4);
+
+ mask_1 = _mm_and_si128(mask_0, mask_1);
+ mask_0 = _mm_andnot_si128(mask_0, this_row);
+ out = _mm_or_si128(mask_1, mask_0);
+
+ _mm_storel_epi64((__m128i *)(dst + row * pitch),
+ _mm_packus_epi16(out, zero));
+
+ _mm_store_si128((__m128i *)above_context + ((row + 8) & 7), this_row);
+ }
+
+ dst += 8;
+ }
+}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_avx.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_avx.c
index 6f4489004dc..d0a8d514eb1 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_avx.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_avx.c
@@ -17,15 +17,15 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
-#include "vpx_dsp/x86/quantize_x86.h"
+#include "vpx_dsp/x86/quantize_sse2.h"
void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
const __m128i zero = _mm_setzero_si128();
const __m256i big_zero = _mm256_setzero_si256();
int index;
@@ -37,7 +37,7 @@ void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i all_zero;
__m128i eob = zero, eob0;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
@@ -97,8 +97,7 @@ void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr);
store_tran_low(coeff1, dqcoeff_ptr + 8);
- eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
- zero);
+ eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
}
// AC only loop.
@@ -141,20 +140,22 @@ void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr + index);
store_tran_low(coeff1, dqcoeff_ptr + index + 8);
- eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
- index, zero);
+ eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, index,
+ zero);
eob = _mm_max_epi16(eob, eob0);
}
*eob_ptr = accumulate_eob(eob);
}
-void vpx_quantize_b_32x32_avx(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void vpx_quantize_b_32x32_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
const __m128i zero = _mm_setzero_si128();
const __m128i one = _mm_set1_epi16(1);
const __m256i big_zero = _mm256_setzero_si256();
@@ -167,7 +168,7 @@ void vpx_quantize_b_32x32_avx(
__m128i all_zero;
__m128i eob = zero, eob0;
- (void)scan_ptr;
+ (void)scan;
(void)n_coeffs;
(void)skip_block;
assert(!skip_block);
@@ -253,8 +254,7 @@ void vpx_quantize_b_32x32_avx(
store_tran_low(coeff0, dqcoeff_ptr);
store_tran_low(coeff1, dqcoeff_ptr + 8);
- eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
- zero);
+ eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
}
// AC only loop.
@@ -306,8 +306,8 @@ void vpx_quantize_b_32x32_avx(
store_tran_low(coeff0, dqcoeff_ptr + index);
store_tran_low(coeff1, dqcoeff_ptr + index + 8);
- eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
- index, zero);
+ eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, index,
+ zero);
eob = _mm_max_epi16(eob, eob0);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c
index c020b398c3b..fa098a3a0a5 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.c
@@ -15,15 +15,15 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
-#include "vpx_dsp/x86/quantize_x86.h"
+#include "vpx_dsp/x86/quantize_sse2.h"
void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr, const int16_t *scan_ptr,
- const int16_t *iscan_ptr) {
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
const __m128i zero = _mm_setzero_si128();
int index = 16;
@@ -33,7 +33,7 @@ void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i cmp_mask0, cmp_mask1;
__m128i eob, eob0;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
@@ -81,8 +81,7 @@ void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr);
store_tran_low(coeff1, dqcoeff_ptr + 8);
- eob =
- scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0, zero);
+ eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
// AC only loop.
while (index < n_coeffs) {
@@ -115,8 +114,8 @@ void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr + index);
store_tran_low(coeff1, dqcoeff_ptr + index + 8);
- eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
- index, zero);
+ eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, index,
+ zero);
eob = _mm_max_epi16(eob, eob0);
index += 16;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_x86.h b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.h
index bb9e32f71eb..1f858781087 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_x86.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_sse2.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VPX_VPX_DSP_X86_QUANTIZE_X86_H_
-#define VPX_VPX_DSP_X86_QUANTIZE_X86_H_
+#ifndef VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_
+#define VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_
#include <emmintrin.h>
@@ -48,17 +48,17 @@ static INLINE __m128i calculate_dqcoeff(__m128i qcoeff, __m128i dequant) {
return _mm_mullo_epi16(qcoeff, dequant);
}
-// Scan 16 values for eob reference in scan_ptr. Use masks (-1) from comparing
-// to zbin to add 1 to the index in 'scan'.
+// Scan 16 values for eob reference in scan. Use masks (-1) from comparing to
+// zbin to add 1 to the index in 'scan'.
static INLINE __m128i scan_for_eob(__m128i *coeff0, __m128i *coeff1,
const __m128i zbin_mask0,
const __m128i zbin_mask1,
- const int16_t *scan_ptr, const int index,
+ const int16_t *scan, const int index,
const __m128i zero) {
const __m128i zero_coeff0 = _mm_cmpeq_epi16(*coeff0, zero);
const __m128i zero_coeff1 = _mm_cmpeq_epi16(*coeff1, zero);
- __m128i scan0 = _mm_load_si128((const __m128i *)(scan_ptr + index));
- __m128i scan1 = _mm_load_si128((const __m128i *)(scan_ptr + index + 8));
+ __m128i scan0 = _mm_load_si128((const __m128i *)(scan + index));
+ __m128i scan1 = _mm_load_si128((const __m128i *)(scan + index + 8));
__m128i eob0, eob1;
// Add one to convert from indices to counts
scan0 = _mm_sub_epi16(scan0, zbin_mask0);
@@ -79,4 +79,4 @@ static INLINE int16_t accumulate_eob(__m128i eob) {
return _mm_extract_epi16(eob, 1);
}
-#endif // VPX_VPX_DSP_X86_QUANTIZE_X86_H_
+#endif // VPX_VPX_DSP_X86_QUANTIZE_SSE2_H_
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_ssse3.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_ssse3.c
index 3f528e1a978..e96f9f99058 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_ssse3.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/quantize_ssse3.c
@@ -14,7 +14,7 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
-#include "vpx_dsp/x86/quantize_x86.h"
+#include "vpx_dsp/x86/quantize_sse2.h"
void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
@@ -22,7 +22,7 @@ void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+ const int16_t *scan, const int16_t *iscan) {
const __m128i zero = _mm_setzero_si128();
int index = 16;
@@ -32,7 +32,7 @@ void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i cmp_mask0, cmp_mask1;
__m128i eob, eob0;
- (void)scan_ptr;
+ (void)scan;
(void)skip_block;
assert(!skip_block);
@@ -74,8 +74,7 @@ void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr);
store_tran_low(coeff1, dqcoeff_ptr + 8);
- eob =
- scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0, zero);
+ eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
// AC only loop.
while (index < n_coeffs) {
@@ -106,8 +105,8 @@ void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
store_tran_low(coeff0, dqcoeff_ptr + index);
store_tran_low(coeff1, dqcoeff_ptr + index + 8);
- eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
- index, zero);
+ eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, index,
+ zero);
eob = _mm_max_epi16(eob, eob0);
index += 16;
@@ -116,12 +115,14 @@ void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
*eob_ptr = accumulate_eob(eob);
}
-void vpx_quantize_b_32x32_ssse3(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+void vpx_quantize_b_32x32_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
const __m128i zero = _mm_setzero_si128();
const __m128i one = _mm_set1_epi16(1);
int index;
@@ -133,7 +134,7 @@ void vpx_quantize_b_32x32_ssse3(
__m128i all_zero;
__m128i eob = zero, eob0;
- (void)scan_ptr;
+ (void)scan;
(void)n_coeffs;
(void)skip_block;
assert(!skip_block);
@@ -226,8 +227,7 @@ void vpx_quantize_b_32x32_ssse3(
store_tran_low(coeff0, dqcoeff_ptr);
store_tran_low(coeff1, dqcoeff_ptr + 8);
- eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr, 0,
- zero);
+ eob = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, 0, zero);
}
// AC only loop.
@@ -283,8 +283,8 @@ void vpx_quantize_b_32x32_ssse3(
store_tran_low(coeff0, dqcoeff_ptr + index);
store_tran_low(coeff1, dqcoeff_ptr + index + 8);
- eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan_ptr,
- index, zero);
+ eob0 = scan_for_eob(&coeff0, &coeff1, cmp_mask0, cmp_mask1, iscan, index,
+ zero);
eob = _mm_max_epi16(eob, eob0);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
index 2c6b36f17d4..b18fecf709c 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
@@ -12,26 +12,26 @@
#include "vpx/vpx_integer.h"
static INLINE void calc_final(const __m256i *const sums /*[4]*/,
- uint32_t res[4]) {
+ uint32_t sad_array[4]) {
const __m256i t0 = _mm256_hadd_epi32(sums[0], sums[1]);
const __m256i t1 = _mm256_hadd_epi32(sums[2], sums[3]);
const __m256i t2 = _mm256_hadd_epi32(t0, t1);
const __m128i sum = _mm_add_epi32(_mm256_castsi256_si128(t2),
_mm256_extractf128_si256(t2, 1));
- _mm_storeu_si128((__m128i *)res, sum);
+ _mm_storeu_si128((__m128i *)sad_array, sum);
}
-void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
+void vpx_sad32x32x4d_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
+ uint32_t sad_array[4]) {
int i;
const uint8_t *refs[4];
__m256i sums[4];
- refs[0] = ref[0];
- refs[1] = ref[1];
- refs[2] = ref[2];
- refs[3] = ref[3];
+ refs[0] = ref_array[0];
+ refs[1] = ref_array[1];
+ refs[2] = ref_array[2];
+ refs[3] = ref_array[3];
sums[0] = _mm256_setzero_si256();
sums[1] = _mm256_setzero_si256();
sums[2] = _mm256_setzero_si256();
@@ -40,46 +40,46 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
for (i = 0; i < 32; i++) {
__m256i r[4];
- // load src and all refs
- const __m256i s = _mm256_load_si256((const __m256i *)src);
+ // load src and all ref[]
+ const __m256i s = _mm256_load_si256((const __m256i *)src_ptr);
r[0] = _mm256_loadu_si256((const __m256i *)refs[0]);
r[1] = _mm256_loadu_si256((const __m256i *)refs[1]);
r[2] = _mm256_loadu_si256((const __m256i *)refs[2]);
r[3] = _mm256_loadu_si256((const __m256i *)refs[3]);
- // sum of the absolute differences between every ref-i to src
+ // sum of the absolute differences between every ref[] to src
r[0] = _mm256_sad_epu8(r[0], s);
r[1] = _mm256_sad_epu8(r[1], s);
r[2] = _mm256_sad_epu8(r[2], s);
r[3] = _mm256_sad_epu8(r[3], s);
- // sum every ref-i
+ // sum every ref[]
sums[0] = _mm256_add_epi32(sums[0], r[0]);
sums[1] = _mm256_add_epi32(sums[1], r[1]);
sums[2] = _mm256_add_epi32(sums[2], r[2]);
sums[3] = _mm256_add_epi32(sums[3], r[3]);
- src += src_stride;
+ src_ptr += src_stride;
refs[0] += ref_stride;
refs[1] += ref_stride;
refs[2] += ref_stride;
refs[3] += ref_stride;
}
- calc_final(sums, res);
+ calc_final(sums, sad_array);
}
-void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
+void vpx_sad64x64x4d_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
+ uint32_t sad_array[4]) {
__m256i sums[4];
int i;
const uint8_t *refs[4];
- refs[0] = ref[0];
- refs[1] = ref[1];
- refs[2] = ref[2];
- refs[3] = ref[3];
+ refs[0] = ref_array[0];
+ refs[1] = ref_array[1];
+ refs[2] = ref_array[2];
+ refs[3] = ref_array[3];
sums[0] = _mm256_setzero_si256();
sums[1] = _mm256_setzero_si256();
sums[2] = _mm256_setzero_si256();
@@ -87,9 +87,9 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
for (i = 0; i < 64; i++) {
__m256i r_lo[4], r_hi[4];
- // load 64 bytes from src and all refs
- const __m256i s_lo = _mm256_load_si256((const __m256i *)src);
- const __m256i s_hi = _mm256_load_si256((const __m256i *)(src + 32));
+ // load 64 bytes from src and all ref[]
+ const __m256i s_lo = _mm256_load_si256((const __m256i *)src_ptr);
+ const __m256i s_hi = _mm256_load_si256((const __m256i *)(src_ptr + 32));
r_lo[0] = _mm256_loadu_si256((const __m256i *)refs[0]);
r_hi[0] = _mm256_loadu_si256((const __m256i *)(refs[0] + 32));
r_lo[1] = _mm256_loadu_si256((const __m256i *)refs[1]);
@@ -99,7 +99,7 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
r_lo[3] = _mm256_loadu_si256((const __m256i *)refs[3]);
r_hi[3] = _mm256_loadu_si256((const __m256i *)(refs[3] + 32));
- // sum of the absolute differences between every ref-i to src
+ // sum of the absolute differences between every ref[] to src
r_lo[0] = _mm256_sad_epu8(r_lo[0], s_lo);
r_lo[1] = _mm256_sad_epu8(r_lo[1], s_lo);
r_lo[2] = _mm256_sad_epu8(r_lo[2], s_lo);
@@ -109,7 +109,7 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
r_hi[2] = _mm256_sad_epu8(r_hi[2], s_hi);
r_hi[3] = _mm256_sad_epu8(r_hi[3], s_hi);
- // sum every ref-i
+ // sum every ref[]
sums[0] = _mm256_add_epi32(sums[0], r_lo[0]);
sums[1] = _mm256_add_epi32(sums[1], r_lo[1]);
sums[2] = _mm256_add_epi32(sums[2], r_lo[2]);
@@ -119,12 +119,12 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
sums[2] = _mm256_add_epi32(sums[2], r_hi[2]);
sums[3] = _mm256_add_epi32(sums[3], r_hi[3]);
- src += src_stride;
+ src_ptr += src_stride;
refs[0] += ref_stride;
refs[1] += ref_stride;
refs[2] += ref_stride;
refs[3] += ref_stride;
}
- calc_final(sums, res);
+ calc_final(sums, sad_array);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx512.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx512.c
index 5f2ab6ea71b..4c5d70464de 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx512.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/sad4d_avx512.c
@@ -11,8 +11,8 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
-void vpx_sad64x64x4d_avx512(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
+void vpx_sad64x64x4d_avx512(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_array[4], int ref_stride,
uint32_t res[4]) {
__m512i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
__m512i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
@@ -20,33 +20,33 @@ void vpx_sad64x64x4d_avx512(const uint8_t *src, int src_stride,
int i;
const uint8_t *ref0, *ref1, *ref2, *ref3;
- ref0 = ref[0];
- ref1 = ref[1];
- ref2 = ref[2];
- ref3 = ref[3];
+ ref0 = ref_array[0];
+ ref1 = ref_array[1];
+ ref2 = ref_array[2];
+ ref3 = ref_array[3];
sum_ref0 = _mm512_set1_epi16(0);
sum_ref1 = _mm512_set1_epi16(0);
sum_ref2 = _mm512_set1_epi16(0);
sum_ref3 = _mm512_set1_epi16(0);
for (i = 0; i < 64; i++) {
- // load src and all refs
- src_reg = _mm512_loadu_si512((const __m512i *)src);
+ // load src and all ref[]
+ src_reg = _mm512_loadu_si512((const __m512i *)src_ptr);
ref0_reg = _mm512_loadu_si512((const __m512i *)ref0);
ref1_reg = _mm512_loadu_si512((const __m512i *)ref1);
ref2_reg = _mm512_loadu_si512((const __m512i *)ref2);
ref3_reg = _mm512_loadu_si512((const __m512i *)ref3);
- // sum of the absolute differences between every ref-i to src
+ // sum of the absolute differences between every ref[] to src
ref0_reg = _mm512_sad_epu8(ref0_reg, src_reg);
ref1_reg = _mm512_sad_epu8(ref1_reg, src_reg);
ref2_reg = _mm512_sad_epu8(ref2_reg, src_reg);
ref3_reg = _mm512_sad_epu8(ref3_reg, src_reg);
- // sum every ref-i
+ // sum every ref[]
sum_ref0 = _mm512_add_epi32(sum_ref0, ref0_reg);
sum_ref1 = _mm512_add_epi32(sum_ref1, ref1_reg);
sum_ref2 = _mm512_add_epi32(sum_ref2, ref2_reg);
sum_ref3 = _mm512_add_epi32(sum_ref3, ref3_reg);
- src += src_stride;
+ src_ptr += src_stride;
ref0 += ref_stride;
ref1 += ref_stride;
ref2 += ref_stride;
@@ -55,7 +55,7 @@ void vpx_sad64x64x4d_avx512(const uint8_t *src, int src_stride,
{
__m256i sum256;
__m128i sum128;
- // in sum_ref-i the result is saved in the first 4 bytes
+ // in sum_ref[] the result is saved in the first 4 bytes
// the other 4 bytes are zeroed.
// sum_ref1 and sum_ref3 are shifted left by 4 bytes
sum_ref1 = _mm512_bslli_epi128(sum_ref1, 4);
@@ -65,7 +65,7 @@ void vpx_sad64x64x4d_avx512(const uint8_t *src, int src_stride,
sum_ref0 = _mm512_or_si512(sum_ref0, sum_ref1);
sum_ref2 = _mm512_or_si512(sum_ref2, sum_ref3);
- // merge every 64 bit from each sum_ref-i
+ // merge every 64 bit from each sum_ref[]
sum_mlow = _mm512_unpacklo_epi64(sum_ref0, sum_ref2);
sum_mhigh = _mm512_unpackhi_epi64(sum_ref0, sum_ref2);
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm
index 88967a3f14b..5adb9b8c3da 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm
@@ -41,12 +41,12 @@ SECTION .text
; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride,
; int x_offset, int y_offset,
-; const uint8_t *dst, ptrdiff_t dst_stride,
+; const uint8_t *ref, ptrdiff_t ref_stride,
; int height, unsigned int *sse);
;
; This function returns the SE and stores SSE in the given pointer.
-%macro SUM_SSE 6 ; src1, dst1, src2, dst2, sum, sse
+%macro SUM_SSE 6 ; src1, ref1, src2, ref2, sum, sse
psubw %3, %4
psubw %1, %2
paddw %5, %3
@@ -117,12 +117,12 @@ SECTION .text
%if ARCH_X86_64
%if %2 == 1 ; avg
cglobal sub_pixel_avg_variance%1xh, 9, 10, 13, src, src_stride, \
- x_offset, y_offset, dst, dst_stride, \
- sec, sec_stride, height, sse
- %define sec_str sec_strideq
+ x_offset, y_offset, ref, ref_stride, \
+ second_pred, second_stride, height, sse
+ %define second_str second_strideq
%else
cglobal sub_pixel_variance%1xh, 7, 8, 13, src, src_stride, \
- x_offset, y_offset, dst, dst_stride, \
+ x_offset, y_offset, ref, ref_stride, \
height, sse
%endif
%define block_height heightd
@@ -131,13 +131,13 @@ SECTION .text
%if CONFIG_PIC=1
%if %2 == 1 ; avg
cglobal sub_pixel_avg_variance%1xh, 7, 7, 13, src, src_stride, \
- x_offset, y_offset, dst, dst_stride, \
- sec, sec_stride, height, sse
+ x_offset, y_offset, ref, ref_stride, \
+ second_pred, second_stride, height, sse
%define block_height dword heightm
- %define sec_str sec_stridemp
+ %define second_str second_stridemp
%else
cglobal sub_pixel_variance%1xh, 7, 7, 13, src, src_stride, \
- x_offset, y_offset, dst, dst_stride, \
+ x_offset, y_offset, ref, ref_stride, \
height, sse
%define block_height heightd
%endif
@@ -155,7 +155,7 @@ SECTION .text
lea ecx, [GLOBAL(bilin_filter_m)]
mov g_bilin_filterm, ecx
- lea ecx, [GLOBAL(pw_8)]
+ lea ecx, [GLOBAL(pw_8)]
mov g_pw_8m, ecx
LOAD_IF_USED 0, 1 ; load eax, ecx back
@@ -163,13 +163,13 @@ SECTION .text
%if %2 == 1 ; avg
cglobal sub_pixel_avg_variance%1xh, 7, 7, 13, src, src_stride, \
x_offset, y_offset, \
- dst, dst_stride, sec, sec_stride, \
+ ref, ref_stride, second_pred, second_stride, \
height, sse
%define block_height dword heightm
- %define sec_str sec_stridemp
+ %define second_str second_stridemp
%else
cglobal sub_pixel_variance%1xh, 7, 7, 13, src, src_stride, \
- x_offset, y_offset, dst, dst_stride, \
+ x_offset, y_offset, ref, ref_stride, \
height, sse
%define block_height heightd
%endif
@@ -192,7 +192,7 @@ SECTION .text
%if %1 < 16
sar block_height, 1
%if %2 == 1 ; avg
- shl sec_str, 1
+ shl second_str, 1
%endif
%endif
@@ -207,9 +207,9 @@ SECTION .text
.x_zero_y_zero_loop:
%if %1 == 16
movu m0, [srcq]
- mova m1, [dstq]
+ mova m1, [refq]
%if %2 == 1 ; avg
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m3, m1, m5
punpcklbw m1, m5
%endif
@@ -223,7 +223,7 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
%if %2 == 1 ; avg
@@ -237,14 +237,14 @@ SECTION .text
movx m2, [srcq+src_strideq]
%endif
- movx m1, [dstq]
- movx m3, [dstq+dst_strideq]
+ movx m1, [refq]
+ movx m3, [refq+ref_strideq]
%if %2 == 1 ; avg
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
%endif
punpcklbw m3, m5
@@ -265,10 +265,10 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_zero_y_zero_loop
@@ -283,11 +283,11 @@ SECTION .text
%if %1 == 16
movu m0, [srcq]
movu m4, [srcq+src_strideq]
- mova m1, [dstq]
+ mova m1, [refq]
pavgb m0, m4
punpckhbw m3, m1, m5
%if %2 == 1 ; avg
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
%endif
punpcklbw m1, m5
punpckhbw m2, m0, m5
@@ -295,7 +295,7 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m2, [srcq+src_strideq]
@@ -306,22 +306,22 @@ SECTION .text
movx m1, [srcq+src_strideq*2]
punpckldq m2, m1
%endif
- movx m1, [dstq]
+ movx m1, [refq]
%if %1 > 4
movlhps m0, m2
%else ; 4xh
punpckldq m0, m2
%endif
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
pavgb m0, m2
punpcklbw m1, m5
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpcklbw m3, m5
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else ; 4xh
- movh m4, [secq]
+ movh m4, [second_predq]
pavgb m0, m4
punpcklbw m3, m5
punpcklbw m0, m5
@@ -329,9 +329,9 @@ SECTION .text
%endif
%else ; !avg
movx m4, [srcq+src_strideq*2]
- movx m1, [dstq]
+ movx m1, [refq]
pavgb m0, m2
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
pavgb m2, m4
punpcklbw m0, m5
punpcklbw m2, m5
@@ -341,10 +341,10 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_zero_y_half_loop
@@ -386,7 +386,7 @@ SECTION .text
%if %1 == 16
movu m0, [srcq]
movu m4, [srcq+src_strideq]
- mova m1, [dstq]
+ mova m1, [refq]
%if cpuflag(ssse3)
punpckhbw m2, m0, m4
punpcklbw m0, m4
@@ -418,7 +418,7 @@ SECTION .text
%if %2 == 1 ; avg
; FIXME(rbultje) pipeline
packuswb m0, m2
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%endif
@@ -427,14 +427,14 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m2, [srcq+src_strideq]
movx m4, [srcq+src_strideq*2]
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
%if cpuflag(ssse3)
- movx m1, [dstq]
+ movx m1, [refq]
punpcklbw m0, m2
punpcklbw m2, m4
pmaddubsw m0, filter_y_a
@@ -454,7 +454,7 @@ SECTION .text
pmullw m4, filter_y_b
paddw m0, m1
paddw m2, filter_rnd
- movx m1, [dstq]
+ movx m1, [refq]
paddw m2, m4
%endif
psraw m0, 4
@@ -466,11 +466,11 @@ SECTION .text
%endif
packuswb m0, m2
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else ; 4xh
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m0, m5
movhlps m2, m0
@@ -480,10 +480,10 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_zero_y_other_loop
@@ -504,11 +504,11 @@ SECTION .text
%if %1 == 16
movu m0, [srcq]
movu m4, [srcq+1]
- mova m1, [dstq]
+ mova m1, [refq]
pavgb m0, m4
punpckhbw m3, m1, m5
%if %2 == 1 ; avg
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
%endif
punpcklbw m1, m5
punpckhbw m2, m0, m5
@@ -516,7 +516,7 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m4, [srcq+1]
@@ -530,17 +530,17 @@ SECTION .text
movx m2, [srcq+src_strideq+1]
punpckldq m4, m2
%endif
- movx m1, [dstq]
- movx m3, [dstq+dst_strideq]
+ movx m1, [refq]
+ movx m3, [refq+ref_strideq]
pavgb m0, m4
punpcklbw m3, m5
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpcklbw m1, m5
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else ; 4xh
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m1, m5
punpcklbw m0, m5
@@ -548,10 +548,10 @@ SECTION .text
%endif
%else ; !avg
movx m2, [srcq+src_strideq]
- movx m1, [dstq]
+ movx m1, [refq]
pavgb m0, m4
movx m4, [srcq+src_strideq+1]
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
pavgb m2, m4
punpcklbw m0, m5
punpcklbw m2, m5
@@ -561,10 +561,10 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_half_y_zero_loop
@@ -583,13 +583,13 @@ SECTION .text
.x_half_y_half_loop:
movu m4, [srcq]
movu m3, [srcq+1]
- mova m1, [dstq]
+ mova m1, [refq]
pavgb m4, m3
punpckhbw m3, m1, m5
pavgb m0, m4
%if %2 == 1 ; avg
punpcklbw m1, m5
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else
@@ -601,7 +601,7 @@ SECTION .text
mova m0, m4
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m3, [srcq+1]
@@ -628,13 +628,13 @@ SECTION .text
punpckldq m0, m2
pshuflw m4, m2, 0xe
%endif
- movx m1, [dstq]
+ movx m1, [refq]
pavgb m0, m2
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
%endif
punpcklbw m3, m5
@@ -653,8 +653,8 @@ SECTION .text
pavgb m4, m1
pavgb m0, m2
pavgb m2, m4
- movx m1, [dstq]
- movx m3, [dstq+dst_strideq]
+ movx m1, [refq]
+ movx m3, [refq+ref_strideq]
punpcklbw m0, m5
punpcklbw m2, m5
punpcklbw m3, m5
@@ -664,10 +664,10 @@ SECTION .text
mova m0, m4
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_half_y_half_loop
@@ -713,7 +713,7 @@ SECTION .text
.x_half_y_other_loop:
movu m4, [srcq]
movu m2, [srcq+1]
- mova m1, [dstq]
+ mova m1, [refq]
pavgb m4, m2
%if cpuflag(ssse3)
punpckhbw m2, m0, m4
@@ -743,7 +743,7 @@ SECTION .text
%if %2 == 1 ; avg
; FIXME(rbultje) pipeline
packuswb m0, m2
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%endif
@@ -752,7 +752,7 @@ SECTION .text
mova m0, m4
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m3, [srcq+1]
@@ -768,9 +768,9 @@ SECTION .text
movx m3, [srcq+src_strideq+1]
pavgb m2, m1
pavgb m4, m3
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
%if cpuflag(ssse3)
- movx m1, [dstq]
+ movx m1, [refq]
punpcklbw m0, m2
punpcklbw m2, m4
pmaddubsw m0, filter_y_a
@@ -790,7 +790,7 @@ SECTION .text
pmullw m1, m4, filter_y_b
paddw m2, filter_rnd
paddw m2, m1
- movx m1, [dstq]
+ movx m1, [refq]
%endif
psraw m0, 4
psraw m2, 4
@@ -801,11 +801,11 @@ SECTION .text
%endif
packuswb m0, m2
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m0, m5
movhlps m2, m0
@@ -816,10 +816,10 @@ SECTION .text
mova m0, m4
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_half_y_other_loop
@@ -867,7 +867,7 @@ SECTION .text
%if %1 == 16
movu m0, [srcq]
movu m4, [srcq+1]
- mova m1, [dstq]
+ mova m1, [refq]
%if cpuflag(ssse3)
punpckhbw m2, m0, m4
punpcklbw m0, m4
@@ -894,7 +894,7 @@ SECTION .text
%if %2 == 1 ; avg
; FIXME(rbultje) pipeline
packuswb m0, m2
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%endif
@@ -903,16 +903,16 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m1, [srcq+1]
movx m2, [srcq+src_strideq]
movx m4, [srcq+src_strideq+1]
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
%if cpuflag(ssse3)
punpcklbw m0, m1
- movx m1, [dstq]
+ movx m1, [refq]
punpcklbw m2, m4
pmaddubsw m0, filter_x_a
pmaddubsw m2, filter_x_a
@@ -932,7 +932,7 @@ SECTION .text
pmullw m4, filter_x_b
paddw m0, m1
paddw m2, filter_rnd
- movx m1, [dstq]
+ movx m1, [refq]
paddw m2, m4
%endif
psraw m0, 4
@@ -944,11 +944,11 @@ SECTION .text
%endif
packuswb m0, m2
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m0, m5
movhlps m2, m0
@@ -958,10 +958,10 @@ SECTION .text
SUM_SSE m0, m1, m2, m3, m6, m7
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_other_y_zero_loop
@@ -1037,7 +1037,7 @@ SECTION .text
movu m4, [srcq]
movu m3, [srcq+1]
%if cpuflag(ssse3)
- mova m1, [dstq]
+ mova m1, [refq]
punpckhbw m2, m4, m3
punpcklbw m4, m3
pmaddubsw m2, filter_x_a
@@ -1063,7 +1063,7 @@ SECTION .text
paddw m2, filter_rnd
paddw m4, m3
paddw m2, m1
- mova m1, [dstq]
+ mova m1, [refq]
psraw m4, 4
psraw m2, 4
punpckhbw m3, m1, m5
@@ -1077,7 +1077,7 @@ SECTION .text
%endif
%if %2 == 1 ; avg
; FIXME(rbultje) pipeline
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
%endif
punpckhbw m2, m0, m5
punpcklbw m0, m5
@@ -1085,7 +1085,7 @@ SECTION .text
mova m0, m4
add srcq, src_strideq
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m1, [srcq+1]
@@ -1113,8 +1113,8 @@ SECTION .text
punpcklbw m4, m3
pmaddubsw m2, filter_x_a
pmaddubsw m4, filter_x_a
- movx m1, [dstq]
- movx m3, [dstq+dst_strideq]
+ movx m1, [refq]
+ movx m3, [refq+ref_strideq]
paddw m2, filter_rnd
paddw m4, filter_rnd
%else
@@ -1129,9 +1129,9 @@ SECTION .text
pmullw m3, filter_x_b
paddw m4, filter_rnd
paddw m2, m1
- movx m1, [dstq]
+ movx m1, [refq]
paddw m4, m3
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
%endif
psraw m2, 4
psraw m4, 4
@@ -1144,11 +1144,11 @@ SECTION .text
%endif
packuswb m0, m2
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m0, m5
movhlps m2, m0
@@ -1160,10 +1160,10 @@ SECTION .text
mova m0, m4
lea srcq, [srcq+src_strideq*2]
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_other_y_half_loop
@@ -1254,7 +1254,7 @@ SECTION .text
%if cpuflag(ssse3)
movu m4, [srcq]
movu m3, [srcq+1]
- mova m1, [dstq]
+ mova m1, [refq]
punpckhbw m2, m4, m3
punpcklbw m4, m3
pmaddubsw m2, filter_x_a
@@ -1300,7 +1300,7 @@ SECTION .text
pmullw m0, filter_y_a
pmullw m3, filter_y_b
paddw m2, m1
- mova m1, [dstq]
+ mova m1, [refq]
paddw m0, filter_rnd
psraw m2, 4
paddw m0, m3
@@ -1311,7 +1311,7 @@ SECTION .text
%if %2 == 1 ; avg
; FIXME(rbultje) pipeline
packuswb m0, m2
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%endif
@@ -1319,7 +1319,7 @@ SECTION .text
mova m0, m4
INC_SRC_BY_SRC_STRIDE
- add dstq, dst_strideq
+ add refq, ref_strideq
%else ; %1 < 16
movx m0, [srcq]
movx m1, [srcq+1]
@@ -1355,8 +1355,8 @@ SECTION .text
punpcklbw m4, m3
pmaddubsw m2, filter_x_a
pmaddubsw m4, filter_x_a
- movx m3, [dstq+dst_strideq]
- movx m1, [dstq]
+ movx m3, [refq+ref_strideq]
+ movx m1, [refq]
paddw m2, filter_rnd
paddw m4, filter_rnd
psraw m2, 4
@@ -1395,9 +1395,9 @@ SECTION .text
pmullw m1, m4, filter_y_b
paddw m2, filter_rnd
paddw m0, m3
- movx m3, [dstq+dst_strideq]
+ movx m3, [refq+ref_strideq]
paddw m2, m1
- movx m1, [dstq]
+ movx m1, [refq]
psraw m0, 4
psraw m2, 4
punpcklbw m3, m5
@@ -1410,11 +1410,11 @@ SECTION .text
%endif
packuswb m0, m2
%if %1 > 4
- pavgb m0, [secq]
+ pavgb m0, [second_predq]
punpckhbw m2, m0, m5
punpcklbw m0, m5
%else
- movh m2, [secq]
+ movh m2, [second_predq]
pavgb m0, m2
punpcklbw m0, m5
movhlps m2, m0
@@ -1424,10 +1424,10 @@ SECTION .text
mova m0, m4
INC_SRC_BY_SRC_STRIDE
- lea dstq, [dstq+dst_strideq*2]
+ lea refq, [refq+ref_strideq*2]
%endif
%if %2 == 1 ; avg
- add secq, sec_str
+ add second_predq, second_str
%endif
dec block_height
jg .x_other_y_other_loop
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_avx2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_avx2.c
index d938b81ea2c..9232acbfbb3 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_avx2.c
@@ -164,11 +164,11 @@ static INLINE void variance64_avx2(const uint8_t *src, const int src_stride,
}
}
-void vpx_get16x16var_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride, unsigned int *sse,
- int *sum) {
+void vpx_get16x16var_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, sum);
}
@@ -224,8 +224,9 @@ void vpx_get16x16var_avx2(const uint8_t *src, int src_stride,
static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg) {
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg) {
const __m256i zero_reg = _mm256_setzero_si256();
__m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
int i;
@@ -233,11 +234,11 @@ static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
const __m256i src_reg = _mm256_loadu_si256((__m256i const *)src);
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i avg_reg = _mm256_avg_epu8(src_reg, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
} else {
exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
@@ -251,9 +252,10 @@ static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
// (x == 0, y == 4) or (x == 4, y == 0). sstep determines the direction.
static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride,
- int do_sec, int height, __m256i *sum_reg,
- __m256i *sse_reg, int sstep) {
+ const uint8_t *second_pred,
+ int second_stride, int do_sec, int height,
+ __m256i *sum_reg, __m256i *sse_reg,
+ int sstep) {
const __m256i zero_reg = _mm256_setzero_si256();
__m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
int i;
@@ -263,11 +265,11 @@ static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i avg_reg = _mm256_avg_epu8(src_avg, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
} else {
exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
@@ -280,24 +282,27 @@ static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
static INLINE void spv32_x0_y4(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg) {
- spv32_half_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, sum_reg, sse_reg, src_stride);
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg) {
+ spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, sum_reg, sse_reg, src_stride);
}
static INLINE void spv32_x4_y0(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg) {
- spv32_half_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, sum_reg, sse_reg, 1);
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg) {
+ spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, sum_reg, sse_reg, 1);
}
static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg) {
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg) {
const __m256i zero_reg = _mm256_setzero_si256();
const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
@@ -314,11 +319,11 @@ static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
prev_src_avg = src_avg;
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i avg_reg = _mm256_avg_epu8(current_avg, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
} else {
exp_src_lo = _mm256_unpacklo_epi8(current_avg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(current_avg, zero_reg);
@@ -333,9 +338,10 @@ static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
// (x == 0, y == bil) or (x == 4, y == bil). sstep determines the direction.
static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride,
- int do_sec, int height, __m256i *sum_reg,
- __m256i *sse_reg, int offset, int sstep) {
+ const uint8_t *second_pred,
+ int second_stride, int do_sec, int height,
+ __m256i *sum_reg, __m256i *sse_reg,
+ int offset, int sstep) {
const __m256i zero_reg = _mm256_setzero_si256();
const __m256i pw8 = _mm256_set1_epi16(8);
const __m256i filter = _mm256_load_si256(
@@ -351,10 +357,10 @@ static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
FILTER_SRC(filter)
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
- sec += sec_stride;
+ second_pred += second_stride;
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
}
@@ -366,27 +372,27 @@ static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
static INLINE void spv32_x0_yb(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg,
- int y_offset) {
- spv32_bilin_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, sum_reg, sse_reg, y_offset, src_stride);
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg, int y_offset) {
+ spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, sum_reg, sse_reg, y_offset, src_stride);
}
static INLINE void spv32_xb_y0(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg,
- int x_offset) {
- spv32_bilin_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, sum_reg, sse_reg, x_offset, 1);
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg, int x_offset) {
+ spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, sum_reg, sse_reg, x_offset, 1);
}
static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg,
- int y_offset) {
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg, int y_offset) {
const __m256i zero_reg = _mm256_setzero_si256();
const __m256i pw8 = _mm256_set1_epi16(8);
const __m256i filter = _mm256_load_si256(
@@ -408,12 +414,12 @@ static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
FILTER_SRC(filter)
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i exp_src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
const __m256i avg_reg = _mm256_avg_epu8(exp_src_avg, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
}
CALC_SUM_SSE_INSIDE_LOOP
dst += dst_stride;
@@ -423,9 +429,9 @@ static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg,
- int x_offset) {
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg, int x_offset) {
const __m256i zero_reg = _mm256_setzero_si256();
const __m256i pw8 = _mm256_set1_epi16(8);
const __m256i filter = _mm256_load_si256(
@@ -456,11 +462,11 @@ static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
src_pack = _mm256_avg_epu8(src_pack, src_reg);
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i avg_pack = _mm256_avg_epu8(src_pack, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_pack, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_pack, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
} else {
exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
@@ -474,9 +480,9 @@ static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride, int do_sec,
- int height, __m256i *sum_reg, __m256i *sse_reg,
- int x_offset, int y_offset) {
+ const uint8_t *second_pred, int second_stride,
+ int do_sec, int height, __m256i *sum_reg,
+ __m256i *sse_reg, int x_offset, int y_offset) {
const __m256i zero_reg = _mm256_setzero_si256();
const __m256i pw8 = _mm256_set1_epi16(8);
const __m256i xfilter = _mm256_load_si256(
@@ -511,12 +517,12 @@ static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
FILTER_SRC(yfilter)
if (do_sec) {
- const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
+ const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
- sec += sec_stride;
+ second_pred += second_stride;
}
prev_src_pack = src_pack;
@@ -530,7 +536,7 @@ static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
static INLINE int sub_pix_var32xh(const uint8_t *src, int src_stride,
int x_offset, int y_offset,
const uint8_t *dst, int dst_stride,
- const uint8_t *sec, int sec_stride,
+ const uint8_t *second_pred, int second_stride,
int do_sec, int height, unsigned int *sse) {
const __m256i zero_reg = _mm256_setzero_si256();
__m256i sum_reg = _mm256_setzero_si256();
@@ -540,44 +546,44 @@ static INLINE int sub_pix_var32xh(const uint8_t *src, int src_stride,
// x_offset = 0 and y_offset = 0
if (x_offset == 0) {
if (y_offset == 0) {
- spv32_x0_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg);
+ spv32_x0_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg);
// x_offset = 0 and y_offset = 4
} else if (y_offset == 4) {
- spv32_x0_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg);
+ spv32_x0_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg);
// x_offset = 0 and y_offset = bilin interpolation
} else {
- spv32_x0_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg, y_offset);
+ spv32_x0_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg, y_offset);
}
// x_offset = 4 and y_offset = 0
} else if (x_offset == 4) {
if (y_offset == 0) {
- spv32_x4_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg);
+ spv32_x4_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg);
// x_offset = 4 and y_offset = 4
} else if (y_offset == 4) {
- spv32_x4_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg);
+ spv32_x4_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg);
// x_offset = 4 and y_offset = bilin interpolation
} else {
- spv32_x4_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg, y_offset);
+ spv32_x4_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg, y_offset);
}
// x_offset = bilin interpolation and y_offset = 0
} else {
if (y_offset == 0) {
- spv32_xb_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg, x_offset);
+ spv32_xb_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg, x_offset);
// x_offset = bilin interpolation and y_offset = 4
} else if (y_offset == 4) {
- spv32_xb_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg, x_offset);
+ spv32_xb_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg, x_offset);
// x_offset = bilin interpolation and y_offset = bilin interpolation
} else {
- spv32_xb_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
- height, &sum_reg, &sse_reg, x_offset, y_offset);
+ spv32_xb_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
+ do_sec, height, &sum_reg, &sse_reg, x_offset, y_offset);
}
}
CALC_SUM_AND_SSE
@@ -593,63 +599,63 @@ static unsigned int sub_pixel_variance32xh_avx2(
static unsigned int sub_pixel_avg_variance32xh_avx2(
const uint8_t *src, int src_stride, int x_offset, int y_offset,
- const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
- int height, unsigned int *sse) {
+ const uint8_t *dst, int dst_stride, const uint8_t *second_pred,
+ int second_stride, int height, unsigned int *sse) {
return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
- sec, sec_stride, 1, height, sse);
+ second_pred, second_stride, 1, height, sse);
}
-typedef void (*get_var_avx2)(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+typedef void (*get_var_avx2)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse, int *sum);
-unsigned int vpx_variance16x8_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x8_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse - (uint32_t)(((int64_t)sum * sum) >> 7);
}
-unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x16_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}
-unsigned int vpx_variance16x32_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x32_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 32, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x16_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance32_avx2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x32_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
__m128i vsum_128;
- variance32_avx2(src, src_stride, ref, ref_stride, 32, &vsse, &vsum);
+ variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
vsum_128 = _mm_add_epi16(_mm256_castsi256_si128(vsum),
_mm256_extractf128_si256(vsum, 1));
vsum_128 = _mm_add_epi32(_mm_cvtepi16_epi32(vsum_128),
@@ -658,13 +664,13 @@ unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
}
-unsigned int vpx_variance32x64_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x64_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
__m128i vsum_128;
- variance32_avx2(src, src_stride, ref, ref_stride, 64, &vsse, &vsum);
+ variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64, &vsse, &vsum);
vsum = sum_to_32bit_avx2(vsum);
vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
_mm256_extractf128_si256(vsum, 1));
@@ -672,14 +678,14 @@ unsigned int vpx_variance32x64_avx2(const uint8_t *src, int src_stride,
return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance64x32_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m256i vsse = _mm256_setzero_si256();
__m256i vsum = _mm256_setzero_si256();
__m128i vsum_128;
int sum;
- variance64_avx2(src, src_stride, ref, ref_stride, 32, &vsse, &vsum);
+ variance64_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
vsum = sum_to_32bit_avx2(vsum);
vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
_mm256_extractf128_si256(vsum, 1));
@@ -687,8 +693,8 @@ unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance64x64_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m256i vsse = _mm256_setzero_si256();
__m256i vsum = _mm256_setzero_si256();
@@ -698,8 +704,9 @@ unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
for (i = 0; i < 2; i++) {
__m256i vsum16;
- variance64_avx2(src + 32 * i * src_stride, src_stride,
- ref + 32 * i * ref_stride, ref_stride, 32, &vsse, &vsum16);
+ variance64_avx2(src_ptr + 32 * i * src_stride, src_stride,
+ ref_ptr + 32 * i * ref_stride, ref_stride, 32, &vsse,
+ &vsum16);
vsum = _mm256_add_epi32(vsum, sum_to_32bit_avx2(vsum16));
}
vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
@@ -708,63 +715,61 @@ unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
return *sse - (unsigned int)(((int64_t)sum * sum) >> 12);
}
-unsigned int vpx_mse16x8_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse16x8_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse;
}
-unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse16x16_avx2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
int sum;
__m256i vsse, vsum;
- variance16_avx2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
return *sse;
}
-unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
- int src_stride, int x_offset,
- int y_offset, const uint8_t *dst,
- int dst_stride,
- unsigned int *sse) {
+unsigned int vpx_sub_pixel_variance64x64_avx2(
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
unsigned int sse1;
const int se1 = sub_pixel_variance32xh_avx2(
- src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);
+ src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 64, &sse1);
unsigned int sse2;
const int se2 =
- sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
- dst + 32, dst_stride, 64, &sse2);
+ sub_pixel_variance32xh_avx2(src_ptr + 32, src_stride, x_offset, y_offset,
+ ref_ptr + 32, ref_stride, 64, &sse2);
const int se = se1 + se2;
*sse = sse1 + sse2;
return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}
-unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
- int src_stride, int x_offset,
- int y_offset, const uint8_t *dst,
- int dst_stride,
- unsigned int *sse) {
+unsigned int vpx_sub_pixel_variance32x32_avx2(
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
const int se = sub_pixel_variance32xh_avx2(
- src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
+ src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 32, sse);
return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}
unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
- const uint8_t *src, int src_stride, int x_offset, int y_offset,
- const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
+ const uint8_t *second_pred) {
unsigned int sse1;
- const int se1 = sub_pixel_avg_variance32xh_avx2(
- src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);
+ const int se1 = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
+ y_offset, ref_ptr, ref_stride,
+ second_pred, 64, 64, &sse1);
unsigned int sse2;
const int se2 = sub_pixel_avg_variance32xh_avx2(
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32,
- 64, 64, &sse2);
+ src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32, ref_stride,
+ second_pred + 32, 64, 64, &sse2);
const int se = se1 + se2;
*sse = sse1 + sse2;
@@ -773,10 +778,12 @@ unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
}
unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
- const uint8_t *src, int src_stride, int x_offset, int y_offset,
- const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
+ const uint8_t *second_pred) {
// Process 32 elements in parallel.
- const int se = sub_pixel_avg_variance32xh_avx2(
- src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
+ const int se = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
+ y_offset, ref_ptr, ref_stride,
+ second_pred, 32, 32, sse);
return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c
index 02790525563..37ef64ecaa0 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/variance_sse2.c
@@ -22,14 +22,14 @@ static INLINE unsigned int add32x4_sse2(__m128i val) {
return _mm_cvtsi128_si32(val);
}
-unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
+unsigned int vpx_get_mb_ss_sse2(const int16_t *src_ptr) {
__m128i vsum = _mm_setzero_si128();
int i;
for (i = 0; i < 32; ++i) {
- const __m128i v = _mm_loadu_si128((const __m128i *)src);
+ const __m128i v = _mm_loadu_si128((const __m128i *)src_ptr);
vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
- src += 8;
+ src_ptr += 8;
}
return add32x4_sse2(vsum);
@@ -42,10 +42,11 @@ static INLINE __m128i load4x2_sse2(const uint8_t *const p, const int stride) {
return _mm_unpacklo_epi8(p01, _mm_setzero_si128());
}
-static INLINE void variance_kernel_sse2(const __m128i src, const __m128i ref,
+static INLINE void variance_kernel_sse2(const __m128i src_ptr,
+ const __m128i ref_ptr,
__m128i *const sse,
__m128i *const sum) {
- const __m128i diff = _mm_sub_epi16(src, ref);
+ const __m128i diff = _mm_sub_epi16(src_ptr, ref_ptr);
*sse = _mm_add_epi32(*sse, _mm_madd_epi16(diff, diff));
*sum = _mm_add_epi16(*sum, diff);
}
@@ -99,8 +100,8 @@ static INLINE int sum_final_sse2(const __m128i sum) {
return add32x4_sse2(t);
}
-static INLINE void variance4_sse2(const uint8_t *src, const int src_stride,
- const uint8_t *ref, const int ref_stride,
+static INLINE void variance4_sse2(const uint8_t *src_ptr, const int src_stride,
+ const uint8_t *ref_ptr, const int ref_stride,
const int h, __m128i *const sse,
__m128i *const sum) {
int i;
@@ -110,17 +111,17 @@ static INLINE void variance4_sse2(const uint8_t *src, const int src_stride,
*sum = _mm_setzero_si128();
for (i = 0; i < h; i += 2) {
- const __m128i s = load4x2_sse2(src, src_stride);
- const __m128i r = load4x2_sse2(ref, ref_stride);
+ const __m128i s = load4x2_sse2(src_ptr, src_stride);
+ const __m128i r = load4x2_sse2(ref_ptr, ref_stride);
variance_kernel_sse2(s, r, sse, sum);
- src += 2 * src_stride;
- ref += 2 * ref_stride;
+ src_ptr += 2 * src_stride;
+ ref_ptr += 2 * ref_stride;
}
}
-static INLINE void variance8_sse2(const uint8_t *src, const int src_stride,
- const uint8_t *ref, const int ref_stride,
+static INLINE void variance8_sse2(const uint8_t *src_ptr, const int src_stride,
+ const uint8_t *ref_ptr, const int ref_stride,
const int h, __m128i *const sse,
__m128i *const sum) {
const __m128i zero = _mm_setzero_si128();
@@ -132,23 +133,23 @@ static INLINE void variance8_sse2(const uint8_t *src, const int src_stride,
for (i = 0; i < h; i++) {
const __m128i s =
- _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)src), zero);
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)src_ptr), zero);
const __m128i r =
- _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)ref), zero);
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)ref_ptr), zero);
variance_kernel_sse2(s, r, sse, sum);
- src += src_stride;
- ref += ref_stride;
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
-static INLINE void variance16_kernel_sse2(const uint8_t *const src,
- const uint8_t *const ref,
+static INLINE void variance16_kernel_sse2(const uint8_t *const src_ptr,
+ const uint8_t *const ref_ptr,
__m128i *const sse,
__m128i *const sum) {
const __m128i zero = _mm_setzero_si128();
- const __m128i s = _mm_loadu_si128((const __m128i *)src);
- const __m128i r = _mm_loadu_si128((const __m128i *)ref);
+ const __m128i s = _mm_loadu_si128((const __m128i *)src_ptr);
+ const __m128i r = _mm_loadu_si128((const __m128i *)ref_ptr);
const __m128i src0 = _mm_unpacklo_epi8(s, zero);
const __m128i ref0 = _mm_unpacklo_epi8(r, zero);
const __m128i src1 = _mm_unpackhi_epi8(s, zero);
@@ -158,8 +159,8 @@ static INLINE void variance16_kernel_sse2(const uint8_t *const src,
variance_kernel_sse2(src1, ref1, sse, sum);
}
-static INLINE void variance16_sse2(const uint8_t *src, const int src_stride,
- const uint8_t *ref, const int ref_stride,
+static INLINE void variance16_sse2(const uint8_t *src_ptr, const int src_stride,
+ const uint8_t *ref_ptr, const int ref_stride,
const int h, __m128i *const sse,
__m128i *const sum) {
int i;
@@ -169,14 +170,14 @@ static INLINE void variance16_sse2(const uint8_t *src, const int src_stride,
*sum = _mm_setzero_si128();
for (i = 0; i < h; ++i) {
- variance16_kernel_sse2(src, ref, sse, sum);
- src += src_stride;
- ref += ref_stride;
+ variance16_kernel_sse2(src_ptr, ref_ptr, sse, sum);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
-static INLINE void variance32_sse2(const uint8_t *src, const int src_stride,
- const uint8_t *ref, const int ref_stride,
+static INLINE void variance32_sse2(const uint8_t *src_ptr, const int src_stride,
+ const uint8_t *ref_ptr, const int ref_stride,
const int h, __m128i *const sse,
__m128i *const sum) {
int i;
@@ -186,15 +187,15 @@ static INLINE void variance32_sse2(const uint8_t *src, const int src_stride,
*sum = _mm_setzero_si128();
for (i = 0; i < h; ++i) {
- variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
- variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
- src += src_stride;
- ref += ref_stride;
+ variance16_kernel_sse2(src_ptr + 0, ref_ptr + 0, sse, sum);
+ variance16_kernel_sse2(src_ptr + 16, ref_ptr + 16, sse, sum);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
-static INLINE void variance64_sse2(const uint8_t *src, const int src_stride,
- const uint8_t *ref, const int ref_stride,
+static INLINE void variance64_sse2(const uint8_t *src_ptr, const int src_stride,
+ const uint8_t *ref_ptr, const int ref_stride,
const int h, __m128i *const sse,
__m128i *const sum) {
int i;
@@ -204,135 +205,136 @@ static INLINE void variance64_sse2(const uint8_t *src, const int src_stride,
*sum = _mm_setzero_si128();
for (i = 0; i < h; ++i) {
- variance16_kernel_sse2(src + 0, ref + 0, sse, sum);
- variance16_kernel_sse2(src + 16, ref + 16, sse, sum);
- variance16_kernel_sse2(src + 32, ref + 32, sse, sum);
- variance16_kernel_sse2(src + 48, ref + 48, sse, sum);
- src += src_stride;
- ref += ref_stride;
+ variance16_kernel_sse2(src_ptr + 0, ref_ptr + 0, sse, sum);
+ variance16_kernel_sse2(src_ptr + 16, ref_ptr + 16, sse, sum);
+ variance16_kernel_sse2(src_ptr + 32, ref_ptr + 32, sse, sum);
+ variance16_kernel_sse2(src_ptr + 48, ref_ptr + 48, sse, sum);
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
}
}
-void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref,
- int ref_stride, unsigned int *sse, int *sum) {
+void vpx_get8x8var_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
__m128i vsse, vsum;
- variance8_sse2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, sum);
}
-void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride, unsigned int *sse,
- int *sum) {
+void vpx_get16x16var_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
__m128i vsse, vsum;
- variance16_sse2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_256_pel_sse2(vsse, vsum, sse, sum);
}
-unsigned int vpx_variance4x4_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance4x4_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance4_sse2(src, src_stride, ref, ref_stride, 4, &vsse, &vsum);
+ variance4_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 4);
}
-unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance4x8_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance4_sse2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance4_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 5);
}
-unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance8x4_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance8_sse2(src, src_stride, ref, ref_stride, 4, &vsse, &vsum);
+ variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 5);
}
-unsigned int vpx_variance8x8_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance8x8_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance8_sse2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 6);
}
-unsigned int vpx_variance8x16_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance8x16_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance8_sse2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 7);
}
-unsigned int vpx_variance16x8_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x8_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance16_sse2(src, src_stride, ref, ref_stride, 8, &vsse, &vsum);
+ variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
return *sse - ((sum * sum) >> 7);
}
-unsigned int vpx_variance16x16_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x16_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance16_sse2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_256_pel_sse2(vsse, vsum, sse, &sum);
return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}
-unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance16x32_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse, vsum;
int sum;
- variance16_sse2(src, src_stride, ref, ref_stride, 32, &vsse, &vsum);
+ variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
variance_final_512_pel_sse2(vsse, vsum, sse, &sum);
return *sse - (unsigned int)(((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x16_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse = _mm_setzero_si128();
__m128i vsum;
int sum;
- variance32_sse2(src, src_stride, ref, ref_stride, 16, &vsse, &vsum);
+ variance32_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
variance_final_512_pel_sse2(vsse, vsum, sse, &sum);
return *sse - (unsigned int)(((int64_t)sum * sum) >> 9);
}
-unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x32_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse = _mm_setzero_si128();
__m128i vsum;
int sum;
- variance32_sse2(src, src_stride, ref, ref_stride, 32, &vsse, &vsum);
+ variance32_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
*sse = add32x4_sse2(vsse);
sum = sum_final_sse2(vsum);
return *sse - (unsigned int)(((int64_t)sum * sum) >> 10);
}
-unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance32x64_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse = _mm_setzero_si128();
__m128i vsum = _mm_setzero_si128();
@@ -341,8 +343,9 @@ unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
for (i = 0; i < 2; i++) {
__m128i vsum16;
- variance32_sse2(src + 32 * i * src_stride, src_stride,
- ref + 32 * i * ref_stride, ref_stride, 32, &vsse, &vsum16);
+ variance32_sse2(src_ptr + 32 * i * src_stride, src_stride,
+ ref_ptr + 32 * i * ref_stride, ref_stride, 32, &vsse,
+ &vsum16);
vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
}
*sse = add32x4_sse2(vsse);
@@ -350,8 +353,8 @@ unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
return *sse - (unsigned int)(((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance64x32_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse = _mm_setzero_si128();
__m128i vsum = _mm_setzero_si128();
@@ -360,8 +363,9 @@ unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
for (i = 0; i < 2; i++) {
__m128i vsum16;
- variance64_sse2(src + 16 * i * src_stride, src_stride,
- ref + 16 * i * ref_stride, ref_stride, 16, &vsse, &vsum16);
+ variance64_sse2(src_ptr + 16 * i * src_stride, src_stride,
+ ref_ptr + 16 * i * ref_stride, ref_stride, 16, &vsse,
+ &vsum16);
vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
}
*sse = add32x4_sse2(vsse);
@@ -369,8 +373,8 @@ unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
return *sse - (unsigned int)(((int64_t)sum * sum) >> 11);
}
-unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_variance64x64_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
__m128i vsse = _mm_setzero_si128();
__m128i vsum = _mm_setzero_si128();
@@ -379,8 +383,9 @@ unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
for (i = 0; i < 4; i++) {
__m128i vsum16;
- variance64_sse2(src + 16 * i * src_stride, src_stride,
- ref + 16 * i * ref_stride, ref_stride, 16, &vsse, &vsum16);
+ variance64_sse2(src_ptr + 16 * i * src_stride, src_stride,
+ ref_ptr + 16 * i * ref_stride, ref_stride, 16, &vsse,
+ &vsum16);
vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
}
*sse = add32x4_sse2(vsse);
@@ -388,41 +393,41 @@ unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
return *sse - (unsigned int)(((int64_t)sum * sum) >> 12);
}
-unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse8x8_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
- vpx_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
+ vpx_variance8x8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse8x16_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse8x16_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
- vpx_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
+ vpx_variance8x16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse16x8_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse16x8_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
- vpx_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
+ vpx_variance16x8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
return *sse;
}
-unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride,
+unsigned int vpx_mse16x16_sse2(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
unsigned int *sse) {
- vpx_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
+ vpx_variance16x16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
return *sse;
}
// The 2 unused parameters are place holders for PIC enabled build.
// These definitions are for functions defined in subpel_variance.asm
-#define DECL(w, opt) \
- int vpx_sub_pixel_variance##w##xh_##opt( \
- const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
- const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
- void *unused0, void *unused)
+#define DECL(w, opt) \
+ int vpx_sub_pixel_variance##w##xh_##opt( \
+ const uint8_t *src_ptr, ptrdiff_t src_stride, int x_offset, \
+ int y_offset, const uint8_t *ref_ptr, ptrdiff_t ref_stride, int height, \
+ unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt1, opt2) \
DECL(4, opt1); \
DECL(8, opt1); \
@@ -433,36 +438,37 @@ DECLS(ssse3, ssse3);
#undef DECLS
#undef DECL
-#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
- unsigned int vpx_sub_pixel_variance##w##x##h##_##opt( \
- const uint8_t *src, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) { \
- unsigned int sse; \
- int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
- y_offset, dst, dst_stride, \
- h, &sse, NULL, NULL); \
- if (w > wf) { \
- unsigned int sse2; \
- int se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \
- &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- if (w > wf * 2) { \
- se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \
- &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \
- &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- } \
- } \
- *sse_ptr = sse; \
- return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
+#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
+ unsigned int vpx_sub_pixel_variance##w##x##h##_##opt( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) { \
+ unsigned int sse_tmp; \
+ int se = vpx_sub_pixel_variance##wf##xh_##opt( \
+ src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, h, \
+ &sse_tmp, NULL, NULL); \
+ if (w > wf) { \
+ unsigned int sse2; \
+ int se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ src_ptr + 16, src_stride, x_offset, y_offset, ref_ptr + 16, \
+ ref_stride, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ if (w > wf * 2) { \
+ se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32, \
+ ref_stride, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ se2 = vpx_sub_pixel_variance##wf##xh_##opt( \
+ src_ptr + 48, src_stride, x_offset, y_offset, ref_ptr + 48, \
+ ref_stride, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ } \
+ } \
+ *sse = sse_tmp; \
+ return sse_tmp - \
+ (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
#define FNS(opt1, opt2) \
@@ -487,12 +493,12 @@ FNS(ssse3, ssse3);
#undef FN
// The 2 unused parameters are place holders for PIC enabled build.
-#define DECL(w, opt) \
- int vpx_sub_pixel_avg_variance##w##xh_##opt( \
- const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
- const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec, \
- ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
- void *unused)
+#define DECL(w, opt) \
+ int vpx_sub_pixel_avg_variance##w##xh_##opt( \
+ const uint8_t *src_ptr, ptrdiff_t src_stride, int x_offset, \
+ int y_offset, const uint8_t *ref_ptr, ptrdiff_t ref_stride, \
+ const uint8_t *second_pred, ptrdiff_t second_stride, int height, \
+ unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt1, opt2) \
DECL(4, opt1); \
DECL(8, opt1); \
@@ -503,37 +509,38 @@ DECLS(ssse3, ssse3);
#undef DECL
#undef DECLS
-#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
- unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt( \
- const uint8_t *src, int src_stride, int x_offset, int y_offset, \
- const uint8_t *dst, int dst_stride, unsigned int *sseptr, \
- const uint8_t *sec) { \
- unsigned int sse; \
- int se = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
- src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \
- NULL, NULL); \
- if (w > wf) { \
- unsigned int sse2; \
- int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \
- sec + 16, w, h, &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- if (w > wf * 2) { \
- se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \
- sec + 32, w, h, &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
- src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \
- sec + 48, w, h, &sse2, NULL, NULL); \
- se += se2; \
- sse += sse2; \
- } \
- } \
- *sseptr = sse; \
- return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
+#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
+ unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt( \
+ const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
+ const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, \
+ const uint8_t *second_pred) { \
+ unsigned int sse_tmp; \
+ int se = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, \
+ second_pred, w, h, &sse_tmp, NULL, NULL); \
+ if (w > wf) { \
+ unsigned int sse2; \
+ int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ src_ptr + 16, src_stride, x_offset, y_offset, ref_ptr + 16, \
+ ref_stride, second_pred + 16, w, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ if (w > wf * 2) { \
+ se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32, \
+ ref_stride, second_pred + 32, w, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \
+ src_ptr + 48, src_stride, x_offset, y_offset, ref_ptr + 48, \
+ ref_stride, second_pred + 48, w, h, &sse2, NULL, NULL); \
+ se += se2; \
+ sse_tmp += sse2; \
+ } \
+ } \
+ *sse = sse_tmp; \
+ return sse_tmp - \
+ (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
#define FNS(opt1, opt2) \
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c
index 4f164afeb4e..9d6f83787e8 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_asm_stubs.c
@@ -26,6 +26,19 @@ filter8_1dfunction vpx_filter_block1d8_h8_avg_sse2;
filter8_1dfunction vpx_filter_block1d4_v8_avg_sse2;
filter8_1dfunction vpx_filter_block1d4_h8_avg_sse2;
+filter8_1dfunction vpx_filter_block1d16_h4_sse2;
+filter8_1dfunction vpx_filter_block1d16_v4_sse2;
+filter8_1dfunction vpx_filter_block1d8_h4_sse2;
+filter8_1dfunction vpx_filter_block1d8_v4_sse2;
+filter8_1dfunction vpx_filter_block1d4_h4_sse2;
+filter8_1dfunction vpx_filter_block1d4_v4_sse2;
+#define vpx_filter_block1d16_v4_avg_sse2 vpx_filter_block1d16_v8_avg_sse2
+#define vpx_filter_block1d16_h4_avg_sse2 vpx_filter_block1d16_h8_avg_sse2
+#define vpx_filter_block1d8_v4_avg_sse2 vpx_filter_block1d8_v8_avg_sse2
+#define vpx_filter_block1d8_h4_avg_sse2 vpx_filter_block1d8_h8_avg_sse2
+#define vpx_filter_block1d4_v4_avg_sse2 vpx_filter_block1d4_v8_avg_sse2
+#define vpx_filter_block1d4_h4_avg_sse2 vpx_filter_block1d4_h8_avg_sse2
+
filter8_1dfunction vpx_filter_block1d16_v2_sse2;
filter8_1dfunction vpx_filter_block1d16_h2_sse2;
filter8_1dfunction vpx_filter_block1d8_v2_sse2;
@@ -91,6 +104,25 @@ highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h8_avg_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_avg_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v4_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h4_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v4_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d8_h4_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v4_sse2;
+highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h4_sse2;
+#define vpx_highbd_filter_block1d16_v4_avg_sse2 \
+ vpx_highbd_filter_block1d16_v8_avg_sse2
+#define vpx_highbd_filter_block1d16_h4_avg_sse2 \
+ vpx_highbd_filter_block1d16_h8_avg_sse2
+#define vpx_highbd_filter_block1d8_v4_avg_sse2 \
+ vpx_highbd_filter_block1d8_v8_avg_sse2
+#define vpx_highbd_filter_block1d8_h4_avg_sse2 \
+ vpx_highbd_filter_block1d8_h8_avg_sse2
+#define vpx_highbd_filter_block1d4_v4_avg_sse2 \
+ vpx_highbd_filter_block1d4_v8_avg_sse2
+#define vpx_highbd_filter_block1d4_h4_avg_sse2 \
+ vpx_highbd_filter_block1d4_h8_avg_sse2
+
highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v2_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d16_h2_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d8_v2_sse2;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm
index d83507dc995..e6e72b826a6 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm
@@ -45,7 +45,7 @@
;Compute max and min values of a pixel
mov rdx, 0x00010001
- movsxd rcx, DWORD PTR arg(6) ;bps
+ movsxd rcx, DWORD PTR arg(6) ;bd
movq xmm0, rdx
movq xmm1, rcx
pshufd xmm0, xmm0, 0b
@@ -121,7 +121,7 @@
;Compute max and min values of a pixel
mov rdx, 0x00010001
- movsxd rcx, DWORD PTR arg(6) ;bps
+ movsxd rcx, DWORD PTR arg(6) ;bd
movq xmm0, rdx
movq xmm1, rcx
pshufd xmm0, xmm0, 0b
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
index 9bffe504b1f..87bf75ebb8e 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm
@@ -26,7 +26,7 @@
pshufd xmm3, xmm3, 0
mov rdx, 0x00010001
- movsxd rcx, DWORD PTR arg(6) ;bps
+ movsxd rcx, DWORD PTR arg(6) ;bd
movq xmm5, rdx
movq xmm2, rcx
pshufd xmm5, xmm5, 0b
@@ -82,7 +82,7 @@
pshufd xmm4, xmm4, 0
mov rdx, 0x00010001
- movsxd rcx, DWORD PTR arg(6) ;bps
+ movsxd rcx, DWORD PTR arg(6) ;bd
movq xmm8, rdx
movq xmm5, rcx
pshufd xmm8, xmm8, 0b
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c
new file mode 100644
index 00000000000..0be2c0fef48
--- /dev/null
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_4t_intrin_sse2.c
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/x86/convolve.h"
+#include "vpx_dsp/x86/convolve_sse2.h"
+#include "vpx_ports/mem.h"
+
+#define CONV8_ROUNDING_BITS (7)
+#define CONV8_ROUNDING_NUM (1 << (CONV8_ROUNDING_BITS - 1))
+
+void vpx_filter_block1d16_h4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shift_1, src_reg_shift_2, src_reg_shift_3;
+ __m128i dst_first, dst_second;
+ __m128i even, odd;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ for (h = height; h > 0; --h) {
+ // We will load multiple shifted versions of the row and shuffle them into
+ // 16-bit words of the form
+ // ... s[2] s[1] s[0] s[-1]
+ // ... s[4] s[3] s[2] s[1]
+ // Then we call multiply and add to get partial results
+    // s[2]k[3]+s[1]k[2] s[0]k[3]+s[-1]k[2]
+    // s[4]k[5]+s[3]k[4] s[2]k[5]+s[1]k[4]
+ // The two results are then added together for the first half of even
+ // output.
+    // Repeat multiple times to get the whole output
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_1 = _mm_srli_si128(src_reg, 1);
+ src_reg_shift_2 = _mm_srli_si128(src_reg, 2);
+ src_reg_shift_3 = _mm_srli_si128(src_reg, 3);
+
+ // Output 6 4 2 0
+ even = mm_madd_add_epi8_sse2(&src_reg, &src_reg_shift_2, &kernel_reg_23,
+ &kernel_reg_45);
+
+ // Output 7 5 3 1
+ odd = mm_madd_add_epi8_sse2(&src_reg_shift_1, &src_reg_shift_3,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Combine to get the first half of the dst
+ dst_first = mm_zip_epi32_sse2(&even, &odd);
+
+ // Do again to get the second half of dst
+ src_reg = _mm_loadu_si128((const __m128i *)(src_ptr + 8));
+ src_reg_shift_1 = _mm_srli_si128(src_reg, 1);
+ src_reg_shift_2 = _mm_srli_si128(src_reg, 2);
+ src_reg_shift_3 = _mm_srli_si128(src_reg, 3);
+
+ // Output 14 12 10 8
+ even = mm_madd_add_epi8_sse2(&src_reg, &src_reg_shift_2, &kernel_reg_23,
+ &kernel_reg_45);
+
+ // Output 15 13 11 9
+ odd = mm_madd_add_epi8_sse2(&src_reg_shift_1, &src_reg_shift_3,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Combine to get the second half of the dst
+ dst_second = mm_zip_epi32_sse2(&even, &odd);
+
+ // Round each result
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+ dst_second = mm_round_epi16_sse2(&dst_second, &reg_32, 6);
+
+ // Finally combine to get the final dst
+ dst_first = _mm_packus_epi16(dst_first, dst_second);
+ _mm_store_si128((__m128i *)dst_ptr, dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+/* The macro used to generate functions shifts the src_ptr up by 3 rows already
+ * */
+
+void vpx_filter_block1d16_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10_lo, src_reg_m10_hi, src_reg_01_lo, src_reg_01_hi;
+ __m128i src_reg_12_lo, src_reg_12_hi, src_reg_23_lo, src_reg_23_hi;
+ // Half of half of the interleaved rows
+ __m128i src_reg_m10_lo_1, src_reg_m10_lo_2, src_reg_m10_hi_1,
+ src_reg_m10_hi_2;
+ __m128i src_reg_01_lo_1, src_reg_01_lo_2, src_reg_01_hi_1, src_reg_01_hi_2;
+ __m128i src_reg_12_lo_1, src_reg_12_lo_2, src_reg_12_hi_1, src_reg_12_hi_2;
+ __m128i src_reg_23_lo_1, src_reg_23_lo_2, src_reg_23_hi_1, src_reg_23_hi_2;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10_lo, res_reg_01_lo, res_reg_12_lo, res_reg_23_lo;
+ __m128i res_reg_m10_hi, res_reg_01_hi, res_reg_12_hi, res_reg_23_hi;
+ __m128i res_reg_m1012, res_reg_0123;
+ __m128i res_reg_m1012_lo, res_reg_0123_lo, res_reg_m1012_hi, res_reg_0123_hi;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+  // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ // We will load two rows of pixels as 8-bit words, rearrange them as 16-bit
+ // words,
+ // shuffle the data into the form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,7] s[-1,7] s[0,6] s[-1,6]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // ... s[0,13] s[-1,13] s[0,12] s[-1,12]
+ // so that we can call multiply and add with the kernel to get 32-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10_lo = _mm_unpacklo_epi8(src_reg_m1, src_reg_0);
+ src_reg_m10_hi = _mm_unpackhi_epi8(src_reg_m1, src_reg_0);
+ src_reg_m10_lo_1 = _mm_unpacklo_epi8(src_reg_m10_lo, _mm_setzero_si128());
+ src_reg_m10_lo_2 = _mm_unpackhi_epi8(src_reg_m10_lo, _mm_setzero_si128());
+ src_reg_m10_hi_1 = _mm_unpacklo_epi8(src_reg_m10_hi, _mm_setzero_si128());
+ src_reg_m10_hi_2 = _mm_unpackhi_epi8(src_reg_m10_hi, _mm_setzero_si128());
+
+ // More shuffling
+ src_reg_1 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01_lo = _mm_unpacklo_epi8(src_reg_0, src_reg_1);
+ src_reg_01_hi = _mm_unpackhi_epi8(src_reg_0, src_reg_1);
+ src_reg_01_lo_1 = _mm_unpacklo_epi8(src_reg_01_lo, _mm_setzero_si128());
+ src_reg_01_lo_2 = _mm_unpackhi_epi8(src_reg_01_lo, _mm_setzero_si128());
+ src_reg_01_hi_1 = _mm_unpacklo_epi8(src_reg_01_hi, _mm_setzero_si128());
+ src_reg_01_hi_2 = _mm_unpackhi_epi8(src_reg_01_hi, _mm_setzero_si128());
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12_lo = _mm_unpacklo_epi8(src_reg_1, src_reg_2);
+ src_reg_12_hi = _mm_unpackhi_epi8(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23_lo = _mm_unpacklo_epi8(src_reg_2, src_reg_3);
+ src_reg_23_hi = _mm_unpackhi_epi8(src_reg_2, src_reg_3);
+
+ // Partial output from first half
+ res_reg_m10_lo = mm_madd_packs_epi16_sse2(
+ &src_reg_m10_lo_1, &src_reg_m10_lo_2, &kernel_reg_23);
+
+ res_reg_01_lo = mm_madd_packs_epi16_sse2(&src_reg_01_lo_1, &src_reg_01_lo_2,
+ &kernel_reg_23);
+
+ src_reg_12_lo_1 = _mm_unpacklo_epi8(src_reg_12_lo, _mm_setzero_si128());
+ src_reg_12_lo_2 = _mm_unpackhi_epi8(src_reg_12_lo, _mm_setzero_si128());
+ res_reg_12_lo = mm_madd_packs_epi16_sse2(&src_reg_12_lo_1, &src_reg_12_lo_2,
+ &kernel_reg_45);
+
+ src_reg_23_lo_1 = _mm_unpacklo_epi8(src_reg_23_lo, _mm_setzero_si128());
+ src_reg_23_lo_2 = _mm_unpackhi_epi8(src_reg_23_lo, _mm_setzero_si128());
+ res_reg_23_lo = mm_madd_packs_epi16_sse2(&src_reg_23_lo_1, &src_reg_23_lo_2,
+ &kernel_reg_45);
+
+ // Add to get first half of the results
+ res_reg_m1012_lo = _mm_adds_epi16(res_reg_m10_lo, res_reg_12_lo);
+ res_reg_0123_lo = _mm_adds_epi16(res_reg_01_lo, res_reg_23_lo);
+
+ // Now repeat everything again for the second half
+ // Partial output for second half
+ res_reg_m10_hi = mm_madd_packs_epi16_sse2(
+ &src_reg_m10_hi_1, &src_reg_m10_hi_2, &kernel_reg_23);
+
+ res_reg_01_hi = mm_madd_packs_epi16_sse2(&src_reg_01_hi_1, &src_reg_01_hi_2,
+ &kernel_reg_23);
+
+ src_reg_12_hi_1 = _mm_unpacklo_epi8(src_reg_12_hi, _mm_setzero_si128());
+ src_reg_12_hi_2 = _mm_unpackhi_epi8(src_reg_12_hi, _mm_setzero_si128());
+ res_reg_12_hi = mm_madd_packs_epi16_sse2(&src_reg_12_hi_1, &src_reg_12_hi_2,
+ &kernel_reg_45);
+
+ src_reg_23_hi_1 = _mm_unpacklo_epi8(src_reg_23_hi, _mm_setzero_si128());
+ src_reg_23_hi_2 = _mm_unpackhi_epi8(src_reg_23_hi, _mm_setzero_si128());
+ res_reg_23_hi = mm_madd_packs_epi16_sse2(&src_reg_23_hi_1, &src_reg_23_hi_2,
+ &kernel_reg_45);
+
+ // Second half of the results
+ res_reg_m1012_hi = _mm_adds_epi16(res_reg_m10_hi, res_reg_12_hi);
+ res_reg_0123_hi = _mm_adds_epi16(res_reg_01_hi, res_reg_23_hi);
+
+ // Round the words
+ res_reg_m1012_lo = mm_round_epi16_sse2(&res_reg_m1012_lo, &reg_32, 6);
+ res_reg_0123_lo = mm_round_epi16_sse2(&res_reg_0123_lo, &reg_32, 6);
+ res_reg_m1012_hi = mm_round_epi16_sse2(&res_reg_m1012_hi, &reg_32, 6);
+ res_reg_0123_hi = mm_round_epi16_sse2(&res_reg_0123_hi, &reg_32, 6);
+
+ // Combine to get the result
+ res_reg_m1012 = _mm_packus_epi16(res_reg_m1012_lo, res_reg_m1012_hi);
+ res_reg_0123 = _mm_packus_epi16(res_reg_0123_lo, res_reg_0123_hi);
+
+ _mm_store_si128((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_store_si128((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10_lo_1 = src_reg_12_lo_1;
+ src_reg_m10_lo_2 = src_reg_12_lo_2;
+ src_reg_m10_hi_1 = src_reg_12_hi_1;
+ src_reg_m10_hi_2 = src_reg_12_hi_2;
+ src_reg_01_lo_1 = src_reg_23_lo_1;
+ src_reg_01_lo_2 = src_reg_23_lo_2;
+ src_reg_01_hi_1 = src_reg_23_hi_1;
+ src_reg_01_hi_2 = src_reg_23_hi_2;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d8_h4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shift_1, src_reg_shift_2, src_reg_shift_3;
+ __m128i dst_first;
+ __m128i even, odd;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ for (h = height; h > 0; --h) {
+ // We will load multiple shifted versions of the row and shuffle them into
+ // 16-bit words of the form
+ // ... s[2] s[1] s[0] s[-1]
+ // ... s[4] s[3] s[2] s[1]
+ // Then we call multiply and add to get partial results
+    // s[2]k[3]+s[1]k[2] s[0]k[3]+s[-1]k[2]
+    // s[4]k[5]+s[3]k[4] s[2]k[5]+s[1]k[4]
+ // The two results are then added together to get the even output
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_1 = _mm_srli_si128(src_reg, 1);
+ src_reg_shift_2 = _mm_srli_si128(src_reg, 2);
+ src_reg_shift_3 = _mm_srli_si128(src_reg, 3);
+
+ // Output 6 4 2 0
+ even = mm_madd_add_epi8_sse2(&src_reg, &src_reg_shift_2, &kernel_reg_23,
+ &kernel_reg_45);
+
+ // Output 7 5 3 1
+ odd = mm_madd_add_epi8_sse2(&src_reg_shift_1, &src_reg_shift_3,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Combine to get the first half of the dst
+ dst_first = mm_zip_epi32_sse2(&even, &odd);
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+
+ // Saturate and convert to 8-bit words
+ dst_first = _mm_packus_epi16(dst_first, _mm_setzero_si128());
+
+ _mm_storel_epi64((__m128i *)dst_ptr, dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_filter_block1d8_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10_lo, src_reg_01_lo;
+ __m128i src_reg_12_lo, src_reg_23_lo;
+ // Half of half of the interleaved rows
+ __m128i src_reg_m10_lo_1, src_reg_m10_lo_2;
+ __m128i src_reg_01_lo_1, src_reg_01_lo_2;
+ __m128i src_reg_12_lo_1, src_reg_12_lo_2;
+ __m128i src_reg_23_lo_1, src_reg_23_lo_2;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10_lo, res_reg_01_lo, res_reg_12_lo, res_reg_23_lo;
+ __m128i res_reg_m1012, res_reg_0123;
+ __m128i res_reg_m1012_lo, res_reg_0123_lo;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+  // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ // We will load two rows of pixels as 8-bit words, rearrange them as 16-bit
+ // words,
+ // shuffle the data into the form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,7] s[-1,7] s[0,6] s[-1,6]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // ... s[0,13] s[-1,13] s[0,12] s[-1,12]
+ // so that we can call multiply and add with the kernel to get 32-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10_lo = _mm_unpacklo_epi8(src_reg_m1, src_reg_0);
+ src_reg_m10_lo_1 = _mm_unpacklo_epi8(src_reg_m10_lo, _mm_setzero_si128());
+ src_reg_m10_lo_2 = _mm_unpackhi_epi8(src_reg_m10_lo, _mm_setzero_si128());
+
+ // More shuffling
+ src_reg_1 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01_lo = _mm_unpacklo_epi8(src_reg_0, src_reg_1);
+ src_reg_01_lo_1 = _mm_unpacklo_epi8(src_reg_01_lo, _mm_setzero_si128());
+ src_reg_01_lo_2 = _mm_unpackhi_epi8(src_reg_01_lo, _mm_setzero_si128());
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12_lo = _mm_unpacklo_epi8(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23_lo = _mm_unpacklo_epi8(src_reg_2, src_reg_3);
+
+ // Partial output
+ res_reg_m10_lo = mm_madd_packs_epi16_sse2(
+ &src_reg_m10_lo_1, &src_reg_m10_lo_2, &kernel_reg_23);
+
+ res_reg_01_lo = mm_madd_packs_epi16_sse2(&src_reg_01_lo_1, &src_reg_01_lo_2,
+ &kernel_reg_23);
+
+ src_reg_12_lo_1 = _mm_unpacklo_epi8(src_reg_12_lo, _mm_setzero_si128());
+ src_reg_12_lo_2 = _mm_unpackhi_epi8(src_reg_12_lo, _mm_setzero_si128());
+ res_reg_12_lo = mm_madd_packs_epi16_sse2(&src_reg_12_lo_1, &src_reg_12_lo_2,
+ &kernel_reg_45);
+
+ src_reg_23_lo_1 = _mm_unpacklo_epi8(src_reg_23_lo, _mm_setzero_si128());
+ src_reg_23_lo_2 = _mm_unpackhi_epi8(src_reg_23_lo, _mm_setzero_si128());
+ res_reg_23_lo = mm_madd_packs_epi16_sse2(&src_reg_23_lo_1, &src_reg_23_lo_2,
+ &kernel_reg_45);
+
+ // Add to get results
+ res_reg_m1012_lo = _mm_adds_epi16(res_reg_m10_lo, res_reg_12_lo);
+ res_reg_0123_lo = _mm_adds_epi16(res_reg_01_lo, res_reg_23_lo);
+
+ // Round the words
+ res_reg_m1012_lo = mm_round_epi16_sse2(&res_reg_m1012_lo, &reg_32, 6);
+ res_reg_0123_lo = mm_round_epi16_sse2(&res_reg_0123_lo, &reg_32, 6);
+
+ // Convert to 8-bit words
+ res_reg_m1012 = _mm_packus_epi16(res_reg_m1012_lo, _mm_setzero_si128());
+ res_reg_0123 = _mm_packus_epi16(res_reg_0123_lo, _mm_setzero_si128());
+
+ // Save only half of the register (8 words)
+ _mm_storel_epi64((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_storel_epi64((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10_lo_1 = src_reg_12_lo_1;
+ src_reg_m10_lo_2 = src_reg_12_lo_2;
+ src_reg_01_lo_1 = src_reg_23_lo_1;
+ src_reg_01_lo_2 = src_reg_23_lo_2;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d4_h4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shift_1, src_reg_shift_2, src_reg_shift_3;
+ __m128i dst_first;
+ __m128i tmp_0, tmp_1;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ for (h = height; h > 0; --h) {
+ // We will load multiple shifted versions of the row and shuffle them into
+ // 16-bit words of the form
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Then we call multiply and add to get partial results
+    // s[1]k[3]+s[0]k[2] s[0]k[3]+s[-1]k[2]
+    // s[3]k[5]+s[2]k[4] s[2]k[5]+s[1]k[4]
+ // The two results are then added together to get the output
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_1 = _mm_srli_si128(src_reg, 1);
+ src_reg_shift_2 = _mm_srli_si128(src_reg, 2);
+ src_reg_shift_3 = _mm_srli_si128(src_reg, 3);
+
+ // Convert to 16-bit words
+ src_reg = _mm_unpacklo_epi8(src_reg, _mm_setzero_si128());
+ src_reg_shift_1 = _mm_unpacklo_epi8(src_reg_shift_1, _mm_setzero_si128());
+ src_reg_shift_2 = _mm_unpacklo_epi8(src_reg_shift_2, _mm_setzero_si128());
+ src_reg_shift_3 = _mm_unpacklo_epi8(src_reg_shift_3, _mm_setzero_si128());
+
+ // Shuffle into the right format
+ tmp_0 = _mm_unpacklo_epi32(src_reg, src_reg_shift_1);
+ tmp_1 = _mm_unpacklo_epi32(src_reg_shift_2, src_reg_shift_3);
+
+ // Partial output
+ tmp_0 = _mm_madd_epi16(tmp_0, kernel_reg_23);
+ tmp_1 = _mm_madd_epi16(tmp_1, kernel_reg_45);
+
+ // Output
+ dst_first = _mm_add_epi32(tmp_0, tmp_1);
+ dst_first = _mm_packs_epi32(dst_first, _mm_setzero_si128());
+
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+
+ // Saturate and convert to 8-bit words
+ dst_first = _mm_packus_epi16(dst_first, _mm_setzero_si128());
+
+ *((uint32_t *)(dst_ptr)) = _mm_cvtsi128_si32(dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_filter_block1d4_v4_sse2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10_lo, src_reg_01_lo;
+ __m128i src_reg_12_lo, src_reg_23_lo;
+ // Half of half of the interleaved rows
+ __m128i src_reg_m10_lo_1;
+ __m128i src_reg_01_lo_1;
+ __m128i src_reg_12_lo_1;
+ __m128i src_reg_23_lo_1;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10_lo, res_reg_01_lo, res_reg_12_lo, res_reg_23_lo;
+ __m128i res_reg_m1012, res_reg_0123;
+ __m128i res_reg_m1012_lo, res_reg_0123_lo;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ const __m128i reg_zero = _mm_setzero_si128();
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+  // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ // We will load two rows of pixels as 8-bit words, rearrange them as 16-bit
+ // words,
+ // shuffle the data into the form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,7] s[-1,7] s[0,6] s[-1,6]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // ... s[0,13] s[-1,13] s[0,12] s[-1,12]
+ // so that we can call multiply and add with the kernel to get 32-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10_lo = _mm_unpacklo_epi8(src_reg_m1, src_reg_0);
+ src_reg_m10_lo_1 = _mm_unpacklo_epi8(src_reg_m10_lo, _mm_setzero_si128());
+
+ // More shuffling
+ src_reg_1 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01_lo = _mm_unpacklo_epi8(src_reg_0, src_reg_1);
+ src_reg_01_lo_1 = _mm_unpacklo_epi8(src_reg_01_lo, _mm_setzero_si128());
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12_lo = _mm_unpacklo_epi8(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23_lo = _mm_unpacklo_epi8(src_reg_2, src_reg_3);
+
+ // Partial output
+ res_reg_m10_lo =
+ mm_madd_packs_epi16_sse2(&src_reg_m10_lo_1, &reg_zero, &kernel_reg_23);
+
+ res_reg_01_lo =
+ mm_madd_packs_epi16_sse2(&src_reg_01_lo_1, &reg_zero, &kernel_reg_23);
+
+ src_reg_12_lo_1 = _mm_unpacklo_epi8(src_reg_12_lo, _mm_setzero_si128());
+ res_reg_12_lo =
+ mm_madd_packs_epi16_sse2(&src_reg_12_lo_1, &reg_zero, &kernel_reg_45);
+
+ src_reg_23_lo_1 = _mm_unpacklo_epi8(src_reg_23_lo, _mm_setzero_si128());
+ res_reg_23_lo =
+ mm_madd_packs_epi16_sse2(&src_reg_23_lo_1, &reg_zero, &kernel_reg_45);
+
+ // Add to get results
+ res_reg_m1012_lo = _mm_adds_epi16(res_reg_m10_lo, res_reg_12_lo);
+ res_reg_0123_lo = _mm_adds_epi16(res_reg_01_lo, res_reg_23_lo);
+
+ // Round the words
+ res_reg_m1012_lo = mm_round_epi16_sse2(&res_reg_m1012_lo, &reg_32, 6);
+ res_reg_0123_lo = mm_round_epi16_sse2(&res_reg_0123_lo, &reg_32, 6);
+
+ // Convert to 8-bit words
+ res_reg_m1012 = _mm_packus_epi16(res_reg_m1012_lo, reg_zero);
+ res_reg_0123 = _mm_packus_epi16(res_reg_0123_lo, reg_zero);
+
+    // Save only the lowest 32 bits (4 output pixels) of each register
+ *((uint32_t *)(dst_ptr)) = _mm_cvtsi128_si32(res_reg_m1012);
+ *((uint32_t *)(dst_ptr + dst_stride)) = _mm_cvtsi128_si32(res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10_lo_1 = src_reg_12_lo_1;
+ src_reg_01_lo_1 = src_reg_23_lo_1;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_highbd_filter_block1d4_h4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load multiple shifted versions of the row and shuffle them into
+ // 16-bit words of the form
+ // ... s[2] s[1] s[0] s[-1]
+ // ... s[4] s[3] s[2] s[1]
+ // Then we call multiply and add to get partial results
+  // s[2]k[3]+s[1]k[2] s[0]k[3]+s[-1]k[2]
+  // s[4]k[5]+s[3]k[4] s[2]k[5]+s[1]k[4]
+ // The two results are then added together to get the even output
+
+ __m128i src_reg, src_reg_shift_1, src_reg_shift_2, src_reg_shift_3;
+ __m128i res_reg;
+ __m128i even, odd;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_round =
+ _mm_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m128i reg_max = _mm_set1_epi16((1 << bd) - 1);
+ const __m128i reg_zero = _mm_setzero_si128();
+ int h;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ for (h = height; h > 0; --h) {
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_1 = _mm_srli_si128(src_reg, 2);
+ src_reg_shift_2 = _mm_srli_si128(src_reg, 4);
+ src_reg_shift_3 = _mm_srli_si128(src_reg, 6);
+
+ // Output 2 0
+ even = mm_madd_add_epi16_sse2(&src_reg, &src_reg_shift_2, &kernel_reg_23,
+ &kernel_reg_45);
+
+ // Output 3 1
+ odd = mm_madd_add_epi16_sse2(&src_reg_shift_1, &src_reg_shift_3,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Combine to get the first half of the dst
+ res_reg = _mm_unpacklo_epi32(even, odd);
+ res_reg = mm_round_epi32_sse2(&res_reg, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg = _mm_packs_epi32(res_reg, reg_zero);
+
+ // Saturate the result and save
+ res_reg = _mm_min_epi16(res_reg, reg_max);
+ res_reg = _mm_max_epi16(res_reg, reg_zero);
+ _mm_storel_epi64((__m128i *)dst_ptr, res_reg);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_highbd_filter_block1d4_v4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load two rows of pixels as 16-bit words, and shuffle them into the
+ // form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,7] s[-1,7] s[0,6] s[-1,6]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // ... s[0,13] s[-1,13] s[0,12] s[-1,12]
+ // so that we can call multiply and add with the kernel to get 32-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10, src_reg_01;
+ __m128i src_reg_12, src_reg_23;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10, res_reg_01, res_reg_12, res_reg_23;
+ __m128i res_reg_m1012, res_reg_0123;
+
+ const __m128i reg_round =
+ _mm_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m128i reg_max = _mm_set1_epi16((1 << bd) - 1);
+ const __m128i reg_zero = _mm_setzero_si128();
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadl_epi64((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10 = _mm_unpacklo_epi16(src_reg_m1, src_reg_0);
+
+ // More shuffling
+ src_reg_1 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01 = _mm_unpacklo_epi16(src_reg_0, src_reg_1);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12 = _mm_unpacklo_epi16(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23 = _mm_unpacklo_epi16(src_reg_2, src_reg_3);
+
+ // Partial output
+ res_reg_m10 = _mm_madd_epi16(src_reg_m10, kernel_reg_23);
+ res_reg_01 = _mm_madd_epi16(src_reg_01, kernel_reg_23);
+ res_reg_12 = _mm_madd_epi16(src_reg_12, kernel_reg_45);
+ res_reg_23 = _mm_madd_epi16(src_reg_23, kernel_reg_45);
+
+ // Add to get results
+ res_reg_m1012 = _mm_add_epi32(res_reg_m10, res_reg_12);
+ res_reg_0123 = _mm_add_epi32(res_reg_01, res_reg_23);
+
+ // Round the words
+ res_reg_m1012 =
+ mm_round_epi32_sse2(&res_reg_m1012, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg_0123 =
+ mm_round_epi32_sse2(&res_reg_0123, &reg_round, CONV8_ROUNDING_BITS);
+
+ res_reg_m1012 = _mm_packs_epi32(res_reg_m1012, reg_zero);
+ res_reg_0123 = _mm_packs_epi32(res_reg_0123, reg_zero);
+
+ // Saturate according to bit depth
+ res_reg_m1012 = _mm_min_epi16(res_reg_m1012, reg_max);
+ res_reg_0123 = _mm_min_epi16(res_reg_0123, reg_max);
+ res_reg_m1012 = _mm_max_epi16(res_reg_m1012, reg_zero);
+ res_reg_0123 = _mm_max_epi16(res_reg_0123, reg_zero);
+
+ // Save only half of the register (8 words)
+ _mm_storel_epi64((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_storel_epi64((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10 = src_reg_12;
+ src_reg_01 = src_reg_23;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_highbd_filter_block1d8_h4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load multiple shifted versions of the row and shuffle them into
+ // 16-bit words of the form
+ // ... s[2] s[1] s[0] s[-1]
+ // ... s[4] s[3] s[2] s[1]
+ // Then we call multiply and add to get partial results
+  // s[2]k[3]+s[1]k[2] s[0]k[3]+s[-1]k[2]
+  // s[4]k[5]+s[3]k[4] s[2]k[5]+s[1]k[4]
+ // The two results are then added together for the first half of even
+ // output.
+  // Repeat multiple times to get the whole output
+
+ __m128i src_reg, src_reg_next, src_reg_shift_1, src_reg_shift_2,
+ src_reg_shift_3;
+ __m128i res_reg;
+ __m128i even, odd;
+ __m128i tmp_0, tmp_1;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_round =
+ _mm_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m128i reg_max = _mm_set1_epi16((1 << bd) - 1);
+ const __m128i reg_zero = _mm_setzero_si128();
+ int h;
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ for (h = height; h > 0; --h) {
+ // We will put first half in the first half of the reg, and second half in
+ // second half
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_next = _mm_loadu_si128((const __m128i *)(src_ptr + 5));
+
+ // Output 6 4 2 0
+ tmp_0 = _mm_srli_si128(src_reg, 4);
+ tmp_1 = _mm_srli_si128(src_reg_next, 2);
+ src_reg_shift_2 = _mm_unpacklo_epi64(tmp_0, tmp_1);
+ even = mm_madd_add_epi16_sse2(&src_reg, &src_reg_shift_2, &kernel_reg_23,
+ &kernel_reg_45);
+
+ // Output 7 5 3 1
+ tmp_0 = _mm_srli_si128(src_reg, 2);
+ tmp_1 = src_reg_next;
+ src_reg_shift_1 = _mm_unpacklo_epi64(tmp_0, tmp_1);
+
+ tmp_0 = _mm_srli_si128(src_reg, 6);
+ tmp_1 = _mm_srli_si128(src_reg_next, 4);
+ src_reg_shift_3 = _mm_unpacklo_epi64(tmp_0, tmp_1);
+
+ odd = mm_madd_add_epi16_sse2(&src_reg_shift_1, &src_reg_shift_3,
+ &kernel_reg_23, &kernel_reg_45);
+
+ // Combine to get the first half of the dst
+ even = mm_round_epi32_sse2(&even, &reg_round, CONV8_ROUNDING_BITS);
+ odd = mm_round_epi32_sse2(&odd, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg = mm_zip_epi32_sse2(&even, &odd);
+
+ // Saturate the result and save
+ res_reg = _mm_min_epi16(res_reg, reg_max);
+ res_reg = _mm_max_epi16(res_reg, reg_zero);
+
+ _mm_store_si128((__m128i *)dst_ptr, res_reg);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_highbd_filter_block1d8_v4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ // We will load two rows of pixels as 16-bit words, and shuffle them into the
+ // form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,7] s[-1,7] s[0,6] s[-1,6]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // ... s[0,13] s[-1,13] s[0,12] s[-1,12]
+ // so that we can call multiply and add with the kernel to get 32-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10_lo, src_reg_01_lo, src_reg_m10_hi, src_reg_01_hi;
+ __m128i src_reg_12_lo, src_reg_23_lo, src_reg_12_hi, src_reg_23_hi;
+
+ // Result after multiply and add
+ __m128i res_reg_m10_lo, res_reg_01_lo, res_reg_12_lo, res_reg_23_lo;
+ __m128i res_reg_m10_hi, res_reg_01_hi, res_reg_12_hi, res_reg_23_hi;
+ __m128i res_reg_m1012, res_reg_0123;
+ __m128i res_reg_m1012_lo, res_reg_0123_lo;
+ __m128i res_reg_m1012_hi, res_reg_0123_hi;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ const __m128i reg_round =
+ _mm_set1_epi32(CONV8_ROUNDING_NUM); // Used for rounding
+ const __m128i reg_max = _mm_set1_epi16((1 << bd) - 1);
+ const __m128i reg_zero = _mm_setzero_si128();
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_23 = extract_quarter_2_epi16_sse2(&kernel_reg);
+ kernel_reg_45 = extract_quarter_3_epi16_sse2(&kernel_reg);
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10_lo = _mm_unpacklo_epi16(src_reg_m1, src_reg_0);
+ src_reg_m10_hi = _mm_unpackhi_epi16(src_reg_m1, src_reg_0);
+
+ // More shuffling
+ src_reg_1 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01_lo = _mm_unpacklo_epi16(src_reg_0, src_reg_1);
+ src_reg_01_hi = _mm_unpackhi_epi16(src_reg_0, src_reg_1);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12_lo = _mm_unpacklo_epi16(src_reg_1, src_reg_2);
+ src_reg_12_hi = _mm_unpackhi_epi16(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23_lo = _mm_unpacklo_epi16(src_reg_2, src_reg_3);
+ src_reg_23_hi = _mm_unpackhi_epi16(src_reg_2, src_reg_3);
+
+ // Partial output for first half
+ res_reg_m10_lo = _mm_madd_epi16(src_reg_m10_lo, kernel_reg_23);
+ res_reg_01_lo = _mm_madd_epi16(src_reg_01_lo, kernel_reg_23);
+ res_reg_12_lo = _mm_madd_epi16(src_reg_12_lo, kernel_reg_45);
+ res_reg_23_lo = _mm_madd_epi16(src_reg_23_lo, kernel_reg_45);
+
+ // Add to get results
+ res_reg_m1012_lo = _mm_add_epi32(res_reg_m10_lo, res_reg_12_lo);
+ res_reg_0123_lo = _mm_add_epi32(res_reg_01_lo, res_reg_23_lo);
+
+ // Round the words
+ res_reg_m1012_lo =
+ mm_round_epi32_sse2(&res_reg_m1012_lo, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg_0123_lo =
+ mm_round_epi32_sse2(&res_reg_0123_lo, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Partial output for second half
+ res_reg_m10_hi = _mm_madd_epi16(src_reg_m10_hi, kernel_reg_23);
+ res_reg_01_hi = _mm_madd_epi16(src_reg_01_hi, kernel_reg_23);
+ res_reg_12_hi = _mm_madd_epi16(src_reg_12_hi, kernel_reg_45);
+ res_reg_23_hi = _mm_madd_epi16(src_reg_23_hi, kernel_reg_45);
+
+ // Add to get results
+ res_reg_m1012_hi = _mm_add_epi32(res_reg_m10_hi, res_reg_12_hi);
+ res_reg_0123_hi = _mm_add_epi32(res_reg_01_hi, res_reg_23_hi);
+
+ // Round the words
+ res_reg_m1012_hi =
+ mm_round_epi32_sse2(&res_reg_m1012_hi, &reg_round, CONV8_ROUNDING_BITS);
+ res_reg_0123_hi =
+ mm_round_epi32_sse2(&res_reg_0123_hi, &reg_round, CONV8_ROUNDING_BITS);
+
+ // Combine the two halves
+ res_reg_m1012 = _mm_packs_epi32(res_reg_m1012_lo, res_reg_m1012_hi);
+ res_reg_0123 = _mm_packs_epi32(res_reg_0123_lo, res_reg_0123_hi);
+
+ // Saturate according to bit depth
+ res_reg_m1012 = _mm_min_epi16(res_reg_m1012, reg_max);
+ res_reg_0123 = _mm_min_epi16(res_reg_0123, reg_max);
+ res_reg_m1012 = _mm_max_epi16(res_reg_m1012, reg_zero);
+ res_reg_0123 = _mm_max_epi16(res_reg_0123, reg_zero);
+
+ // Save the whole register (8 words)
+ _mm_store_si128((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_store_si128((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10_lo = src_reg_12_lo;
+ src_reg_m10_hi = src_reg_12_hi;
+ src_reg_01_lo = src_reg_23_lo;
+ src_reg_01_hi = src_reg_23_hi;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_highbd_filter_block1d16_h4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride,
+ uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ vpx_highbd_filter_block1d8_h4_sse2(src_ptr, src_stride, dst_ptr, dst_stride,
+ height, kernel, bd);
+ vpx_highbd_filter_block1d8_h4_sse2(src_ptr + 8, src_stride, dst_ptr + 8,
+ dst_stride, height, kernel, bd);
+}
+
+void vpx_highbd_filter_block1d16_v4_sse2(const uint16_t *src_ptr,
+ ptrdiff_t src_stride,
+ uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, uint32_t height,
+ const int16_t *kernel, int bd) {
+ vpx_highbd_filter_block1d8_v4_sse2(src_ptr, src_stride, dst_ptr, dst_stride,
+ height, kernel, bd);
+ vpx_highbd_filter_block1d8_v4_sse2(src_ptr + 8, src_stride, dst_ptr + 8,
+ dst_stride, height, kernel, bd);
+}
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
index d0919695ce9..b55b7e57a80 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c
@@ -9,10 +9,12 @@
*/
#include <immintrin.h>
+#include <stdio.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/convolve.h"
#include "vpx_dsp/x86/convolve_avx2.h"
+#include "vpx_dsp/x86/convolve_sse2.h"
#include "vpx_ports/mem.h"
// filters for 16_h8
@@ -326,6 +328,576 @@ static void vpx_filter_block1d16_v8_avg_avx2(
height, filter, 1);
}
+void vpx_filter_block1d16_h4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into two registers in the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum. Calling add gives us
+ // first half of the output. Repeat again to get the second half of the
+ // output. Finally we shuffle again to combine the two outputs.
+ // Since avx2 allows us to use 256-bit buffer, we can do this two rows at a
+ // time.
+
+ __m128i kernel_reg; // Kernel
+ __m256i kernel_reg_256, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+ const ptrdiff_t unrolled_src_stride = src_stride << 1;
+ const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
+ int h;
+
+ __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m256i dst_first, dst_second;
+ __m256i tmp_0, tmp_1;
+ __m256i idx_shift_0 =
+ _mm256_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 0, 1, 1,
+ 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8);
+ __m256i idx_shift_2 =
+ _mm256_setr_epi8(2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_256 = _mm256_broadcastsi128_si256(kernel_reg);
+ kernel_reg_23 =
+ _mm256_shuffle_epi8(kernel_reg_256, _mm256_set1_epi16(0x0302u));
+ kernel_reg_45 =
+ _mm256_shuffle_epi8(kernel_reg_256, _mm256_set1_epi16(0x0504u));
+
+ for (h = height; h >= 2; h -= 2) {
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Partial result for first half
+ tmp_0 = _mm256_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm256_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_first = _mm256_adds_epi16(tmp_0, tmp_1);
+
+ // Do again to get the second half of dst
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr + 8, src_ptr + src_stride + 8);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Partial result for second half
+ tmp_0 = _mm256_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm256_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_second = _mm256_adds_epi16(tmp_0, tmp_1);
+
+ // Round each result
+ dst_first = mm256_round_epi16(&dst_first, &reg_32, 6);
+ dst_second = mm256_round_epi16(&dst_second, &reg_32, 6);
+
+ // Finally combine to get the final dst
+ dst_first = _mm256_packus_epi16(dst_first, dst_second);
+ mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &dst_first);
+
+ src_ptr += unrolled_src_stride;
+ dst_ptr += unrolled_dst_stride;
+ }
+
+ // Repeat for the last row if needed
+ if (h > 0) {
+ src_reg = _mm256_loadu_si256((const __m256i *)src_ptr);
+ // Reorder into 2 1 1 2
+ src_reg = _mm256_permute4x64_epi64(src_reg, 0x94);
+
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ tmp_0 = _mm256_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm256_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_first = _mm256_adds_epi16(tmp_0, tmp_1);
+
+ dst_first = mm256_round_epi16(&dst_first, &reg_32, 6);
+
+ dst_first = _mm256_packus_epi16(dst_first, dst_first);
+ dst_first = _mm256_permute4x64_epi64(dst_first, 0x8);
+
+ _mm_store_si128((__m128i *)dst_ptr, _mm256_castsi256_si128(dst_first));
+ }
+}
+
+void vpx_filter_block1d16_v4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[1,0] s[0,0] s[0,0] s[-1,0]
+ // so that we can call multiply and add with the kernel partial output. Then
+ // we can call add with another row to get the output.
+
+ // Register for source s[-1:3, :]
+ __m256i src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
+ __m256i src_reg_m1001_lo, src_reg_m1001_hi, src_reg_1223_lo, src_reg_1223_hi;
+
+ __m128i kernel_reg; // Kernel
+ __m256i kernel_reg_256, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m256i res_reg_m1001_lo, res_reg_1223_lo, res_reg_m1001_hi, res_reg_1223_hi;
+ __m256i res_reg, res_reg_lo, res_reg_hi;
+
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_256 = _mm256_broadcastsi128_si256(kernel_reg);
+ kernel_reg_23 =
+ _mm256_shuffle_epi8(kernel_reg_256, _mm256_set1_epi16(0x0302u));
+ kernel_reg_45 =
+ _mm256_shuffle_epi8(kernel_reg_256, _mm256_set1_epi16(0x0504u));
+
+ // Row -1 to row 0
+ src_reg_m10 = mm256_loadu2_si128((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+
+ // Row 0 to row 1
+ src_reg_1 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
+ src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);
+
+ // First three rows
+ src_reg_m1001_lo = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);
+ src_reg_m1001_hi = _mm256_unpackhi_epi8(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3)));
+
+ src_reg_12 = _mm256_inserti128_si256(src_reg_1,
+ _mm256_castsi256_si128(src_reg_2), 1);
+
+ src_reg_3 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4)));
+
+ src_reg_23 = _mm256_inserti128_si256(src_reg_2,
+ _mm256_castsi256_si128(src_reg_3), 1);
+
+ // Last three rows
+ src_reg_1223_lo = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);
+ src_reg_1223_hi = _mm256_unpackhi_epi8(src_reg_12, src_reg_23);
+
+ // Output from first half
+ res_reg_m1001_lo = _mm256_maddubs_epi16(src_reg_m1001_lo, kernel_reg_23);
+ res_reg_1223_lo = _mm256_maddubs_epi16(src_reg_1223_lo, kernel_reg_45);
+ res_reg_lo = _mm256_adds_epi16(res_reg_m1001_lo, res_reg_1223_lo);
+
+ // Output from second half
+ res_reg_m1001_hi = _mm256_maddubs_epi16(src_reg_m1001_hi, kernel_reg_23);
+ res_reg_1223_hi = _mm256_maddubs_epi16(src_reg_1223_hi, kernel_reg_45);
+ res_reg_hi = _mm256_adds_epi16(res_reg_m1001_hi, res_reg_1223_hi);
+
+ // Round the words
+ res_reg_lo = mm256_round_epi16(&res_reg_lo, &reg_32, 6);
+ res_reg_hi = mm256_round_epi16(&res_reg_hi, &reg_32, 6);
+
+ // Combine to get the result
+ res_reg = _mm256_packus_epi16(res_reg_lo, res_reg_hi);
+
+ // Save the result
+ mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001_lo = src_reg_1223_lo;
+ src_reg_m1001_hi = src_reg_1223_hi;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d8_h4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into two registers in the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum. Calling add gives us
+ // first half of the output. Repeat again to get the second half of the
+ // output. Finally we shuffle again to combine the two outputs.
+ // Since avx2 allows us to use 256-bit buffer, we can do this two rows at a
+ // time.
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+ const ptrdiff_t unrolled_src_stride = src_stride << 1;
+ const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
+ int h;
+
+ __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m256i dst_reg;
+ __m256i tmp_0, tmp_1;
+ __m256i idx_shift_0 =
+ _mm256_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 0, 1, 1,
+ 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8);
+ __m256i idx_shift_2 =
+ _mm256_setr_epi8(2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_128 = _mm_srai_epi16(kernel_reg_128, 1);
+ kernel_reg_128 = _mm_packs_epi16(kernel_reg_128, kernel_reg_128);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi16(0x0504u));
+
+ for (h = height; h >= 2; h -= 2) {
+ // Load the source
+ src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
+ src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Get the output
+ tmp_0 = _mm256_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm256_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_reg = _mm256_adds_epi16(tmp_0, tmp_1);
+
+ // Round the result
+ dst_reg = mm256_round_epi16(&dst_reg, &reg_32, 6);
+
+ // Finally combine to get the final dst
+ dst_reg = _mm256_packus_epi16(dst_reg, dst_reg);
+ mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &dst_reg);
+
+ src_ptr += unrolled_src_stride;
+ dst_ptr += unrolled_dst_stride;
+ }
+
+ // Repeat for the last row if needed
+ if (h > 0) {
+ __m128i src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ __m128i dst_reg;
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ __m128i tmp_0, tmp_1;
+
+ __m128i src_reg_shift_0 =
+ _mm_shuffle_epi8(src_reg, _mm256_castsi256_si128(idx_shift_0));
+ __m128i src_reg_shift_2 =
+ _mm_shuffle_epi8(src_reg, _mm256_castsi256_si128(idx_shift_2));
+
+ tmp_0 = _mm_maddubs_epi16(src_reg_shift_0,
+ _mm256_castsi256_si128(kernel_reg_23));
+ tmp_1 = _mm_maddubs_epi16(src_reg_shift_2,
+ _mm256_castsi256_si128(kernel_reg_45));
+ dst_reg = _mm_adds_epi16(tmp_0, tmp_1);
+
+ dst_reg = mm_round_epi16_sse2(&dst_reg, &reg_32, 6);
+
+ dst_reg = _mm_packus_epi16(dst_reg, _mm_setzero_si128());
+
+ _mm_storel_epi64((__m128i *)dst_ptr, dst_reg);
+ }
+}
+
+void vpx_filter_block1d8_v4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[1,0] s[0,0] s[0,0] s[-1,0]
+ // so that we can call multiply and add with the kernel partial output. Then
+ // we can call add with another row to get the output.
+
+ // Register for source s[-1:3, :]
+ __m256i src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
+ __m256i src_reg_m1001, src_reg_1223;
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg, kernel_reg_23,
+ kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m256i res_reg_m1001, res_reg_1223;
+ __m256i res_reg;
+
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_128 = _mm_srai_epi16(kernel_reg_128, 1);
+ kernel_reg_128 = _mm_packs_epi16(kernel_reg_128, kernel_reg_128);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg_23 = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi16(0x0504u));
+
+ // Row -1 to row 0
+ src_reg_m10 = mm256_loadu2_epi64((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+
+ // Row 0 to row 1
+ src_reg_1 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
+ src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);
+
+ // First three rows
+ src_reg_m1001 = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3)));
+
+ src_reg_12 = _mm256_inserti128_si256(src_reg_1,
+ _mm256_castsi256_si128(src_reg_2), 1);
+
+ src_reg_3 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4)));
+
+ src_reg_23 = _mm256_inserti128_si256(src_reg_2,
+ _mm256_castsi256_si128(src_reg_3), 1);
+
+ // Last three rows
+ src_reg_1223 = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);
+
+ // Output
+ res_reg_m1001 = _mm256_maddubs_epi16(src_reg_m1001, kernel_reg_23);
+ res_reg_1223 = _mm256_maddubs_epi16(src_reg_1223, kernel_reg_45);
+ res_reg = _mm256_adds_epi16(res_reg_m1001, res_reg_1223);
+
+ // Round the words
+ res_reg = mm256_round_epi16(&res_reg, &reg_32, 6);
+
+ // Combine to get the result
+ res_reg = _mm256_packus_epi16(res_reg, res_reg);
+
+ // Save the result
+ mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001 = src_reg_1223;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d4_h4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into a single register in the form
+ // k[5:2] k[5:2] k[5:2] k[5:2]
+ // Then we shuffle the source into
+ // s[5:2] s[4:1] s[3:0] s[2:-1]
+ // Calling multiply and add gives us half of the sum next to each other.
+ // Calling horizontal add then gives us the output.
+ // Since avx2 has 256-bit register, we can do 2 rows at a time.
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg;
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+ int h;
+ const ptrdiff_t unrolled_src_stride = src_stride << 1;
+ const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
+
+ __m256i src_reg, src_reg_shuf;
+ __m256i dst;
+ __m256i shuf_idx =
+ _mm256_setr_epi8(0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, 0, 1, 2,
+ 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_128 = _mm_srai_epi16(kernel_reg_128, 1);
+ kernel_reg_128 = _mm_packs_epi16(kernel_reg_128, kernel_reg_128);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi32(0x05040302u));
+
+ for (h = height; h > 1; h -= 2) {
+ // Load the source
+ src_reg = mm256_loadu2_epi64((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+ src_reg_shuf = _mm256_shuffle_epi8(src_reg, shuf_idx);
+
+ // Get the result
+ dst = _mm256_maddubs_epi16(src_reg_shuf, kernel_reg);
+ dst = _mm256_hadds_epi16(dst, _mm256_setzero_si256());
+
+ // Round result
+ dst = mm256_round_epi16(&dst, &reg_32, 6);
+
+ // Pack to 8-bits
+ dst = _mm256_packus_epi16(dst, _mm256_setzero_si256());
+
+ // Save
+ mm256_storeu2_epi32((__m128i *const)dst_ptr,
+ (__m128i *const)(dst_ptr + dst_stride), &dst);
+
+ src_ptr += unrolled_src_stride;
+ dst_ptr += unrolled_dst_stride;
+ }
+
+ if (h > 0) {
+ // Load the source
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ __m128i src_reg = _mm_loadl_epi64((const __m128i *)src_ptr);
+ __m128i src_reg_shuf =
+ _mm_shuffle_epi8(src_reg, _mm256_castsi256_si128(shuf_idx));
+
+ // Get the result
+ __m128i dst =
+ _mm_maddubs_epi16(src_reg_shuf, _mm256_castsi256_si128(kernel_reg));
+ dst = _mm_hadds_epi16(dst, _mm_setzero_si128());
+
+ // Round result
+ dst = mm_round_epi16_sse2(&dst, &reg_32, 6);
+
+ // Pack to 8-bits
+ dst = _mm_packus_epi16(dst, _mm_setzero_si128());
+ *((uint32_t *)(dst_ptr)) = _mm_cvtsi128_si32(dst);
+ }
+}
+
+void vpx_filter_block1d4_v4_avx2(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[3,0] s[2,0] s[1,0] s[0,0] s[2,0] s[1,0] s[0,0] s[-1,0]
+ // so that we can call multiply and add with the kernel to get partial output.
+ // Calling horizontal add then gives us the complete output
+
+ // Register for source s[-1:3, :]
+ __m256i src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
+ __m256i src_reg_m1001, src_reg_1223, src_reg_m1012_1023;
+
+ __m128i kernel_reg_128; // Kernel
+ __m256i kernel_reg;
+
+ // Result after multiply and add
+ __m256i res_reg;
+
+ const __m256i reg_32 = _mm256_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg_128 = _mm_srai_epi16(kernel_reg_128, 1);
+ kernel_reg_128 = _mm_packs_epi16(kernel_reg_128, kernel_reg_128);
+ kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
+ kernel_reg = _mm256_shuffle_epi8(kernel_reg, _mm256_set1_epi32(0x05040302u));
+
+ // Row -1 to row 0
+ src_reg_m10 = mm256_loadu2_si128((const __m128i *)src_ptr,
+ (const __m128i *)(src_ptr + src_stride));
+
+ // Row 0 to row 1
+ src_reg_1 = _mm256_castsi128_si256(
+ _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
+ src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);
+
+ // First three rows
+ src_reg_m1001 = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3)));
+
+ src_reg_12 = _mm256_inserti128_si256(src_reg_1,
+ _mm256_castsi256_si128(src_reg_2), 1);
+
+ src_reg_3 = _mm256_castsi128_si256(
+ _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4)));
+
+ src_reg_23 = _mm256_inserti128_si256(src_reg_2,
+ _mm256_castsi256_si128(src_reg_3), 1);
+
+ // Last three rows
+ src_reg_1223 = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);
+
+ // Combine all the rows
+ src_reg_m1012_1023 = _mm256_unpacklo_epi16(src_reg_m1001, src_reg_1223);
+
+ // Output
+ res_reg = _mm256_maddubs_epi16(src_reg_m1012_1023, kernel_reg);
+ res_reg = _mm256_hadds_epi16(res_reg, _mm256_setzero_si256());
+
+ // Round the words
+ res_reg = mm256_round_epi16(&res_reg, &reg_32, 6);
+
+ // Combine to get the result
+ res_reg = _mm256_packus_epi16(res_reg, res_reg);
+
+ // Save the result
+ mm256_storeu2_epi32((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
+ &res_reg);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001 = src_reg_1223;
+ src_reg_1 = src_reg_3;
+ }
+}
+
#if HAVE_AVX2 && HAVE_SSSE3
filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
#if ARCH_X86_64
@@ -376,6 +948,13 @@ filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
#define vpx_filter_block1d8_h2_avg_avx2 vpx_filter_block1d8_h2_avg_ssse3
#define vpx_filter_block1d4_v2_avg_avx2 vpx_filter_block1d4_v2_avg_ssse3
#define vpx_filter_block1d4_h2_avg_avx2 vpx_filter_block1d4_h2_avg_ssse3
+
+#define vpx_filter_block1d16_v4_avg_avx2 vpx_filter_block1d16_v8_avg_avx2
+#define vpx_filter_block1d16_h4_avg_avx2 vpx_filter_block1d16_h8_avg_avx2
+#define vpx_filter_block1d8_v4_avg_avx2 vpx_filter_block1d8_v8_avg_avx2
+#define vpx_filter_block1d8_h4_avg_avx2 vpx_filter_block1d8_h8_avg_avx2
+#define vpx_filter_block1d4_v4_avg_avx2 vpx_filter_block1d4_v8_avg_avx2
+#define vpx_filter_block1d4_h4_avg_avx2 vpx_filter_block1d4_h8_avg_avx2
// void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
// uint8_t *dst, ptrdiff_t dst_stride,
// const InterpKernel *filter, int x0_q4,
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
index e4f992780ff..b5f6ca57d95 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c
@@ -16,6 +16,7 @@
#include "vpx_dsp/vpx_filter.h"
#include "vpx_dsp/x86/convolve.h"
#include "vpx_dsp/x86/convolve_ssse3.h"
+#include "vpx_dsp/x86/convolve_sse2.h"
#include "vpx_dsp/x86/mem_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_mem/vpx_mem.h"
@@ -185,6 +186,488 @@ void vpx_filter_block1d8_v8_intrin_ssse3(
}
}
+void vpx_filter_block1d16_h4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into two registers in the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum. Calling add gives us
+ // first half of the output. Repeat again to get the second half of the
+ // output. Finally we shuffle again to combine the two outputs.
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m128i dst_first, dst_second;
+ __m128i tmp_0, tmp_1;
+ __m128i idx_shift_0 =
+ _mm_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8);
+ __m128i idx_shift_2 =
+ _mm_setr_epi8(2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_23 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0504u));
+
+ for (h = height; h > 0; --h) {
+ // Load the source
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_0 = _mm_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Partial result for first half
+ tmp_0 = _mm_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_first = _mm_adds_epi16(tmp_0, tmp_1);
+
+ // Do again to get the second half of dst
+ // Load the source
+ src_reg = _mm_loadu_si128((const __m128i *)(src_ptr + 8));
+ src_reg_shift_0 = _mm_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Partial result for second half
+ tmp_0 = _mm_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_second = _mm_adds_epi16(tmp_0, tmp_1);
+
+ // Round each result
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+ dst_second = mm_round_epi16_sse2(&dst_second, &reg_32, 6);
+
+ // Finally combine to get the final dst
+ dst_first = _mm_packus_epi16(dst_first, dst_second);
+ _mm_store_si128((__m128i *)dst_ptr, dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_filter_block1d16_v4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // ... s[0,9] s[-1,9] s[0,8] s[-1,8]
+ // so that we can call multiply and add with the kernel to get 16-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10_lo, src_reg_m10_hi, src_reg_01_lo, src_reg_01_hi;
+ __m128i src_reg_12_lo, src_reg_12_hi, src_reg_23_lo, src_reg_23_hi;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10_lo, res_reg_01_lo, res_reg_12_lo, res_reg_23_lo;
+ __m128i res_reg_m10_hi, res_reg_01_hi, res_reg_12_hi, res_reg_23_hi;
+ __m128i res_reg_m1012, res_reg_0123;
+ __m128i res_reg_m1012_lo, res_reg_0123_lo, res_reg_m1012_hi, res_reg_0123_hi;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_23 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0504u));
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10_lo = _mm_unpacklo_epi8(src_reg_m1, src_reg_0);
+ src_reg_m10_hi = _mm_unpackhi_epi8(src_reg_m1, src_reg_0);
+
+ // More shuffling
+ src_reg_1 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01_lo = _mm_unpacklo_epi8(src_reg_0, src_reg_1);
+ src_reg_01_hi = _mm_unpackhi_epi8(src_reg_0, src_reg_1);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12_lo = _mm_unpacklo_epi8(src_reg_1, src_reg_2);
+ src_reg_12_hi = _mm_unpackhi_epi8(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23_lo = _mm_unpacklo_epi8(src_reg_2, src_reg_3);
+ src_reg_23_hi = _mm_unpackhi_epi8(src_reg_2, src_reg_3);
+
+ // Partial output from first half
+ res_reg_m10_lo = _mm_maddubs_epi16(src_reg_m10_lo, kernel_reg_23);
+ res_reg_01_lo = _mm_maddubs_epi16(src_reg_01_lo, kernel_reg_23);
+
+ res_reg_12_lo = _mm_maddubs_epi16(src_reg_12_lo, kernel_reg_45);
+ res_reg_23_lo = _mm_maddubs_epi16(src_reg_23_lo, kernel_reg_45);
+
+ // Add to get first half of the results
+ res_reg_m1012_lo = _mm_adds_epi16(res_reg_m10_lo, res_reg_12_lo);
+ res_reg_0123_lo = _mm_adds_epi16(res_reg_01_lo, res_reg_23_lo);
+
+ // Partial output for second half
+ res_reg_m10_hi = _mm_maddubs_epi16(src_reg_m10_hi, kernel_reg_23);
+ res_reg_01_hi = _mm_maddubs_epi16(src_reg_01_hi, kernel_reg_23);
+
+ res_reg_12_hi = _mm_maddubs_epi16(src_reg_12_hi, kernel_reg_45);
+ res_reg_23_hi = _mm_maddubs_epi16(src_reg_23_hi, kernel_reg_45);
+
+ // Second half of the results
+ res_reg_m1012_hi = _mm_adds_epi16(res_reg_m10_hi, res_reg_12_hi);
+ res_reg_0123_hi = _mm_adds_epi16(res_reg_01_hi, res_reg_23_hi);
+
+ // Round the words
+ res_reg_m1012_lo = mm_round_epi16_sse2(&res_reg_m1012_lo, &reg_32, 6);
+ res_reg_0123_lo = mm_round_epi16_sse2(&res_reg_0123_lo, &reg_32, 6);
+ res_reg_m1012_hi = mm_round_epi16_sse2(&res_reg_m1012_hi, &reg_32, 6);
+ res_reg_0123_hi = mm_round_epi16_sse2(&res_reg_0123_hi, &reg_32, 6);
+
+ // Combine to get the result
+ res_reg_m1012 = _mm_packus_epi16(res_reg_m1012_lo, res_reg_m1012_hi);
+ res_reg_0123 = _mm_packus_epi16(res_reg_0123_lo, res_reg_0123_hi);
+
+ _mm_store_si128((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_store_si128((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10_lo = src_reg_12_lo;
+ src_reg_m10_hi = src_reg_12_hi;
+ src_reg_01_lo = src_reg_23_lo;
+ src_reg_01_hi = src_reg_23_hi;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d8_h4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into two registers in the form
+ // ... k[3] k[2] k[3] k[2]
+ // ... k[5] k[4] k[5] k[4]
+ // Then we shuffle the source into
+ // ... s[1] s[0] s[0] s[-1]
+ // ... s[3] s[2] s[2] s[1]
+ // Calling multiply and add gives us half of the sum. Calling add gives us
+ // first half of the output. Repeat again to get the second half of the
+ // output. Finally we shuffle again to combine the two outputs.
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shift_0, src_reg_shift_2;
+ __m128i dst_first;
+ __m128i tmp_0, tmp_1;
+ __m128i idx_shift_0 =
+ _mm_setr_epi8(0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8);
+ __m128i idx_shift_2 =
+ _mm_setr_epi8(2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_23 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0504u));
+
+ for (h = height; h > 0; --h) {
+ // Load the source
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shift_0 = _mm_shuffle_epi8(src_reg, idx_shift_0);
+ src_reg_shift_2 = _mm_shuffle_epi8(src_reg, idx_shift_2);
+
+ // Get the result
+ tmp_0 = _mm_maddubs_epi16(src_reg_shift_0, kernel_reg_23);
+ tmp_1 = _mm_maddubs_epi16(src_reg_shift_2, kernel_reg_45);
+ dst_first = _mm_adds_epi16(tmp_0, tmp_1);
+
+ // Round the result
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+
+ // Pack to 8-bits
+ dst_first = _mm_packus_epi16(dst_first, _mm_setzero_si128());
+ _mm_storel_epi64((__m128i *)dst_ptr, dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_filter_block1d8_v4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[0,1] s[-1,1] s[0,0] s[-1,0]
+ // so that we can call multiply and add with the kernel to get 16-bit words of
+ // the form
+ // ... s[0,1]k[3]+s[-1,1]k[2] s[0,0]k[3]+s[-1,0]k[2]
+ // Finally, we can add multiple rows together to get the desired output.
+
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source. lo is first half, hi second
+ __m128i src_reg_m10, src_reg_01;
+ __m128i src_reg_12, src_reg_23;
+
+ __m128i kernel_reg; // Kernel
+ __m128i kernel_reg_23, kernel_reg_45; // Segments of the kernel used
+
+ // Result after multiply and add
+ __m128i res_reg_m10, res_reg_01, res_reg_12, res_reg_23;
+ __m128i res_reg_m1012, res_reg_0123;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg_23 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0302u));
+ kernel_reg_45 = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi16(0x0504u));
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadl_epi64((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10 = _mm_unpacklo_epi8(src_reg_m1, src_reg_0);
+
+ // More shuffling
+ src_reg_1 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01 = _mm_unpacklo_epi8(src_reg_0, src_reg_1);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3));
+
+ src_reg_12 = _mm_unpacklo_epi8(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4));
+
+ src_reg_23 = _mm_unpacklo_epi8(src_reg_2, src_reg_3);
+
+ // Partial output
+ res_reg_m10 = _mm_maddubs_epi16(src_reg_m10, kernel_reg_23);
+ res_reg_01 = _mm_maddubs_epi16(src_reg_01, kernel_reg_23);
+
+ res_reg_12 = _mm_maddubs_epi16(src_reg_12, kernel_reg_45);
+ res_reg_23 = _mm_maddubs_epi16(src_reg_23, kernel_reg_45);
+
+ // Add to get entire output
+ res_reg_m1012 = _mm_adds_epi16(res_reg_m10, res_reg_12);
+ res_reg_0123 = _mm_adds_epi16(res_reg_01, res_reg_23);
+
+ // Round the words
+ res_reg_m1012 = mm_round_epi16_sse2(&res_reg_m1012, &reg_32, 6);
+ res_reg_0123 = mm_round_epi16_sse2(&res_reg_0123, &reg_32, 6);
+
+ // Pack from 16-bit to 8-bit
+ res_reg_m1012 = _mm_packus_epi16(res_reg_m1012, _mm_setzero_si128());
+ res_reg_0123 = _mm_packus_epi16(res_reg_0123, _mm_setzero_si128());
+
+ _mm_storel_epi64((__m128i *)dst_ptr, res_reg_m1012);
+ _mm_storel_epi64((__m128i *)(dst_ptr + dst_stride), res_reg_0123);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m10 = src_reg_12;
+ src_reg_01 = src_reg_23;
+ src_reg_1 = src_reg_3;
+ }
+}
+
+void vpx_filter_block1d4_h4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will cast the kernel from 16-bit words to 8-bit words, and then extract
+ // the middle four elements of the kernel into a single register in the form
+ // k[5:2] k[5:2] k[5:2] k[5:2]
+ // Then we shuffle the source into
+ // s[5:2] s[4:1] s[3:0] s[2:-1]
+ // Calling multiply and add gives us half of the sum next to each other.
+ // Calling horizontal add then gives us the output.
+
+ __m128i kernel_reg; // Kernel
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+ int h;
+
+ __m128i src_reg, src_reg_shuf;
+ __m128i dst_first;
+ __m128i shuf_idx =
+ _mm_setr_epi8(0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6);
+
+ // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
+ src_ptr -= 1;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi32(0x05040302u));
+
+ for (h = height; h > 0; --h) {
+ // Load the source
+ src_reg = _mm_loadu_si128((const __m128i *)src_ptr);
+ src_reg_shuf = _mm_shuffle_epi8(src_reg, shuf_idx);
+
+ // Get the result
+ dst_first = _mm_maddubs_epi16(src_reg_shuf, kernel_reg);
+ dst_first = _mm_hadds_epi16(dst_first, _mm_setzero_si128());
+
+ // Round the result
+ dst_first = mm_round_epi16_sse2(&dst_first, &reg_32, 6);
+
+ // Pack to 8-bits
+ dst_first = _mm_packus_epi16(dst_first, _mm_setzero_si128());
+ *((uint32_t *)(dst_ptr)) = _mm_cvtsi128_si32(dst_first);
+
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+void vpx_filter_block1d4_v4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_stride,
+ uint8_t *dst_ptr, ptrdiff_t dst_stride,
+ uint32_t height, const int16_t *kernel) {
+ // We will load two rows of pixels as 8-bit words, rearrange them into the
+ // form
+ // ... s[2,0] s[1,0] s[0,0] s[-1,0]
+ // so that we can call multiply and add with the kernel partial output. Then
+ // we can call horizontal add to get the output.
+ // Finally, we can add multiple rows together to get the desired output.
+ // This is done two rows at a time
+
+ // Register for source s[-1:3, :]
+ __m128i src_reg_m1, src_reg_0, src_reg_1, src_reg_2, src_reg_3;
+ // Interleaved rows of the source.
+ __m128i src_reg_m10, src_reg_01;
+ __m128i src_reg_12, src_reg_23;
+ __m128i src_reg_m1001, src_reg_1223;
+ __m128i src_reg_m1012_1023_lo, src_reg_m1012_1023_hi;
+
+ __m128i kernel_reg; // Kernel
+
+ // Result after multiply and add
+ __m128i reg_0, reg_1;
+
+ const __m128i reg_32 = _mm_set1_epi16(32); // Used for rounding
+
+ // We will compute the result two rows at a time
+ const ptrdiff_t src_stride_unrolled = src_stride << 1;
+ const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
+ int h;
+
+ // We only need to go num_taps/2 - 1 row above the source, so we move
+ // 3 - (num_taps/2 - 1) = 4 - num_taps/2 = 2 back down
+ src_ptr += src_stride_unrolled;
+
+ // Load Kernel
+ kernel_reg = _mm_loadu_si128((const __m128i *)kernel);
+ kernel_reg = _mm_srai_epi16(kernel_reg, 1);
+ kernel_reg = _mm_packs_epi16(kernel_reg, kernel_reg);
+ kernel_reg = _mm_shuffle_epi8(kernel_reg, _mm_set1_epi32(0x05040302u));
+
+ // First shuffle the data
+ src_reg_m1 = _mm_loadl_epi64((const __m128i *)src_ptr);
+ src_reg_0 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride));
+ src_reg_m10 = _mm_unpacklo_epi32(src_reg_m1, src_reg_0);
+
+ // More shuffling
+ src_reg_1 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 2));
+ src_reg_01 = _mm_unpacklo_epi32(src_reg_0, src_reg_1);
+
+ // Put three rows next to each other
+ src_reg_m1001 = _mm_unpacklo_epi8(src_reg_m10, src_reg_01);
+
+ for (h = height; h > 1; h -= 2) {
+ src_reg_2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3));
+ src_reg_12 = _mm_unpacklo_epi32(src_reg_1, src_reg_2);
+
+ src_reg_3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4));
+ src_reg_23 = _mm_unpacklo_epi32(src_reg_2, src_reg_3);
+
+ // Put three rows next to each other
+ src_reg_1223 = _mm_unpacklo_epi8(src_reg_12, src_reg_23);
+
+ // Put all four rows next to each other
+ src_reg_m1012_1023_lo = _mm_unpacklo_epi16(src_reg_m1001, src_reg_1223);
+ src_reg_m1012_1023_hi = _mm_unpackhi_epi16(src_reg_m1001, src_reg_1223);
+
+ // Get the results
+ reg_0 = _mm_maddubs_epi16(src_reg_m1012_1023_lo, kernel_reg);
+ reg_1 = _mm_maddubs_epi16(src_reg_m1012_1023_hi, kernel_reg);
+ reg_0 = _mm_hadds_epi16(reg_0, _mm_setzero_si128());
+ reg_1 = _mm_hadds_epi16(reg_1, _mm_setzero_si128());
+
+ // Round the words
+ reg_0 = mm_round_epi16_sse2(&reg_0, &reg_32, 6);
+ reg_1 = mm_round_epi16_sse2(&reg_1, &reg_32, 6);
+
+ // Pack from 16-bit to 8-bit and put them in the right order
+ reg_0 = _mm_packus_epi16(reg_0, reg_0);
+ reg_1 = _mm_packus_epi16(reg_1, reg_1);
+
+ // Save the result
+ *((uint32_t *)(dst_ptr)) = _mm_cvtsi128_si32(reg_0);
+ *((uint32_t *)(dst_ptr + dst_stride)) = _mm_cvtsi128_si32(reg_1);
+
+ // Update the source by two rows
+ src_ptr += src_stride_unrolled;
+ dst_ptr += dst_stride_unrolled;
+
+ src_reg_m1001 = src_reg_1223;
+ src_reg_1 = src_reg_3;
+ }
+}
+
filter8_1dfunction vpx_filter_block1d16_v8_ssse3;
filter8_1dfunction vpx_filter_block1d16_h8_ssse3;
filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
@@ -198,6 +681,13 @@ filter8_1dfunction vpx_filter_block1d8_h8_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_v8_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_h8_avg_ssse3;
+#define vpx_filter_block1d16_v4_avg_ssse3 vpx_filter_block1d16_v8_avg_ssse3
+#define vpx_filter_block1d16_h4_avg_ssse3 vpx_filter_block1d16_h8_avg_ssse3
+#define vpx_filter_block1d8_v4_avg_ssse3 vpx_filter_block1d8_v8_avg_ssse3
+#define vpx_filter_block1d8_h4_avg_ssse3 vpx_filter_block1d8_h8_avg_ssse3
+#define vpx_filter_block1d4_v4_avg_ssse3 vpx_filter_block1d4_v8_avg_ssse3
+#define vpx_filter_block1d4_h4_avg_ssse3 vpx_filter_block1d4_h8_avg_ssse3
+
filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_scale/generic/yv12config.c b/chromium/third_party/libvpx/source/libvpx/vpx_scale/generic/yv12config.c
index db1db37ce6d..7d4aa2a9198 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_scale/generic/yv12config.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_scale/generic/yv12config.c
@@ -57,10 +57,18 @@ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width,
* uv_stride == y_stride/2, so enforce this here. */
int uv_stride = y_stride >> 1;
int uvplane_size = (uv_height + border) * uv_stride;
- const int frame_size = yplane_size + 2 * uvplane_size;
+ const size_t frame_size = yplane_size + 2 * uvplane_size;
if (!ybf->buffer_alloc) {
ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, frame_size);
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+ // This memset is needed for fixing the issue of using uninitialized
+ // value in msan test. It will cause a perf loss, so only do this for
+ // msan test.
+ memset(ybf->buffer_alloc, 0, frame_size);
+#endif
+#endif
ybf->buffer_alloc_sz = frame_size;
}
@@ -146,6 +154,13 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT) return -1;
#endif
+ /* Only support allocating buffers that have a border that's a multiple
+ * of 32. The border restriction is required to get 16-byte alignment of
+ * the start of the chroma rows without introducing an arbitrary gap
+ * between planes, which would break the semantics of things like
+ * vpx_img_set_rect(). */
+ if (border & 0x1f) return -3;
+
if (ybf) {
const int vp9_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
const int aligned_width = (width + 7) & ~7;
@@ -170,9 +185,9 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
uint8_t *buf = NULL;
- // frame_size is stored in buffer_alloc_sz, which is an int. If it won't
+ // frame_size is stored in buffer_alloc_sz, which is a size_t. If it won't
// fit, fail early.
- if (frame_size > INT_MAX) {
+ if (frame_size > SIZE_MAX) {
return -1;
}
@@ -196,10 +211,10 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
// This memset is needed for fixing the issue of using uninitialized
// value in msan test. It will cause a perf loss, so only do this for
// msan test.
- memset(ybf->buffer_alloc, 0, (int)frame_size);
+ memset(ybf->buffer_alloc, 0, (size_t)frame_size);
#endif
#endif
- } else if (frame_size > (size_t)ybf->buffer_alloc_sz) {
+ } else if (frame_size > ybf->buffer_alloc_sz) {
// Allocation to hold larger frame, or first allocation.
vpx_free(ybf->buffer_alloc);
ybf->buffer_alloc = NULL;
@@ -207,7 +222,7 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, (size_t)frame_size);
if (!ybf->buffer_alloc) return -1;
- ybf->buffer_alloc_sz = (int)frame_size;
+ ybf->buffer_alloc_sz = (size_t)frame_size;
// This memset is needed for fixing valgrind error from C loop filter
// due to access uninitialized memory in frame border. It could be
@@ -215,13 +230,6 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
memset(ybf->buffer_alloc, 0, ybf->buffer_alloc_sz);
}
- /* Only support allocating buffers that have a border that's a multiple
- * of 32. The border restriction is required to get 16-byte alignment of
- * the start of the chroma rows without introducing an arbitrary gap
- * between planes, which would break the semantics of things like
- * vpx_img_set_rect(). */
- if (border & 0x1f) return -3;
-
ybf->y_crop_width = width;
ybf->y_crop_height = height;
ybf->y_width = aligned_width;
@@ -235,7 +243,7 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
ybf->uv_stride = uv_stride;
ybf->border = border;
- ybf->frame_size = (int)frame_size;
+ ybf->frame_size = (size_t)frame_size;
ybf->subsampling_x = ss_x;
ybf->subsampling_y = ss_y;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_scale/yv12config.h b/chromium/third_party/libvpx/source/libvpx/vpx_scale/yv12config.h
index 53728af42c5..2cf18217f60 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_scale/yv12config.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_scale/yv12config.h
@@ -49,9 +49,9 @@ typedef struct yv12_buffer_config {
uint8_t *alpha_buffer;
uint8_t *buffer_alloc;
- int buffer_alloc_sz;
+ size_t buffer_alloc_sz;
int border;
- int frame_size;
+ size_t frame_size;
int subsampling_x;
int subsampling_y;
unsigned int bit_depth;
diff --git a/chromium/third_party/libvpx/source/libvpx/vpx_util/vpx_thread.h b/chromium/third_party/libvpx/source/libvpx/vpx_util/vpx_thread.h
index 43a9780071f..4c20f378b07 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpx_util/vpx_thread.h
+++ b/chromium/third_party/libvpx/source/libvpx/vpx_util/vpx_thread.h
@@ -159,6 +159,23 @@ static INLINE int pthread_cond_init(pthread_cond_t *const condition,
return 0;
}
+static INLINE int pthread_cond_broadcast(pthread_cond_t *const condition) {
+ int ok = 1;
+#ifdef USE_WINDOWS_CONDITION_VARIABLE
+ WakeAllConditionVariable(condition);
+#else
+ while (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) {
+ // A thread is waiting in pthread_cond_wait: allow it to be notified.
+ ok &= SetEvent(condition->signal_event_);
+ // Wait until the event is consumed so the signaler cannot consume
+ // the event via its own pthread_cond_wait.
+ ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) !=
+ WAIT_OBJECT_0);
+ }
+#endif
+ return !ok;
+}
+
static INLINE int pthread_cond_signal(pthread_cond_t *const condition) {
int ok = 1;
#ifdef USE_WINDOWS_CONDITION_VARIABLE
@@ -202,6 +219,11 @@ static INLINE int pthread_cond_wait(pthread_cond_t *const condition,
#include <stdlib.h> // NOLINT
#include <sys/builtin.h> // NOLINT
+#if defined(__STRICT_ANSI__)
+// _beginthread() is not declared on __STRICT_ANSI__ mode. Declare here.
+int _beginthread(void (*)(void *), void *, unsigned, void *);
+#endif
+
#define pthread_t TID
#define pthread_mutex_t HMTX
diff --git a/chromium/third_party/libvpx/source/libvpx/vpxdec.c b/chromium/third_party/libvpx/source/libvpx/vpxdec.c
index 522eda12620..eaa28bd84ae 100644
--- a/chromium/third_party/libvpx/source/libvpx/vpxdec.c
+++ b/chromium/third_party/libvpx/source/libvpx/vpxdec.c
@@ -100,19 +100,39 @@ static const arg_def_t framestatsarg =
ARG_DEF(NULL, "framestats", 1, "Output per-frame stats (.csv format)");
static const arg_def_t rowmtarg =
ARG_DEF(NULL, "row-mt", 1, "Enable multi-threading to run row-wise in VP9");
-
-static const arg_def_t *all_args[] = {
- &help, &codecarg, &use_yv12, &use_i420,
- &flipuvarg, &rawvideo, &noblitarg, &progressarg,
- &limitarg, &skiparg, &postprocarg, &summaryarg,
- &outputfile, &threadsarg, &frameparallelarg, &verbosearg,
- &scalearg, &fb_arg, &md5arg, &error_concealment,
- &continuearg,
+static const arg_def_t lpfoptarg =
+ ARG_DEF(NULL, "lpf-opt", 1,
+ "Do loopfilter without waiting for all threads to sync.");
+
+static const arg_def_t *all_args[] = { &help,
+ &codecarg,
+ &use_yv12,
+ &use_i420,
+ &flipuvarg,
+ &rawvideo,
+ &noblitarg,
+ &progressarg,
+ &limitarg,
+ &skiparg,
+ &postprocarg,
+ &summaryarg,
+ &outputfile,
+ &threadsarg,
+ &frameparallelarg,
+ &verbosearg,
+ &scalearg,
+ &fb_arg,
+ &md5arg,
+ &error_concealment,
+ &continuearg,
#if CONFIG_VP9_HIGHBITDEPTH
- &outbitdeptharg,
+ &outbitdeptharg,
#endif
- &svcdecodingarg, &framestatsarg, &rowmtarg, NULL
-};
+ &svcdecodingarg,
+ &framestatsarg,
+ &rowmtarg,
+ &lpfoptarg,
+ NULL };
#if CONFIG_VP8_DECODER
static const arg_def_t addnoise_level =
@@ -509,6 +529,7 @@ static int main_loop(int argc, const char **argv_) {
int ec_enabled = 0;
int keep_going = 0;
int enable_row_mt = 0;
+ int enable_lpf_opt = 0;
const VpxInterface *interface = NULL;
const VpxInterface *fourcc_interface = NULL;
uint64_t dx_time = 0;
@@ -633,6 +654,8 @@ static int main_loop(int argc, const char **argv_) {
}
} else if (arg_match(&arg, &rowmtarg, argi)) {
enable_row_mt = arg_parse_uint(&arg);
+ } else if (arg_match(&arg, &lpfoptarg, argi)) {
+ enable_lpf_opt = arg_parse_uint(&arg);
}
#if CONFIG_VP8_DECODER
else if (arg_match(&arg, &addnoise_level, argi)) {
@@ -764,6 +787,12 @@ static int main_loop(int argc, const char **argv_) {
vpx_codec_error(&decoder));
goto fail;
}
+ if (interface->fourcc == VP9_FOURCC &&
+ vpx_codec_control(&decoder, VP9D_SET_LOOP_FILTER_OPT, enable_lpf_opt)) {
+ fprintf(stderr, "Failed to set decoder in optimized loopfilter mode: %s\n",
+ vpx_codec_error(&decoder));
+ goto fail;
+ }
if (!quiet) fprintf(stderr, "%s\n", decoder.name);
#if CONFIG_VP8_DECODER