summaryrefslogtreecommitdiff
path: root/chromium/third_party/libyuv
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2016-05-09 14:22:11 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2016-05-09 15:11:45 +0000
commit2ddb2d3e14eef3de7dbd0cef553d669b9ac2361c (patch)
treee75f511546c5fd1a173e87c1f9fb11d7ac8d1af3 /chromium/third_party/libyuv
parenta4f3d46271c57e8155ba912df46a05559d14726e (diff)
downloadqtwebengine-chromium-2ddb2d3e14eef3de7dbd0cef553d669b9ac2361c.tar.gz
BASELINE: Update Chromium to 51.0.2704.41
Also adds in all smaller components by reversing logic for exclusion. Change-Id: Ibf90b506e7da088ea2f65dcf23f2b0992c504422 Reviewed-by: Joerg Bornemann <joerg.bornemann@theqtcompany.com>
Diffstat (limited to 'chromium/third_party/libyuv')
-rw-r--r--chromium/third_party/libyuv/.gn1
-rw-r--r--chromium/third_party/libyuv/CMakeLists.txt6
-rw-r--r--chromium/third_party/libyuv/DEPS2
-rw-r--r--chromium/third_party/libyuv/OWNERS13
-rwxr-xr-xchromium/third_party/libyuv/PRESUBMIT.py4
-rw-r--r--chromium/third_party/libyuv/README.chromium2
-rw-r--r--chromium/third_party/libyuv/README.md18
-rw-r--r--chromium/third_party/libyuv/docs/environment_variables.md32
-rw-r--r--chromium/third_party/libyuv/docs/filtering.md196
-rw-r--r--chromium/third_party/libyuv/docs/formats.md133
-rw-r--r--chromium/third_party/libyuv/docs/getting_started.md424
-rw-r--r--chromium/third_party/libyuv/docs/rotation.md103
-rw-r--r--chromium/third_party/libyuv/drover.properties6
-rw-r--r--chromium/third_party/libyuv/include/libyuv/compare_row.h3
-rw-r--r--chromium/third_party/libyuv/include/libyuv/cpu_id.h2
-rw-r--r--chromium/third_party/libyuv/include/libyuv/planar_functions.h6
-rw-r--r--chromium/third_party/libyuv/include/libyuv/rotate_row.h26
-rw-r--r--chromium/third_party/libyuv/include/libyuv/row.h127
-rw-r--r--chromium/third_party/libyuv/include/libyuv/scale_row.h50
-rw-r--r--chromium/third_party/libyuv/include/libyuv/version.h2
-rw-r--r--chromium/third_party/libyuv/libyuv.gyp15
-rw-r--r--chromium/third_party/libyuv/libyuv_test.gyp41
-rwxr-xr-xchromium/third_party/libyuv/setup_links.py6
-rw-r--r--chromium/third_party/libyuv/source/convert.cc8
-rw-r--r--chromium/third_party/libyuv/source/convert_argb.cc18
-rw-r--r--chromium/third_party/libyuv/source/convert_from.cc14
-rw-r--r--chromium/third_party/libyuv/source/convert_from_argb.cc175
-rw-r--r--chromium/third_party/libyuv/source/cpu_id.cc20
-rw-r--r--chromium/third_party/libyuv/source/planar_functions.cc53
-rw-r--r--chromium/third_party/libyuv/source/rotate.cc36
-rw-r--r--chromium/third_party/libyuv/source/rotate_any.cc8
-rw-r--r--chromium/third_party/libyuv/source/rotate_mips.cc6
-rw-r--r--chromium/third_party/libyuv/source/row_any.cc15
-rw-r--r--chromium/third_party/libyuv/source/row_common.cc60
-rw-r--r--chromium/third_party/libyuv/source/row_gcc.cc177
-rw-r--r--chromium/third_party/libyuv/source/row_mips.cc12
-rw-r--r--chromium/third_party/libyuv/source/row_neon.cc84
-rw-r--r--chromium/third_party/libyuv/source/row_neon64.cc48
-rw-r--r--chromium/third_party/libyuv/source/row_win.cc131
-rw-r--r--chromium/third_party/libyuv/source/scale.cc113
-rw-r--r--chromium/third_party/libyuv/source/scale_argb.cc27
-rw-r--r--chromium/third_party/libyuv/source/scale_common.cc16
-rw-r--r--chromium/third_party/libyuv/source/scale_mips.cc42
-rw-r--r--chromium/third_party/libyuv/source/scale_win.cc2
-rw-r--r--chromium/third_party/libyuv/third_party/gflags/BUILD.gn14
-rw-r--r--chromium/third_party/libyuv/third_party/gflags/gflags.gyp3
-rw-r--r--chromium/third_party/libyuv/tools/OWNERS1
-rw-r--r--chromium/third_party/libyuv/tools/msan/OWNERS3
-rw-r--r--chromium/third_party/libyuv/tools/msan/blacklist.txt9
-rw-r--r--chromium/third_party/libyuv/tools/ubsan/OWNERS4
-rw-r--r--chromium/third_party/libyuv/tools/ubsan/blacklist.txt15
-rw-r--r--chromium/third_party/libyuv/tools/ubsan/vptr_blacklist.txt21
-rw-r--r--chromium/third_party/libyuv/unit_test/color_test.cc14
-rw-r--r--chromium/third_party/libyuv/unit_test/convert_test.cc130
-rw-r--r--chromium/third_party/libyuv/unit_test/cpu_test.cc4
-rw-r--r--chromium/third_party/libyuv/unit_test/planar_test.cc109
-rw-r--r--chromium/third_party/libyuv/unit_test/rotate_argb_test.cc52
-rw-r--r--chromium/third_party/libyuv/unit_test/rotate_test.cc43
-rw-r--r--chromium/third_party/libyuv/unit_test/unit_test.cc12
-rw-r--r--chromium/third_party/libyuv/unit_test/unit_test.h6
-rw-r--r--chromium/third_party/libyuv/unit_test/version_test.cc44
-rw-r--r--chromium/third_party/libyuv/util/cpuid.c4
62 files changed, 1742 insertions, 1029 deletions
diff --git a/chromium/third_party/libyuv/.gn b/chromium/third_party/libyuv/.gn
index bc19d4b6c66..b1386c9a7ef 100644
--- a/chromium/third_party/libyuv/.gn
+++ b/chromium/third_party/libyuv/.gn
@@ -35,6 +35,7 @@ exec_script_whitelist = [
"//build/config/linux/pkg_config.gni",
"//build/config/mac/mac_sdk.gni",
"//build/config/posix/BUILD.gn",
+ "//build/config/sysroot.gni",
"//build/config/win/visual_studio_version.gni",
"//build/gn_helpers.py",
"//build/gypi_to_gn.py",
diff --git a/chromium/third_party/libyuv/CMakeLists.txt b/chromium/third_party/libyuv/CMakeLists.txt
index fb4747527bc..f74c05f6b7a 100644
--- a/chromium/third_party/libyuv/CMakeLists.txt
+++ b/chromium/third_party/libyuv/CMakeLists.txt
@@ -70,7 +70,6 @@ set(ly_unittest_sources
${ly_base_dir}/unit_test/scale_test.cc
${ly_base_dir}/unit_test/unit_test.cc
${ly_base_dir}/unit_test/video_common_test.cc
- ${ly_base_dir}/unit_test/version_test.cc
)
set(ly_header_files
@@ -111,12 +110,13 @@ endif()
if(TEST)
find_library(GTEST_LIBRARY gtest)
if(GTEST_LIBRARY STREQUAL "GTEST_LIBRARY-NOTFOUND")
- set(GTEST_SRC_DIR /usr/src/gtest)
+ set(GTEST_SRC_DIR /usr/src/gtest CACHE STRING "Location of gtest sources")
if(EXISTS ${GTEST_SRC_DIR}/src/gtest-all.cc)
message(STATUS "building gtest from sources in ${GTEST_SRC_DIR}")
set(gtest_sources ${GTEST_SRC_DIR}/src/gtest-all.cc)
add_library(gtest STATIC ${gtest_sources})
include_directories(${GTEST_SRC_DIR})
+ include_directories(${GTEST_SRC_DIR}/include)
set(GTEST_LIBRARY gtest)
else()
message(FATAL_ERROR "TEST is set but unable to find gtest library")
@@ -134,6 +134,8 @@ if(NACL AND NACL_LIBC STREQUAL "newlib")
target_link_libraries(libyuv_unittest glibc-compat)
endif()
+target_link_libraries(libyuv_unittest gflags)
+
install(TARGETS ${ly_lib_name} DESTINATION lib)
install(FILES ${ly_header_files} DESTINATION include/libyuv)
install(FILES ${ly_inc_dir}/libyuv.h DESTINATION include/)
diff --git a/chromium/third_party/libyuv/DEPS b/chromium/third_party/libyuv/DEPS
index 0f84b106d1f..794fff37f6a 100644
--- a/chromium/third_party/libyuv/DEPS
+++ b/chromium/third_party/libyuv/DEPS
@@ -7,7 +7,7 @@ vars = {
# Roll the Chromium Git hash to pick up newer versions of all the
# dependencies and tools linked to in setup_links.py.
- 'chromium_revision': 'dad6346948dde45a6e86c614692256c746d9bfb1',
+ 'chromium_revision': '8cdf034791388299f18fba186f2941313320b706',
}
# NOTE: Prefer revision numbers to tags for svn deps. Use http rather than
diff --git a/chromium/third_party/libyuv/OWNERS b/chromium/third_party/libyuv/OWNERS
index e878f833db8..2db52d30797 100644
--- a/chromium/third_party/libyuv/OWNERS
+++ b/chromium/third_party/libyuv/OWNERS
@@ -1,2 +1,13 @@
fbarchard@chromium.org
-mflodman@chromium.org
+magjed@chromium.org
+torbjorng@chromium.org
+
+per-file *.gyp=kjellander@chromium.org
+per-file *.gn=kjellander@chromium.org
+per-file .gitignore=*
+per-file AUTHORS=*
+per-file DEPS=*
+per-file PRESUBMIT.py=kjellander@chromium.org
+per-file gyp_libyuv.py=kjellander@chromium.org
+per-file setup_links.py=*
+per-file sync_chromium.py=kjellander@chromium.org
diff --git a/chromium/third_party/libyuv/PRESUBMIT.py b/chromium/third_party/libyuv/PRESUBMIT.py
index 09aa8270a86..61d92aea170 100755
--- a/chromium/third_party/libyuv/PRESUBMIT.py
+++ b/chromium/third_party/libyuv/PRESUBMIT.py
@@ -28,7 +28,6 @@ def GetPreferredTryMasters(project, change):
'win_x64_rel',
'mac',
'mac_rel',
- 'mac_x64_rel',
'ios',
'ios_rel',
'ios_arm64',
@@ -39,6 +38,9 @@ def GetPreferredTryMasters(project, change):
'linux_memcheck',
'linux_tsan2',
'linux_asan',
+ 'linux_msan',
+ 'linux_ubsan',
+ 'linux_ubsan_vptr',
'android',
'android_rel',
'android_clang',
diff --git a/chromium/third_party/libyuv/README.chromium b/chromium/third_party/libyuv/README.chromium
index 1a4f19dcf30..7733d42b62f 100644
--- a/chromium/third_party/libyuv/README.chromium
+++ b/chromium/third_party/libyuv/README.chromium
@@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
-Version: 1563
+Version: 1579
License: BSD
License File: LICENSE
diff --git a/chromium/third_party/libyuv/README.md b/chromium/third_party/libyuv/README.md
new file mode 100644
index 00000000000..7b11325d37a
--- /dev/null
+++ b/chromium/third_party/libyuv/README.md
@@ -0,0 +1,18 @@
+**libyuv** is an open source project that includes YUV scaling and conversion functionality.
+
+* Scale YUV to prepare content for compression, with point, bilinear or box filter.
+* Convert to YUV from webcam formats.
+* Convert from YUV to formats for rendering/effects.
+* Rotate by 90/180/270 degrees to adjust for mobile devices in portrait mode.
+* Optimized for SSE2/SSSE3/AVX2 on x86/x64.
+* Optimized for Neon on Arm.
+* Optimized for DSP R2 on Mips.
+
+### Development
+
+See [Getting started] [1] for instructions on how to get started developing.
+
+You can also browse the [docs directory] [2] for more documentation.
+
+[1]: docs/getting_started.md
+[2]: docs/
diff --git a/chromium/third_party/libyuv/docs/environment_variables.md b/chromium/third_party/libyuv/docs/environment_variables.md
new file mode 100644
index 00000000000..bc5e2f6fdb3
--- /dev/null
+++ b/chromium/third_party/libyuv/docs/environment_variables.md
@@ -0,0 +1,32 @@
+# Introduction
+
+For test purposes, environment variables can be set to control libyuv behavior. These should only be used for testing, to narrow down bugs or to test performance.
+
+# CPU
+
+By default the cpu is detected and the most advanced form of SIMD is used. But you can disable instruction sets selectively, or completely, falling back on C code. Set the variable to 1 to disable the specified instruction set.
+
+ LIBYUV_DISABLE_ASM
+ LIBYUV_DISABLE_X86
+ LIBYUV_DISABLE_SSE2
+ LIBYUV_DISABLE_SSSE3
+ LIBYUV_DISABLE_SSE41
+ LIBYUV_DISABLE_SSE42
+ LIBYUV_DISABLE_AVX
+ LIBYUV_DISABLE_AVX2
+ LIBYUV_DISABLE_AVX3
+ LIBYUV_DISABLE_ERMS
+ LIBYUV_DISABLE_FMA3
+ LIBYUV_DISABLE_DSPR2
+ LIBYUV_DISABLE_NEON
+
+# Test Width/Height/Repeat
+
+The unittests default to a small image (32x18) to run fast. This can be set by environment variable to test specific resolutions.
+You can also repeat the test a specified number of iterations, allowing benchmarking and profiling.
+
+ set LIBYUV_WIDTH=1280
+ set LIBYUV_HEIGHT=720
+ set LIBYUV_REPEAT=999
+ set LIBYUV_FLAGS=-1
+ set LIBYUV_CPU_INFO=-1
diff --git a/chromium/third_party/libyuv/docs/filtering.md b/chromium/third_party/libyuv/docs/filtering.md
new file mode 100644
index 00000000000..8696976e8a5
--- /dev/null
+++ b/chromium/third_party/libyuv/docs/filtering.md
@@ -0,0 +1,196 @@
+# Introduction
+
+This document discusses the current state of filtering in libyuv. An emphasis on maximum performance while avoiding memory exceptions, and minimal amount of code/complexity. See future work at end.
+
+# LibYuv Filter Subsampling
+
+There are 2 challenges with subsampling
+
+1. centering of samples, which involves clamping on edges
+2. clipping a source region
+
+Centering depends on scale factor and filter mode.
+
+# Down Sampling
+
+If scaling down, the stepping rate is always src_width / dst_width.
+
+ dx = src_width / dst_width;
+
+e.g. If scaling from 1280x720 to 640x360, the step thru the source will be 2.0, stepping over 2 pixels of source for each pixel of destination.
+
+Centering, depends on filter mode.
+
+*Point* downsampling takes the middle pixel.
+
+ x = dx >> 1;
+
+For odd scale factors (e.g. 3x down) this is exactly the middle. For even scale factors, this rounds up and takes the pixel to the right of center. e.g. scale of 4x down will take pixel 2.
+
+**Bilinear** filter, uses the 2x2 pixels in the middle.
+
+ x = dx / 2 - 0.5;
+
+For odd scale factors (e.g. 3x down) this is exactly the middle, and point sampling is used.
+For even scale factors, this evenly filters the middle 2x2 pixels. e.g. 4x down will filter pixels 1,2 at 50% in both directions.
+
+**Box** filter averages the entire box so sampling starts at 0.
+
+ x = 0;
+
+For a scale factor of 2x down, this is equivalent to bilinear.
+
+# Up Sampling
+
+**Point** upsampling use stepping rate of src_width / dst_width and a starting coordinate of 0.
+
+ x = 0;
+ dx = src_width / dst_width;
+
+e.g. If scaling from 640x360 to 1280x720 the step thru the source will be 0.5, stepping half a pixel of source for each pixel of destination. Each pixel is replicated by the scale factor.
+
+**Bilinear** filter stretches such that the first pixel of source maps to the first pixel of destination, and the last pixel of source maps to the last pixel of destination.
+
+ x = 0;
+ dx = (src_width - 1) / (dst_width - 1);
+
+This method is not technically correct, and will likely change in the future.
+
+* It is inconsistent with the bilinear down sampler. The same method could be used for down sampling, and then it would be more reversible, but that would prevent specialized 2x down sampling.
+* Although centered, the image is slightly magnified.
+* The filtering was changed in early 2013 - previously it used:
+
+ x = 0;
+ dx = (src_width - 1) / (dst_width - 1);
+
+Which is the correct scale factor, but shifted the image left, and extruded the last pixel. The reason for the change was to remove the extruding code from the low level row functions, allowing 3 functions to share the same row functions - ARGBScale, I420Scale, and ARGBInterpolate. Then the one function was ported to many cpu variations: SSE2, SSSE3, AVX2, Neon and 'Any' version for any number of pixels and alignment. The function is also specialized for 0,25,50,75%.
+
+The above approach still has the potential to read the last pixel 100% and last pixel + 1 0%, which may cause a memory exception. So the left pixel goes to a fraction less than the last pixel, but filters in the minimum amount of it, and the maximum of the last pixel.
+
+ dx = FixedDiv((src_width << 16) - 0x00010001, (dst << 16) - 0x00010000);
+
+**Box** filter for upsampling switches over to Bilinear.
+
+# Scale snippet:
+
+ #define CENTERSTART(dx, s) (dx < 0) ? -((-dx >> 1) + s) : ((dx >> 1) + s)
+ #define FIXEDDIV1(src, dst) FixedDiv((src << 16) - 0x00010001, \
+ (dst << 16) - 0x00010000);
+
+ // Compute slope values for stepping.
+ void ScaleSlope(int src_width, int src_height,
+ int dst_width, int dst_height,
+ FilterMode filtering,
+ int* x, int* y, int* dx, int* dy) {
+ assert(x != NULL);
+ assert(y != NULL);
+ assert(dx != NULL);
+ assert(dy != NULL);
+ assert(src_width != 0);
+ assert(src_height != 0);
+ assert(dst_width > 0);
+ assert(dst_height > 0);
+ if (filtering == kFilterBox) {
+ // Scale step for point sampling duplicates all pixels equally.
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *dy = FixedDiv(src_height, dst_height);
+ *x = 0;
+ *y = 0;
+ } else if (filtering == kFilterBilinear) {
+ // Scale step for bilinear sampling renders last pixel once for upsample.
+ if (dst_width <= Abs(src_width)) {
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *x = CENTERSTART(*dx, -32768);
+ } else if (dst_width > 1) {
+ *dx = FIXEDDIV1(Abs(src_width), dst_width);
+ *x = 0;
+ }
+ if (dst_height <= src_height) {
+ *dy = FixedDiv(src_height, dst_height);
+ *y = CENTERSTART(*dy, -32768); // 32768 = -0.5 to center bilinear.
+ } else if (dst_height > 1) {
+ *dy = FIXEDDIV1(src_height, dst_height);
+ *y = 0;
+ }
+ } else if (filtering == kFilterLinear) {
+ // Scale step for bilinear sampling renders last pixel once for upsample.
+ if (dst_width <= Abs(src_width)) {
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *x = CENTERSTART(*dx, -32768);
+ } else if (dst_width > 1) {
+ *dx = FIXEDDIV1(Abs(src_width), dst_width);
+ *x = 0;
+ }
+ *dy = FixedDiv(src_height, dst_height);
+ *y = *dy >> 1;
+ } else {
+ // Scale step for point sampling duplicates all pixels equally.
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *dy = FixedDiv(src_height, dst_height);
+ *x = CENTERSTART(*dx, 0);
+ *y = CENTERSTART(*dy, 0);
+ }
+ // Negative src_width means horizontally mirror.
+ if (src_width < 0) {
+ *x += (dst_width - 1) * *dx;
+ *dx = -*dx;
+ src_width = -src_width;
+ }
+ }
+
+# Future Work
+
+Point sampling should ideally be the same as bilinear, but pixel by pixel, round to nearest neighbor. But as is, it is reversible and exactly matches ffmpeg at all scale factors, both up and down. The scale factor is
+
+ dx = src_width / dst_width;
+
+The step value is centered for down sample:
+
+ x = dx / 2;
+
+Or starts at 0 for upsample.
+
+ x = 0;
+
+Bilinear filtering is currently correct for down sampling, but not for upsampling.
+Upsampling is stretching the first and last pixel of source, to the first and last pixel of destination.
+
+ dx = (src_width - 1) / (dst_width - 1);<br>
+ x = 0;
+
+It should be stretching such that the first pixel is centered in the middle of the scale factor, to match the pixel that would be sampled for down sampling by the same amount. And same on last pixel.
+
+ dx = src_width / dst_width;<br>
+ x = dx / 2 - 0.5;
+
+This would start at -0.5 and go to last pixel + 0.5, sampling 50% from last pixel + 1.
+Then clamping would be needed. On GPUs there are numerous ways to clamp.
+
+1. Clamp the coordinate to the edge of the texture, duplicating the first and last pixel.
+2. Blend with a constant color, such as transparent black. Typically best for fonts.
+3. Mirror the UV coordinate, which is similar to clamping. Good for continuous tone images.
+4. Wrap the coordinate, for texture tiling.
+5. Allow the coordinate to index beyond the image, which may be the correct data if sampling a subimage.
+6. Extrapolate the edge based on the previous pixel. pixel -0.5 is computed from slope of pixel 0 and 1.
+
+Some of these are computational, even for a GPU, which is one reason textures are sometimes limited to power of 2 sizes.
+We do care about the clipping case, where allowing coordinates to become negative and index pixels before the image is the correct data. But normally for simple scaling, we want to clamp to the edge pixel. For example, if bilinear scaling from 3x3 to 30x30, we’d essentially want 10 pixels of each of the original 3 pixels. But we want the original pixels to land in the middle of each 10 pixels, at offsets 5, 15 and 25. There would be filtering between 5 and 15 between the original pixels 0 and 1. And filtering between 15 and 25 from original pixels 1 and 2. The first 5 pixels are clamped to pixel 0 and the last 5 pixels are clamped to pixel 2.
+The easiest way to implement this is copy the original 3 pixels to a buffer, and duplicate the first and last pixels. 0,1,2 becomes 0, 0,1,2, 2. Then implement a filtering without clamping. We call this source extruding. Its only necessary on up sampling, since down sampler will always have valid surrounding pixels.
+Extruding is practical when the image is already copied to a temporary buffer. It could be done to the original image, as long as the original memory is restored, but valgrind and/or memory protection would disallow this, so it requires a memcpy to a temporary buffer, which may hurt performance. The memcpy has a performance advantage, from a cache point of view, that can actually make this technique faster, depending on hardware characteristics.
+Vertical extrusion can be done with a memcpy of the first/last row, or clamping a pointer.
+
+
+The other way to implement clamping is handle the edges with a memset. e.g. Read first source pixel and memset the first 5 pixels. Filter pixels 0,1,2 to 5 to 25. Read last pixel and memset the last 5 pixels. Blur is implemented with this method like this, which has 3 loops per row - left, middle and right.
+
+Box filter is only used for 2x down sample or more. Its based on integer sized boxes. Technically it should be filtered edges, but thats substantially slower (roughly 100x), and at that point you may as well do a cubic filter which is more correct.
+
+Box filter currently sums rows into a row buffer. It does this with
+
+Mirroring will use the same slope as normal, but with a negative.
+The starting coordinate needs to consider the scale factor and filter. e.g. box filter of 30x30 to 3x3 with mirroring would use -10 for step, but x = 20. width (30) - dx.
+
+Step needs to be accurate, so it uses an integer divide. This is as much as 5% of the profile. An approximated divide is substantially faster, but the inaccuracy causes stepping beyond the original image boundaries. 3 general solutions:
+
+1. copy image to buffer with padding. allows for small errors in stepping.
+2. hash the divide, so common values are quickly found.
+3. change api so caller provides the slope.
diff --git a/chromium/third_party/libyuv/docs/formats.md b/chromium/third_party/libyuv/docs/formats.md
new file mode 100644
index 00000000000..a7cfed82189
--- /dev/null
+++ b/chromium/third_party/libyuv/docs/formats.md
@@ -0,0 +1,133 @@
+# Introduction
+
+Formats (FOURCC) supported by libyuv are detailed here.
+
+# Core Formats
+
+There are 2 core formats supported by libyuv - I420 and ARGB. All YUV formats can be converted to/from I420. All RGB formats can be converted to/from ARGB.
+
+Filtering functions such as scaling and planar functions work on I420 and/or ARGB.
+
+# OSX Core Media Pixel Formats
+
+This is how OSX formats map to libyuv
+
+ enum {
+ kCMPixelFormat_32ARGB = 32, FOURCC_BGRA
+ kCMPixelFormat_32BGRA = 'BGRA', FOURCC_ARGB
+ kCMPixelFormat_24RGB = 24, FOURCC_RAW
+ kCMPixelFormat_16BE555 = 16, Not supported.
+ kCMPixelFormat_16BE565 = 'B565', Not supported.
+ kCMPixelFormat_16LE555 = 'L555', FOURCC_RGBO
+ kCMPixelFormat_16LE565 = 'L565', FOURCC_RGBP
+ kCMPixelFormat_16LE5551 = '5551', FOURCC_RGBO
+ kCMPixelFormat_422YpCbCr8 = '2vuy', FOURCC_UYVY
+ kCMPixelFormat_422YpCbCr8_yuvs = 'yuvs', FOURCC_YUY2
+ kCMPixelFormat_444YpCbCr8 = 'v308', FOURCC_I444 ?
+ kCMPixelFormat_4444YpCbCrA8 = 'v408', Not supported.
+ kCMPixelFormat_422YpCbCr16 = 'v216', Not supported.
+ kCMPixelFormat_422YpCbCr10 = 'v210', FOURCC_V210 previously. Removed now.
+ kCMPixelFormat_444YpCbCr10 = 'v410', Not supported.
+ kCMPixelFormat_8IndexedGray_WhiteIsZero = 0x00000028, Not supported.
+ };
+
+
+# FOURCC (Four Character Code) List
+
+The following is extracted from video_common.h as a complete list of formats supported by libyuv.
+
+ enum FourCC {
+ // 9 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
+ FOURCC_I420 = FOURCC('I', '4', '2', '0'),
+ FOURCC_I422 = FOURCC('I', '4', '2', '2'),
+ FOURCC_I444 = FOURCC('I', '4', '4', '4'),
+ FOURCC_I411 = FOURCC('I', '4', '1', '1'),
+ FOURCC_I400 = FOURCC('I', '4', '0', '0'),
+ FOURCC_NV21 = FOURCC('N', 'V', '2', '1'),
+ FOURCC_NV12 = FOURCC('N', 'V', '1', '2'),
+ FOURCC_YUY2 = FOURCC('Y', 'U', 'Y', '2'),
+ FOURCC_UYVY = FOURCC('U', 'Y', 'V', 'Y'),
+
+ // 2 Secondary YUV formats: row biplanar.
+ FOURCC_M420 = FOURCC('M', '4', '2', '0'),
+ FOURCC_Q420 = FOURCC('Q', '4', '2', '0'),
+
+ // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp.
+ FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B'),
+ FOURCC_BGRA = FOURCC('B', 'G', 'R', 'A'),
+ FOURCC_ABGR = FOURCC('A', 'B', 'G', 'R'),
+ FOURCC_24BG = FOURCC('2', '4', 'B', 'G'),
+ FOURCC_RAW = FOURCC('r', 'a', 'w', ' '),
+ FOURCC_RGBA = FOURCC('R', 'G', 'B', 'A'),
+ FOURCC_RGBP = FOURCC('R', 'G', 'B', 'P'), // rgb565 LE.
+ FOURCC_RGBO = FOURCC('R', 'G', 'B', 'O'), // argb1555 LE.
+ FOURCC_R444 = FOURCC('R', '4', '4', '4'), // argb4444 LE.
+
+ // 4 Secondary RGB formats: 4 Bayer Patterns.
+ FOURCC_RGGB = FOURCC('R', 'G', 'G', 'B'),
+ FOURCC_BGGR = FOURCC('B', 'G', 'G', 'R'),
+ FOURCC_GRBG = FOURCC('G', 'R', 'B', 'G'),
+ FOURCC_GBRG = FOURCC('G', 'B', 'R', 'G'),
+
+ // 1 Primary Compressed YUV format.
+ FOURCC_MJPG = FOURCC('M', 'J', 'P', 'G'),
+
+ // 5 Auxiliary YUV variations: 3 with U and V planes are swapped, 1 Alias.
+ FOURCC_YV12 = FOURCC('Y', 'V', '1', '2'),
+ FOURCC_YV16 = FOURCC('Y', 'V', '1', '6'),
+ FOURCC_YV24 = FOURCC('Y', 'V', '2', '4'),
+ FOURCC_YU12 = FOURCC('Y', 'U', '1', '2'), // Linux version of I420.
+ FOURCC_J420 = FOURCC('J', '4', '2', '0'),
+ FOURCC_J400 = FOURCC('J', '4', '0', '0'),
+
+ // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc.
+ FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'), // Alias for I420.
+ FOURCC_YU16 = FOURCC('Y', 'U', '1', '6'), // Alias for I422.
+ FOURCC_YU24 = FOURCC('Y', 'U', '2', '4'), // Alias for I444.
+ FOURCC_YUYV = FOURCC('Y', 'U', 'Y', 'V'), // Alias for YUY2.
+ FOURCC_YUVS = FOURCC('y', 'u', 'v', 's'), // Alias for YUY2 on Mac.
+ FOURCC_HDYC = FOURCC('H', 'D', 'Y', 'C'), // Alias for UYVY.
+ FOURCC_2VUY = FOURCC('2', 'v', 'u', 'y'), // Alias for UYVY on Mac.
+ FOURCC_JPEG = FOURCC('J', 'P', 'E', 'G'), // Alias for MJPG.
+ FOURCC_DMB1 = FOURCC('d', 'm', 'b', '1'), // Alias for MJPG on Mac.
+ FOURCC_BA81 = FOURCC('B', 'A', '8', '1'), // Alias for BGGR.
+ FOURCC_RGB3 = FOURCC('R', 'G', 'B', '3'), // Alias for RAW.
+ FOURCC_BGR3 = FOURCC('B', 'G', 'R', '3'), // Alias for 24BG.
+ FOURCC_CM32 = FOURCC(0, 0, 0, 32), // Alias for BGRA kCMPixelFormat_32ARGB
+ FOURCC_CM24 = FOURCC(0, 0, 0, 24), // Alias for RAW kCMPixelFormat_24RGB
+ FOURCC_L555 = FOURCC('L', '5', '5', '5'), // Alias for RGBO.
+ FOURCC_L565 = FOURCC('L', '5', '6', '5'), // Alias for RGBP.
+ FOURCC_5551 = FOURCC('5', '5', '5', '1'), // Alias for RGBO.
+
+ // 1 Auxiliary compressed YUV format set aside for capturer.
+ FOURCC_H264 = FOURCC('H', '2', '6', '4'),
+
+# The ARGB FOURCC
+
+There are 4 ARGB layouts - ARGB, BGRA, ABGR and RGBA. ARGB is most common by far, used for screen formats, and windows webcam drivers.
+
+The fourcc describes the order of channels in a ***register***.
+
+A fourcc provided by a capturer can be thought of as a string, e.g. "ARGB".
+
+On little endian machines, as an int, this would have 'A' in the lowest byte. The FOURCC macro reverses the order:
+
+ #define FOURCC(a, b, c, d) (((uint32)(a)) | ((uint32)(b) << 8) | ((uint32)(c) << 16) | ((uint32)(d) << 24))
+
+So the "ARGB" string, read as an uint32, is
+
+ FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B')
+
+If you were to read ARGB pixels as uint32's, the alpha would be in the high byte, and the blue in the lowest byte. In memory, these are stored little endian, so 'B' is first, then 'G', 'R' and 'A' last.
+
+When calling conversion functions, the names match the FOURCC, so in this case it would be I420ToARGB().
+
+All formats can be converted to/from ARGB.
+
+Most 'planar_functions' work on ARGB (e.g. ARGBBlend).
+
+Some are channel order agnostic (e.g. ARGBScale).
+
+Some functions are symmetric (e.g. ARGBToBGRA is the same as BGRAToARGB, so its a macro).
+
+ARGBBlend expects preattenuated ARGB. The R,G,B are premultiplied by alpha. Other functions don't care.
diff --git a/chromium/third_party/libyuv/docs/getting_started.md b/chromium/third_party/libyuv/docs/getting_started.md
new file mode 100644
index 00000000000..9f00d965094
--- /dev/null
+++ b/chromium/third_party/libyuv/docs/getting_started.md
@@ -0,0 +1,424 @@
+# Getting Started
+
+How to get and build the libyuv code.
+
+## Pre-requisites
+
+You'll need to have depot tools installed: https://www.chromium.org/developers/how-tos/install-depot-tools
+Refer to chromium instructions for each platform for other prerequisites.
+
+## Getting the Code
+
+Create a working directory, enter it, and run:
+
+ gclient config https://chromium.googlesource.com/libyuv/libyuv
+ gclient sync
+
+
+Then you'll get a .gclient file like:
+
+ solutions = [
+ { "name" : "libyuv",
+ "url" : "https://chromium.googlesource.com/libyuv/libyuv",
+ "deps_file" : "DEPS",
+ "managed" : True,
+ "custom_deps" : {
+ },
+ "safesync_url": "",
+ },
+ ];
+
+
+For iOS add `;target_os=['ios'];` to your OSX .gclient and run `GYP_DEFINES="OS=ios" gclient sync.`
+
+Browse the Git repository: https://chromium.googlesource.com/libyuv/libyuv/+/master
+
+### Android
+For Android add `;target_os=['android'];` to your Linux .gclient
+
+
+ solutions = [
+ { "name" : "libyuv",
+ "url" : "https://chromium.googlesource.com/libyuv/libyuv",
+ "deps_file" : "DEPS",
+ "managed" : True,
+ "custom_deps" : {
+ },
+ "safesync_url": "",
+ },
+ ];
+ target_os = ["android", "unix"];
+
+Then run:
+
+ export GYP_DEFINES="OS=android"
+ gclient sync
+
+Caveat: There's an error with Google Play services updates. If you get the error "Your version of the Google Play services library is not up to date", run the following:
+ cd chromium/src
+ ./build/android/play_services/update.py download
+ cd ../..
+
+For Windows the gclient sync must be done from an Administrator command prompt.
+
+The sync will generate native build files for your environment using gyp (Windows: Visual Studio, OSX: XCode, Linux: make). This generation can also be forced manually: `gclient runhooks`
+
+To get just the source (not buildable):
+ git clone https://chromium.googlesource.com/libyuv/libyuv
+
+
+## Building the Library and Unittests
+
+### Windows
+
+ set GYP_DEFINES=target_arch=ia32
+ call python gyp_libyuv -fninja -G msvs_version=2013
+ ninja -j7 -C out\Release
+ ninja -j7 -C out\Debug
+
+ set GYP_DEFINES=target_arch=x64
+ call python gyp_libyuv -fninja -G msvs_version=2013
+ ninja -C out\Debug_x64
+ ninja -C out\Release_x64
+
+#### Building with clangcl
+ set GYP_DEFINES=clang=1 target_arch=ia32 libyuv_enable_svn=1
+ set LLVM_REPO_URL=svn://svn.chromium.org/llvm-project
+ call python tools\clang\scripts\update.py
+ call python gyp_libyuv -fninja libyuv_test.gyp
+ ninja -C out\Debug
+ ninja -C out\Release
+
+### OSX
+
+Clang 64 bit shown. Remove `clang=1` for GCC and change x64 to ia32 for 32 bit.
+
+ GYP_DEFINES="clang=1 target_arch=x64" ./gyp_libyuv
+ ninja -j7 -C out/Debug
+ ninja -j7 -C out/Release
+
+ GYP_DEFINES="clang=1 target_arch=ia32" ./gyp_libyuv
+ ninja -j7 -C out/Debug
+ ninja -j7 -C out/Release
+
+### iOS
+http://www.chromium.org/developers/how-tos/build-instructions-ios
+
+Add to .gclient last line: `target_os=['ios'];`
+
+armv7
+
+ GYP_DEFINES="OS=ios target_arch=armv7 target_subarch=arm32" GYP_CROSSCOMPILE=1 GYP_GENERATOR_FLAGS="output_dir=out_ios" ./gyp_libyuv
+ ninja -j7 -C out_ios/Debug-iphoneos libyuv_unittest
+ ninja -j7 -C out_ios/Release-iphoneos libyuv_unittest
+
+arm64
+
+ GYP_DEFINES="OS=ios target_arch=arm64 target_subarch=arm64" GYP_CROSSCOMPILE=1 GYP_GENERATOR_FLAGS="output_dir=out_ios" ./gyp_libyuv
+ ninja -j7 -C out_ios/Debug-iphoneos libyuv_unittest
+ ninja -j7 -C out_ios/Release-iphoneos libyuv_unittest
+
+both armv7 and arm64 (fat)
+
+ GYP_DEFINES="OS=ios target_arch=armv7 target_subarch=both" GYP_CROSSCOMPILE=1 GYP_GENERATOR_FLAGS="output_dir=out_ios" ./gyp_libyuv
+ ninja -j7 -C out_ios/Debug-iphoneos libyuv_unittest
+ ninja -j7 -C out_ios/Release-iphoneos libyuv_unittest
+
+simulator
+
+ GYP_DEFINES="OS=ios target_arch=ia32 target_subarch=arm32" GYP_CROSSCOMPILE=1 GYP_GENERATOR_FLAGS="output_dir=out_sim" ./gyp_libyuv
+ ninja -j7 -C out_sim/Debug-iphonesimulator libyuv_unittest
+ ninja -j7 -C out_sim/Release-iphonesimulator libyuv_unittest
+
+### Android
+https://code.google.com/p/chromium/wiki/AndroidBuildInstructions
+
+Add to .gclient last line: `target_os=['android'];`
+
+armv7
+
+ GYP_DEFINES="OS=android" GYP_CROSSCOMPILE=1 ./gyp_libyuv
+ ninja -j7 -C out/Debug libyuv_unittest_apk
+ ninja -j7 -C out/Release libyuv_unittest_apk
+
+arm64
+
+ GYP_DEFINES="OS=android target_arch=arm64 target_subarch=arm64" GYP_CROSSCOMPILE=1 ./gyp_libyuv
+ ninja -j7 -C out/Debug libyuv_unittest_apk
+ ninja -j7 -C out/Release libyuv_unittest_apk
+
+ia32
+
+ GYP_DEFINES="OS=android target_arch=ia32" GYP_CROSSCOMPILE=1 ./gyp_libyuv
+ ninja -j7 -C out/Debug libyuv_unittest_apk
+ ninja -j7 -C out/Release libyuv_unittest_apk
+
+ GYP_DEFINES="OS=android target_arch=ia32 android_full_debug=1" GYP_CROSSCOMPILE=1 ./gyp_libyuv
+ ninja -j7 -C out/Debug libyuv_unittest_apk
+
+mipsel
+
+ GYP_DEFINES="OS=android target_arch=mipsel" GYP_CROSSCOMPILE=1 ./gyp_libyuv
+ ninja -j7 -C out/Debug libyuv_unittest_apk
+ ninja -j7 -C out/Release libyuv_unittest_apk
+
+arm32 disassembly:
+
+ third_party/android_tools/ndk/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/arm-linux-androideabi-objdump -d out/Release/obj/source/libyuv.row_neon.o
+
+arm64 disassembly:
+
+ third_party/android_tools/ndk/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/aarch64-linux-android-objdump -d out/Release/obj/source/libyuv.row_neon64.o
+
+Running tests:
+
+ util/android/test_runner.py gtest -s libyuv_unittest -t 7200 --verbose --release --gtest_filter=*
+
+Running test as benchmark:
+
+ util/android/test_runner.py gtest -s libyuv_unittest -t 7200 --verbose --release --gtest_filter=* -a "--libyuv_width=1280 --libyuv_height=720 --libyuv_repeat=999 --libyuv_flags=-1"
+
+Running test with C code:
+
+ util/android/test_runner.py gtest -s libyuv_unittest -t 7200 --verbose --release --gtest_filter=* -a "--libyuv_width=1280 --libyuv_height=720 --libyuv_repeat=999 --libyuv_flags=0 --libyuv_cpu_info=0"
+
+#### Building with GN
+
+ call gn gen out/Release "--args=is_debug=false target_cpu=\"x86\""
+ call gn gen out/Debug "--args=is_debug=true target_cpu=\"x86\""
+ ninja -C out/Release
+ ninja -C out/Debug
+
+### Linux
+
+ GYP_DEFINES="target_arch=x64" ./gyp_libyuv
+ ninja -j7 -C out/Debug
+ ninja -j7 -C out/Release
+
+ GYP_DEFINES="target_arch=ia32" ./gyp_libyuv
+ ninja -j7 -C out/Debug
+ ninja -j7 -C out/Release
+
+#### CentOS
+
+On CentOS 32 bit the following work around allows a sync:
+
+ export GYP_DEFINES="host_arch=ia32"
+ gclient sync
+
+### Windows Shared Library
+
+Modify libyuv.gyp from 'static_library' to 'shared_library', and add 'LIBYUV_BUILDING_SHARED_LIBRARY' to 'defines'.
+
+ gclient runhooks
+
+After this command follow the building the library instructions above.
+
+If you get a compile error for atlthunk.lib on Windows, read http://www.chromium.org/developers/how-tos/build-instructions-windows
+
+
+### Build targets
+
+ ninja -C out/Debug libyuv
+ ninja -C out/Debug libyuv_unittest
+ ninja -C out/Debug compare
+ ninja -C out/Debug convert
+ ninja -C out/Debug psnr
+ ninja -C out/Debug cpuid
+
+
+## Building the Library with make
+
+### Linux
+
+ make -j7 V=1 -f linux.mk
+ make -j7 V=1 -f linux.mk clean
+ make -j7 V=1 -f linux.mk CXX=clang++
+
+## Building the Library with cmake
+
+Install cmake: http://www.cmake.org/
+
+Default debug build:
+
+ mkdir out
+ cd out
+ cmake ..
+ cmake --build .
+
+Release build/install
+
+ mkdir out
+ cd out
+ cmake -DCMAKE_INSTALL_PREFIX="/usr/lib" -DCMAKE_BUILD_TYPE="Release" ..
+ cmake --build . --config Release
+ sudo cmake --build . --target install --config Release
+
+### Windows 8 Phone
+
+Pre-requisite:
+
+* Install Visual Studio 2012 and Arm to your environment.<br>
+
+Then:
+
+ call "c:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\bin\x86_arm\vcvarsx86_arm.bat"
+
+or with Visual Studio 2013:
+
+ call "c:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin\x86_arm\vcvarsx86_arm.bat"
+ nmake /f winarm.mk clean
+ nmake /f winarm.mk
+
+### Windows Shared Library
+
+Modify libyuv.gyp from 'static_library' to 'shared_library', and add 'LIBYUV_BUILDING_SHARED_LIBRARY' to 'defines'. Then run this.
+
+ gclient runhooks
+
+After this command follow the building the library instructions above.
+
+If you get a compile error for atlthunk.lib on Windows, read http://www.chromium.org/developers/how-tos/build-instructions-windows
+
+### 64 bit Windows
+
+ set GYP_DEFINES=target_arch=x64
+ gclient runhooks V=1
+
+### ARM Linux
+
+ export GYP_DEFINES="target_arch=arm"
+ export CROSSTOOL=`<path>`/arm-none-linux-gnueabi
+ export CXX=$CROSSTOOL-g++
+ export CC=$CROSSTOOL-gcc
+ export AR=$CROSSTOOL-ar
+ export AS=$CROSSTOOL-as
+ export RANLIB=$CROSSTOOL-ranlib
+ gclient runhooks
+
+## Running Unittests
+
+### Windows
+
+ out\Release\libyuv_unittest.exe --gtest_catch_exceptions=0 --gtest_filter="*"
+
+### OSX
+
+ out/Release/libyuv_unittest --gtest_filter="*"
+
+### Linux
+
+ out/Release/libyuv_unittest --gtest_filter="*"
+
+Replace --gtest_filter="*" with specific unittest to run. May include wildcards. e.g.
+
+ out/Release/libyuv_unittest --gtest_filter=libyuvTest.I420ToARGB_Opt
+
+## CPU Emulator tools
+
+### Intel SDE (Software Development Emulator)
+
+Pre-requisite: Install IntelSDE for Windows: http://software.intel.com/en-us/articles/intel-software-development-emulator
+
+Then run:
+
+ c:\intelsde\sde -hsw -- out\release\libyuv_unittest.exe --gtest_filter=*
+
+
+## Memory tools
+
+### Running Dr Memory memcheck for Windows
+
+Pre-requisite: Install Dr Memory for Windows and add it to your path: http://www.drmemory.org/docs/page_install_windows.html
+
+ set GYP_DEFINES=build_for_tool=drmemory target_arch=ia32
+ call python gyp_libyuv -fninja -G msvs_version=2013
+ ninja -C out\Debug
+ drmemory out\Debug\libyuv_unittest.exe --gtest_catch_exceptions=0 --gtest_filter=*
+
+### Running UBSan
+
+See Chromium instructions for sanitizers: https://www.chromium.org/developers/testing/undefinedbehaviorsanitizer
+
+Sanitizers available: TSan, MSan, ASan, UBSan, LSan
+
+ GYP_DEFINES='ubsan=1' gclient runhooks
+ ninja -C out/Release
+
+### Running Valgrind memcheck
+
+Memory errors and race conditions can be found by running tests under special memory tools. [Valgrind] [1] is an instrumentation framework for building dynamic analysis tools. Various tests and profilers are built upon it to find memory handling errors and memory leaks, for instance.
+
+[1]: http://valgrind.org
+
+ solutions = [
+ { "name" : "libyuv",
+ "url" : "https://chromium.googlesource.com/libyuv/libyuv",
+ "deps_file" : "DEPS",
+ "managed" : True,
+ "custom_deps" : {
+ "libyuv/chromium/src/third_party/valgrind": "https://chromium.googlesource.com/chromium/deps/valgrind/binaries",
+ },
+ "safesync_url": "",
+ },
+ ]
+
+Then run:
+
+ GYP_DEFINES="clang=0 target_arch=x64 build_for_tool=memcheck" python gyp_libyuv
+ ninja -C out/Debug
+ valgrind out/Debug/libyuv_unittest
+
+
+For more information, see http://www.chromium.org/developers/how-tos/using-valgrind
+
+### Running Thread Sanitizer (TSan)
+
+ GYP_DEFINES="clang=0 target_arch=x64 build_for_tool=tsan" python gyp_libyuv
+ ninja -C out/Debug
+ valgrind out/Debug/libyuv_unittest
+
+For more info, see http://www.chromium.org/developers/how-tos/using-valgrind/threadsanitizer
+
+### Running Address Sanitizer (ASan)
+
+ GYP_DEFINES="clang=0 target_arch=x64 build_for_tool=asan" python gyp_libyuv
+ ninja -C out/Debug
+ valgrind out/Debug/libyuv_unittest
+
+For more info, see http://dev.chromium.org/developers/testing/addresssanitizer
+
+## Benchmarking
+
+The unittests can be used to benchmark.
+
+### Windows
+
+ set LIBYUV_WIDTH=1280
+ set LIBYUV_HEIGHT=720
+ set LIBYUV_REPEAT=999
+ set LIBYUV_FLAGS=-1
+ out\Release\libyuv_unittest.exe --gtest_filter=*I420ToARGB_Opt
+
+
+### Linux and Mac
+
+ LIBYUV_WIDTH=1280 LIBYUV_HEIGHT=720 LIBYUV_REPEAT=1000 out/Release/libyuv_unittest --gtest_filter=*I420ToARGB_Opt
+
+ libyuvTest.I420ToARGB_Opt (547 ms)
+
+Indicates 0.547 ms/frame for 1280 x 720.
+
+## Making a change
+
+ gclient sync
+ git checkout -b mycl -t origin/master
+ git pull
+ <edit files>
+ git add -u
+ git commit -m "my change"
+ git cl lint
+ git cl try
+   git cl upload -r a-reviewer@chromium.org -s
+ <once approved..>
+ git cl land
diff --git a/chromium/third_party/libyuv/docs/rotation.md b/chromium/third_party/libyuv/docs/rotation.md
new file mode 100644
index 00000000000..fb84fce5a9c
--- /dev/null
+++ b/chromium/third_party/libyuv/docs/rotation.md
@@ -0,0 +1,103 @@
+# Introduction
+
+Rotation by multiples of 90 degrees allows mobile devices to rotate webcams from landscape to portrait. The higher level functions ConvertToI420 and ConvertToARGB allow rotation of any format. Optimized functionality is supported for I420, ARGB, NV12 and NV21.
+
+# ConvertToI420
+
+ int ConvertToI420(const uint8* src_frame, size_t src_size,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 format);
+
+This function crops, converts, and rotates. You should think of it in that order.
+ * Crops the original image, which is src_width x src_height, to crop_width x crop_height. At this point the image is still not rotated.
+ * Converts the cropped region to I420. Supports inverted source for src_height negative.
+ * Rotates by 90, 180 or 270 degrees.
+The buffer the caller provides should account for rotation. It is especially important to get the stride of the destination correct.
+
+e.g.
+640 x 480 NV12 captured<br>
+Crop to 640 x 360<br>
+Rotate by 90 degrees to 360 x 640.<br>
+Caller passes stride of 360 for Y and 360 / 2 for U and V.<br>
+Caller passes crop_width of 640, crop_height of 360.<br>
+
+# ConvertToARGB
+
+ int ConvertToARGB(const uint8* src_frame, size_t src_size,
+ uint8* dst_argb, int dst_stride_argb,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 format);
+
+Same as I420, but implementation is less optimized - reads columns and writes rows, 16 bytes at a time.
+
+# I420Rotate
+
+ int I420Rotate(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_width, int src_height, enum RotationMode mode);
+
+Destination is rotated, so pass dst_stride_y etc that consider rotation.<br>
+Rotate by 180 can be done in place, but 90 and 270 can not.
+
+Implementation (Neon/SSE2) uses 8 x 8 block transpose, so best efficiency is with sizes and pointers that are aligned to 8.
+
+Cropping can be achieved by adjusting the src_y/u/v pointers and src_width, src_height.
+
+Lower level plane functions are provided, allowing other planar formats to be rotated. (e.g. I444)
+
+For other planar YUV formats (I444, I422, I411, I400, NV16, NV24), the planar functions are exposed and can be called directly
+
+
+ // Rotate a plane by 0, 90, 180, or 270.
+ int RotatePlane(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int src_width, int src_height, enum RotationMode mode);
+
+# ARGBRotate
+
+ LIBYUV_API
+ int ARGBRotate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int src_width, int src_height, enum RotationMode mode);
+
+Same as I420, but implementation is less optimized - reads columns and writes rows.
+
+Rotate by 90, or any angle, can be achieved using ARGBAffine.
+
+# Mirror - Horizontal Flip
+
+Mirror functions for horizontally flipping an image, which can be useful for 'self view' of a webcam.
+
+ int I420Mirror(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+ int ARGBMirror(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+Mirror functionality can also be achieved with the I420Scale and ARGBScale functions by passing negative width and/or height.
+
+# Invert - Vertical Flip
+
+Inverting can be achieved with almost any libyuv function by passing a negative source height.
+
+I420Mirror and ARGBMirror can also be used to rotate by 180 degrees by passing a negative height.
+
+
diff --git a/chromium/third_party/libyuv/drover.properties b/chromium/third_party/libyuv/drover.properties
deleted file mode 100644
index f16d4e0d4c1..00000000000
--- a/chromium/third_party/libyuv/drover.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-BASE_URL = "https://libyuv.googlecode.com/svn"
-TRUNK_URL = BASE_URL + "/trunk"
-BRANCH_URL = BASE_URL + "/branches/$branch"
-SKIP_CHECK_WORKING = True
-FILE_PATTERN = file_pattern_ = r"[ ]+([MADUC])[ ]+/((?:trunk|branches/.*?)(.*)/(.*))"
-PROMPT_FOR_AUTHOR = False
diff --git a/chromium/third_party/libyuv/include/libyuv/compare_row.h b/chromium/third_party/libyuv/include/libyuv/compare_row.h
index 4562da04700..f5836da11da 100644
--- a/chromium/third_party/libyuv/include/libyuv/compare_row.h
+++ b/chromium/third_party/libyuv/include/libyuv/compare_row.h
@@ -36,7 +36,8 @@ extern "C" {
#endif // clang >= 3.4
#endif // __clang__
-#if defined(_M_IX86) && (defined(VISUALC_HAS_AVX2) || defined(CLANG_HAS_AVX2))
+#if !defined(LIBYUV_DISABLE_X86) && \
+ defined(_M_IX86) && (defined(VISUALC_HAS_AVX2) || defined(CLANG_HAS_AVX2))
#define HAS_HASHDJB2_AVX2
#endif
diff --git a/chromium/third_party/libyuv/include/libyuv/cpu_id.h b/chromium/third_party/libyuv/include/libyuv/cpu_id.h
index a83edd501d3..2ccc3e7dd3b 100644
--- a/chromium/third_party/libyuv/include/libyuv/cpu_id.h
+++ b/chromium/third_party/libyuv/include/libyuv/cpu_id.h
@@ -41,7 +41,7 @@ static const int kCpuHasAVX3 = 0x2000;
// These flags are only valid on MIPS processors.
static const int kCpuHasMIPS = 0x10000;
-static const int kCpuHasMIPS_DSPR2 = 0x20000;
+static const int kCpuHasDSPR2 = 0x20000;
// Internal function used to auto-init.
LIBYUV_API
diff --git a/chromium/third_party/libyuv/include/libyuv/planar_functions.h b/chromium/third_party/libyuv/include/libyuv/planar_functions.h
index 9d30225d4cb..95870b9aea6 100644
--- a/chromium/third_party/libyuv/include/libyuv/planar_functions.h
+++ b/chromium/third_party/libyuv/include/libyuv/planar_functions.h
@@ -384,12 +384,6 @@ int ARGBUnattenuate(const uint8* src_argb, int src_stride_argb,
uint8* dst_argb, int dst_stride_argb,
int width, int height);
-// Convert MJPG to ARGB.
-LIBYUV_API
-int MJPGToARGB(const uint8* sample, size_t sample_size,
- uint8* argb, int argb_stride,
- int w, int h, int dw, int dh);
-
// Internal function - do not call directly.
// Computes table of cumulative sum for image where the value is the sum
// of all values above and to the left of the entry. Used by ARGBBlur.
diff --git a/chromium/third_party/libyuv/include/libyuv/rotate_row.h b/chromium/third_party/libyuv/include/libyuv/rotate_row.h
index e3838295ce1..d9f4d079287 100644
--- a/chromium/third_party/libyuv/include/libyuv/rotate_row.h
+++ b/chromium/third_party/libyuv/include/libyuv/rotate_row.h
@@ -51,8 +51,8 @@ extern "C" {
#if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \
defined(__mips__) && \
defined(__mips_dsp) && (__mips_dsp_rev >= 2)
-#define HAS_TRANSPOSEWX8_MIPS_DSPR2
-#define HAS_TRANSPOSEUVWX8_MIPS_DSPR2
+#define HAS_TRANSPOSEWX8_DSPR2
+#define HAS_TRANSPOSEUVWX8_DSPR2
#endif // defined(__mips__)
void TransposeWxH_C(const uint8* src, int src_stride,
@@ -66,10 +66,10 @@ void TransposeWx8_SSSE3(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
void TransposeWx8_Fast_SSSE3(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
-void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+void TransposeWx8_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_Fast_DSPR2(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
-void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride,
- uint8* dst, int dst_stride, int width);
void TransposeWx8_Any_NEON(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
@@ -77,8 +77,8 @@ void TransposeWx8_Any_SSSE3(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
void TransposeWx8_Fast_Any_SSSE3(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width);
-void TransposeWx8_Any_MIPS_DSPR2(const uint8* src, int src_stride,
- uint8* dst, int dst_stride, int width);
+void TransposeWx8_Any_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
void TransposeUVWxH_C(const uint8* src, int src_stride,
uint8* dst_a, int dst_stride_a,
@@ -94,9 +94,9 @@ void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
void TransposeUVWx8_NEON(const uint8* src, int src_stride,
uint8* dst_a, int dst_stride_a,
uint8* dst_b, int dst_stride_b, int width);
-void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
- uint8* dst_a, int dst_stride_a,
- uint8* dst_b, int dst_stride_b, int width);
+void TransposeUVWx8_DSPR2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
void TransposeUVWx8_Any_SSE2(const uint8* src, int src_stride,
uint8* dst_a, int dst_stride_a,
@@ -104,9 +104,9 @@ void TransposeUVWx8_Any_SSE2(const uint8* src, int src_stride,
void TransposeUVWx8_Any_NEON(const uint8* src, int src_stride,
uint8* dst_a, int dst_stride_a,
uint8* dst_b, int dst_stride_b, int width);
-void TransposeUVWx8_Any_MIPS_DSPR2(const uint8* src, int src_stride,
- uint8* dst_a, int dst_stride_a,
- uint8* dst_b, int dst_stride_b, int width);
+void TransposeUVWx8_Any_DSPR2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libyuv/include/libyuv/row.h b/chromium/third_party/libyuv/include/libyuv/row.h
index f7620eea102..60115244874 100644
--- a/chromium/third_party/libyuv/include/libyuv/row.h
+++ b/chromium/third_party/libyuv/include/libyuv/row.h
@@ -93,7 +93,6 @@ extern "C" {
#define HAS_ARGBTORGB24ROW_SSSE3
#define HAS_ARGBTORGB565DITHERROW_SSE2
#define HAS_ARGBTORGB565ROW_SSE2
-#define HAS_ARGBTOUV422ROW_SSSE3
#define HAS_ARGBTOUV444ROW_SSSE3
#define HAS_ARGBTOUVJROW_SSSE3
#define HAS_ARGBTOUVROW_SSSE3
@@ -105,17 +104,6 @@ extern "C" {
#define HAS_COPYROW_SSE2
#define HAS_H422TOARGBROW_SSSE3
#define HAS_I400TOARGBROW_SSE2
-// The following functions fail on gcc/clang 32 bit with fpic and framepointer.
-// caveat: clangcl uses row_win.cc which works.
-#if defined(NDEBUG) || !(defined(_DEBUG) && defined(__i386__)) || \
- !defined(__i386__) || defined(_MSC_VER)
-// TODO(fbarchard): fix build error on x86 debug
-// https://code.google.com/p/libyuv/issues/detail?id=524
-#define HAS_I411TOARGBROW_SSSE3
-// TODO(fbarchard): fix build error on android_full_debug=1
-// https://code.google.com/p/libyuv/issues/detail?id=517
-#define HAS_I422ALPHATOARGBROW_SSSE3
-#endif
#define HAS_I422TOARGB1555ROW_SSSE3
#define HAS_I422TOARGB4444ROW_SSSE3
#define HAS_I422TOARGBROW_SSSE3
@@ -129,7 +117,6 @@ extern "C" {
#define HAS_J422TOARGBROW_SSSE3
#define HAS_MERGEUVROW_SSE2
#define HAS_MIRRORROW_SSSE3
-#define HAS_MIRRORROW_UV_SSSE3
#define HAS_MIRRORUVROW_SSSE3
#define HAS_NV12TOARGBROW_SSSE3
#define HAS_NV12TORGB565ROW_SSSE3
@@ -173,6 +160,7 @@ extern "C" {
#define HAS_ARGBSHADEROW_SSE2
#define HAS_ARGBSUBTRACTROW_SSE2
#define HAS_ARGBUNATTENUATEROW_SSE2
+#define HAS_BLENDPLANEROW_SSSE3
#define HAS_COMPUTECUMULATIVESUMROW_SSE2
#define HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
#define HAS_INTERPOLATEROW_SSSE3
@@ -182,7 +170,18 @@ extern "C" {
#define HAS_SOBELXROW_SSE2
#define HAS_SOBELXYROW_SSE2
#define HAS_SOBELYROW_SSE2
-#define HAS_BLENDPLANEROW_SSSE3
+
+// The following functions fail on gcc/clang 32 bit with fpic and framepointer.
+// caveat: clangcl uses row_win.cc which works.
+#if defined(NDEBUG) || !(defined(_DEBUG) && defined(__i386__)) || \
+ !defined(__i386__) || defined(_MSC_VER)
+// TODO(fbarchard): fix build error on x86 debug
+// https://code.google.com/p/libyuv/issues/detail?id=524
+#define HAS_I411TOARGBROW_SSSE3
+// TODO(fbarchard): fix build error on android_full_debug=1
+// https://code.google.com/p/libyuv/issues/detail?id=517
+#define HAS_I422ALPHATOARGBROW_SSSE3
+#endif
#endif
// The following are available on all x86 platforms, but
@@ -196,6 +195,7 @@ extern "C" {
#define HAS_ARGBPOLYNOMIALROW_AVX2
#define HAS_ARGBSHUFFLEROW_AVX2
#define HAS_ARGBTORGB565DITHERROW_AVX2
+#define HAS_ARGBTOUVJROW_AVX2
#define HAS_ARGBTOUVROW_AVX2
#define HAS_ARGBTOYJROW_AVX2
#define HAS_ARGBTOYROW_AVX2
@@ -207,15 +207,20 @@ extern "C" {
// https://code.google.com/p/libyuv/issues/detail?id=517
#define HAS_I422ALPHATOARGBROW_AVX2
#endif
-#define HAS_I444TOARGBROW_AVX2
+#define HAS_I411TOARGBROW_AVX2
+#define HAS_I422TOARGB1555ROW_AVX2
+#define HAS_I422TOARGB4444ROW_AVX2
#define HAS_I422TOARGBROW_AVX2
#define HAS_I422TORGB24ROW_AVX2
+#define HAS_I422TORGB565ROW_AVX2
#define HAS_I422TORGBAROW_AVX2
+#define HAS_I444TOARGBROW_AVX2
#define HAS_INTERPOLATEROW_AVX2
#define HAS_J422TOARGBROW_AVX2
#define HAS_MERGEUVROW_AVX2
#define HAS_MIRRORROW_AVX2
#define HAS_NV12TOARGBROW_AVX2
+#define HAS_NV12TORGB565ROW_AVX2
#define HAS_NV21TOARGBROW_AVX2
#define HAS_SPLITUVROW_AVX2
#define HAS_UYVYTOARGBROW_AVX2
@@ -245,12 +250,7 @@ extern "C" {
#define HAS_ARGBTOARGB1555ROW_AVX2
#define HAS_ARGBTOARGB4444ROW_AVX2
#define HAS_ARGBTORGB565ROW_AVX2
-#define HAS_I411TOARGBROW_AVX2
-#define HAS_I422TOARGB1555ROW_AVX2
-#define HAS_I422TOARGB4444ROW_AVX2
-#define HAS_I422TORGB565ROW_AVX2
#define HAS_J400TOARGBROW_AVX2
-#define HAS_NV12TORGB565ROW_AVX2
#define HAS_RGB565TOARGBROW_AVX2
#endif
@@ -264,7 +264,6 @@ extern "C" {
// The following are available on Neon platforms:
#if !defined(LIBYUV_DISABLE_NEON) && \
(defined(__aarch64__) || defined(__ARM_NEON__) || defined(LIBYUV_NEON))
-#define HAS_I422ALPHATOARGBROW_NEON
#define HAS_ABGRTOUVROW_NEON
#define HAS_ABGRTOYROW_NEON
#define HAS_ARGB1555TOARGBROW_NEON
@@ -281,7 +280,6 @@ extern "C" {
#define HAS_ARGBTORGB565DITHERROW_NEON
#define HAS_ARGBTORGB565ROW_NEON
#define HAS_ARGBTOUV411ROW_NEON
-#define HAS_ARGBTOUV422ROW_NEON
#define HAS_ARGBTOUV444ROW_NEON
#define HAS_ARGBTOUVJROW_NEON
#define HAS_ARGBTOUVROW_NEON
@@ -292,6 +290,7 @@ extern "C" {
#define HAS_COPYROW_NEON
#define HAS_I400TOARGBROW_NEON
#define HAS_I411TOARGBROW_NEON
+#define HAS_I422ALPHATOARGBROW_NEON
#define HAS_I422TOARGB1555ROW_NEON
#define HAS_I422TOARGB4444ROW_NEON
#define HAS_I422TOARGBROW_NEON
@@ -357,11 +356,11 @@ extern "C" {
(_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6)
#define HAS_COPYROW_MIPS
#if defined(__mips_dsp) && (__mips_dsp_rev >= 2)
-#define HAS_I422TOARGBROW_MIPS_DSPR2
-#define HAS_INTERPOLATEROW_MIPS_DSPR2
-#define HAS_MIRRORROW_MIPS_DSPR2
-#define HAS_MIRRORUVROW_MIPS_DSPR2
-#define HAS_SPLITUVROW_MIPS_DSPR2
+#define HAS_I422TOARGBROW_DSPR2
+#define HAS_INTERPOLATEROW_DSPR2
+#define HAS_MIRRORROW_DSPR2
+#define HAS_MIRRORUVROW_DSPR2
+#define HAS_SPLITUVROW_DSPR2
#endif
#endif
@@ -648,8 +647,6 @@ void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int width);
void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int width);
void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int width);
-void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width);
void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int width);
void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb,
@@ -712,8 +709,8 @@ void ARGB4444ToYRow_Any_NEON(const uint8* src_argb4444, uint8* dst_y,
void ARGBToUVRow_AVX2(const uint8* src_argb, int src_stride_argb,
uint8* dst_u, uint8* dst_v, int width);
-void ARGBToUVRow_Any_AVX2(const uint8* src_argb, int src_stride_argb,
- uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJRow_AVX2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
void ARGBToUVRow_SSSE3(const uint8* src_argb, int src_stride_argb,
uint8* dst_u, uint8* dst_v, int width);
void ARGBToUVJRow_SSSE3(const uint8* src_argb, int src_stride_argb,
@@ -724,6 +721,10 @@ void ABGRToUVRow_SSSE3(const uint8* src_abgr, int src_stride_abgr,
uint8* dst_u, uint8* dst_v, int width);
void RGBAToUVRow_SSSE3(const uint8* src_rgba, int src_stride_rgba,
uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVRow_Any_AVX2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJRow_Any_AVX2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
void ARGBToUVRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb,
uint8* dst_u, uint8* dst_v, int width);
void ARGBToUVJRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb,
@@ -736,8 +737,6 @@ void RGBAToUVRow_Any_SSSE3(const uint8* src_rgba, int src_stride_rgba,
uint8* dst_u, uint8* dst_v, int width);
void ARGBToUV444Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int width);
-void ARGBToUV422Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width);
void ARGBToUV411Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int width);
void ARGBToUVRow_Any_NEON(const uint8* src_argb, int src_stride_argb,
@@ -788,24 +787,15 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb,
void ARGBToUV444Row_Any_SSSE3(const uint8* src_argb,
uint8* dst_u, uint8* dst_v, int width);
-void ARGBToUV422Row_SSSE3(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width);
-void ARGBToUV422Row_Any_SSSE3(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width);
-
void ARGBToUV444Row_C(const uint8* src_argb,
uint8* dst_u, uint8* dst_v, int width);
-void ARGBToUV422Row_C(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width);
void ARGBToUV411Row_C(const uint8* src_argb,
uint8* dst_u, uint8* dst_v, int width);
-void ARGBToUVJ422Row_C(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width);
void MirrorRow_AVX2(const uint8* src, uint8* dst, int width);
void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width);
void MirrorRow_NEON(const uint8* src, uint8* dst, int width);
-void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width);
+void MirrorRow_DSPR2(const uint8* src, uint8* dst, int width);
void MirrorRow_C(const uint8* src, uint8* dst, int width);
void MirrorRow_Any_AVX2(const uint8* src, uint8* dst, int width);
void MirrorRow_Any_SSSE3(const uint8* src, uint8* dst, int width);
@@ -816,10 +806,9 @@ void MirrorUVRow_SSSE3(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
-void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
- int width);
-void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
- int width);
+void MirrorUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
+void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width);
void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width);
void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width);
@@ -836,16 +825,16 @@ void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
-void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
- int width);
+void SplitUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
void SplitUVRow_Any_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void SplitUVRow_Any_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
void SplitUVRow_Any_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width);
-void SplitUVRow_Any_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
- int width);
+void SplitUVRow_Any_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
void MergeUVRow_C(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
int width);
@@ -1625,18 +1614,18 @@ void UYVYToARGBRow_Any_NEON(const uint8* src_uyvy,
uint8* dst_argb,
const struct YuvConstants* yuvconstants,
int width);
-void I422ToARGBRow_MIPS_DSPR2(const uint8* src_y,
- const uint8* src_u,
- const uint8* src_v,
- uint8* dst_argb,
- const struct YuvConstants* yuvconstants,
- int width);
-void I422ToARGBRow_MIPS_DSPR2(const uint8* src_y,
- const uint8* src_u,
- const uint8* src_v,
- uint8* dst_argb,
- const struct YuvConstants* yuvconstants,
- int width);
+void I422ToARGBRow_DSPR2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width);
+void I422ToARGBRow_DSPR2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width);
void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width);
void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
@@ -1846,9 +1835,9 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
void InterpolateRow_NEON(uint8* dst_ptr, const uint8* src_ptr,
ptrdiff_t src_stride_ptr, int width,
int source_y_fraction);
-void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
- ptrdiff_t src_stride_ptr, int width,
- int source_y_fraction);
+void InterpolateRow_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
void InterpolateRow_Any_NEON(uint8* dst_ptr, const uint8* src_ptr,
ptrdiff_t src_stride_ptr, int width,
int source_y_fraction);
@@ -1858,9 +1847,9 @@ void InterpolateRow_Any_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
void InterpolateRow_Any_AVX2(uint8* dst_ptr, const uint8* src_ptr,
ptrdiff_t src_stride_ptr, int width,
int source_y_fraction);
-void InterpolateRow_Any_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
- ptrdiff_t src_stride_ptr, int width,
- int source_y_fraction);
+void InterpolateRow_Any_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr,
ptrdiff_t src_stride_ptr,
diff --git a/chromium/third_party/libyuv/include/libyuv/scale_row.h b/chromium/third_party/libyuv/include/libyuv/scale_row.h
index b52ce40019c..a3b3ede60e3 100644
--- a/chromium/third_party/libyuv/include/libyuv/scale_row.h
+++ b/chromium/third_party/libyuv/include/libyuv/scale_row.h
@@ -90,10 +90,10 @@ extern "C" {
// The following are available on Mips platforms:
#if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \
defined(__mips__) && defined(__mips_dsp) && (__mips_dsp_rev >= 2)
-#define HAS_SCALEROWDOWN2_MIPS_DSPR2
-#define HAS_SCALEROWDOWN4_MIPS_DSPR2
-#define HAS_SCALEROWDOWN34_MIPS_DSPR2
-#define HAS_SCALEROWDOWN38_MIPS_DSPR2
+#define HAS_SCALEROWDOWN2_DSPR2
+#define HAS_SCALEROWDOWN4_DSPR2
+#define HAS_SCALEROWDOWN34_DSPR2
+#define HAS_SCALEROWDOWN38_DSPR2
#endif
// Scale ARGB vertically with bilinear interpolation.
@@ -468,28 +468,26 @@ void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr,
void ScaleFilterCols_Any_NEON(uint8* dst_ptr, const uint8* src_ptr,
int dst_width, int x, int dx);
-
-void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* d, int dst_width);
-void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* d, int dst_width);
-void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width);
-void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst_ptr, int dst_width);
-void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr,
- ptrdiff_t src_stride,
- uint8* dst_ptr, int dst_width);
+void ScaleRowDown2_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown34_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown34_0_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown34_1_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown38_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown38_2_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_3_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/libyuv/include/libyuv/version.h b/chromium/third_party/libyuv/include/libyuv/version.h
index 72958db3a1c..ebea641aeef 100644
--- a/chromium/third_party/libyuv/include/libyuv/version.h
+++ b/chromium/third_party/libyuv/include/libyuv/version.h
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1563
+#define LIBYUV_VERSION 1579
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
diff --git a/chromium/third_party/libyuv/libyuv.gyp b/chromium/third_party/libyuv/libyuv.gyp
index b8e1c580d10..44dec09eeca 100644
--- a/chromium/third_party/libyuv/libyuv.gyp
+++ b/chromium/third_party/libyuv/libyuv.gyp
@@ -17,19 +17,6 @@
'GCC_PRECOMPILE_PREFIX_HEADER': 'NO',
},
'variables': {
- 'variables': {
- # Disable use of sysroot for Linux. It's enabled by default in Chromium,
- # but it currently lacks the libudev-dev package.
- # TODO(kjellander): Remove when crbug.com/561584 is fixed.
- 'conditions': [
- ['target_arch=="ia32" or target_arch=="x64"', {
- 'use_sysroot': 0,
- }, {
- 'use_sysroot%': 1,
- }],
- ],
- },
- 'use_sysroot%': '<(use_sysroot)',
'use_system_libjpeg%': 0,
'libyuv_disable_jpeg%': 0,
# 'chromium_code' treats libyuv as internal and increases warning level.
@@ -55,7 +42,7 @@
# Change type to 'shared_library' to build .so or .dll files.
'type': 'static_library',
'variables': {
- # 'optimize': 'max', # enable O2 and ltcg.
+ 'optimize': 'max', # enable O2 and ltcg.
},
# Allows libyuv.a redistributable library without external dependencies.
'standalone_static_library': 1,
diff --git a/chromium/third_party/libyuv/libyuv_test.gyp b/chromium/third_party/libyuv/libyuv_test.gyp
index 5358ffd7653..0b1c825aaed 100644
--- a/chromium/third_party/libyuv/libyuv_test.gyp
+++ b/chromium/third_party/libyuv/libyuv_test.gyp
@@ -9,7 +9,6 @@
{
'variables': {
'libyuv_disable_jpeg%': 0,
- 'libyuv_enable_svn%': 0,
},
'targets': [
{
@@ -28,14 +27,6 @@
'export_dependent_settings': [
'<(DEPTH)/testing/gtest.gyp:gtest',
],
- 'defines': [
- # Enable the following 3 macros to turn off assembly for specified CPU.
- # 'LIBYUV_DISABLE_X86',
- # 'LIBYUV_DISABLE_NEON',
- # 'LIBYUV_DISABLE_MIPS',
- # Enable the following macro to build libyuv as a shared library (dll).
- # 'LIBYUV_USING_SHARED_LIBRARY',
- ],
'sources': [
# headers
'unit_test/unit_test.h',
@@ -54,14 +45,8 @@
'unit_test/scale_test.cc',
'unit_test/unit_test.cc',
'unit_test/video_common_test.cc',
- 'unit_test/version_test.cc',
],
'conditions': [
- [ 'libyuv_enable_svn == 1', {
- 'defines': [
- 'LIBYUV_SVNREVISION="<!(svnversion -n)"',
- ],
- }],
['OS=="linux"', {
'cflags': [
'-fexceptions',
@@ -76,7 +61,15 @@
'xcode_settings': {
'DEBUGGING_SYMBOLS': 'YES',
'DEBUG_INFORMATION_FORMAT' : 'dwarf-with-dsym',
+ # Work around compile issue with isosim.mm, see
+ # https://code.google.com/p/libyuv/issues/detail?id=548 for details.
+ 'WARNING_CFLAGS': [
+ '-Wno-sometimes-uninitialized',
+ ],
},
+ 'cflags': [
+ '-Wno-sometimes-uninitialized',
+ ],
}],
[ 'OS != "ios" and libyuv_disable_jpeg != 1', {
'defines': [
@@ -97,10 +90,24 @@
'defines': [
'LIBYUV_NEON'
],
- }],
+ }],
+ # MemorySanitizer does not support assembly code yet.
+ # http://crbug.com/344505
+ [ 'msan == 1', {
+ 'defines': [
+ 'LIBYUV_DISABLE_X86',
+ ],
+ }],
], # conditions
+ 'defines': [
+ # Enable the following 3 macros to turn off assembly for specified CPU.
+ # 'LIBYUV_DISABLE_X86',
+ # 'LIBYUV_DISABLE_NEON',
+ # 'LIBYUV_DISABLE_MIPS',
+ # Enable the following macro to build libyuv as a shared library (dll).
+ # 'LIBYUV_USING_SHARED_LIBRARY',
+ ],
},
-
{
'target_name': 'compare',
'type': 'executable',
diff --git a/chromium/third_party/libyuv/setup_links.py b/chromium/third_party/libyuv/setup_links.py
index 99b526076c7..8a131b4f211 100755
--- a/chromium/third_party/libyuv/setup_links.py
+++ b/chromium/third_party/libyuv/setup_links.py
@@ -49,7 +49,7 @@ DIRECTORIES = [
'third_party/libjpeg_turbo',
'third_party/libsrtp',
'third_party/libudev',
- 'third_party/libvpx_new',
+ 'third_party/libvpx',
'third_party/libyuv',
'third_party/llvm-build',
'third_party/lss',
@@ -87,6 +87,7 @@ if 'android' in target_os:
'third_party/android_tools',
'third_party/appurify-python',
'third_party/ashmem',
+ 'third_party/catapult',
'third_party/ijar',
'third_party/jsr-305',
'third_party/junit',
@@ -98,7 +99,8 @@ if 'android' in target_os:
'third_party/robolectric',
'tools/android',
'tools/grit',
- 'tools/relocation_packer'
+ 'tools/relocation_packer',
+ 'tools/telemetry',
]
if 'ios' in target_os:
DIRECTORIES.append('third_party/class-dump')
diff --git a/chromium/third_party/libyuv/source/convert.cc b/chromium/third_party/libyuv/source/convert.cc
index 5dc279f8a67..e332bc505c4 100644
--- a/chromium/third_party/libyuv/source/convert.cc
+++ b/chromium/third_party/libyuv/source/convert.cc
@@ -303,14 +303,14 @@ static int X420ToI420(const uint8* src_y,
}
}
#endif
-#if defined(HAS_SPLITUVROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_SPLITUVROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src_uv, 4) && IS_ALIGNED(src_stride_uv, 4) &&
IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) &&
IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) {
- SplitUVRow = SplitUVRow_Any_MIPS_DSPR2;
+ SplitUVRow = SplitUVRow_Any_DSPR2;
if (IS_ALIGNED(halfwidth, 16)) {
- SplitUVRow = SplitUVRow_MIPS_DSPR2;
+ SplitUVRow = SplitUVRow_DSPR2;
}
}
#endif
diff --git a/chromium/third_party/libyuv/source/convert_argb.cc b/chromium/third_party/libyuv/source/convert_argb.cc
index cf3d7228905..e586f7043ce 100644
--- a/chromium/third_party/libyuv/source/convert_argb.cc
+++ b/chromium/third_party/libyuv/source/convert_argb.cc
@@ -92,13 +92,13 @@ static int I420ToARGBMatrix(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422TOARGBROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
- I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ I422ToARGBRow = I422ToARGBRow_DSPR2;
}
#endif
@@ -262,13 +262,13 @@ static int I422ToARGBMatrix(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422TOARGBROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
- I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ I422ToARGBRow = I422ToARGBRow_DSPR2;
}
#endif
@@ -607,13 +607,13 @@ static int I420AlphaToARGBMatrix(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422ALPHATOARGBROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422ALPHATOARGBROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
- I422AlphaToARGBRow = I422AlphaToARGBRow_MIPS_DSPR2;
+ I422AlphaToARGBRow = I422AlphaToARGBRow_DSPR2;
}
#endif
#if defined(HAS_ARGBATTENUATEROW_SSSE3)
diff --git a/chromium/third_party/libyuv/source/convert_from.cc b/chromium/third_party/libyuv/source/convert_from.cc
index 9c138d936da..3bc9eb1be45 100644
--- a/chromium/third_party/libyuv/source/convert_from.cc
+++ b/chromium/third_party/libyuv/source/convert_from.cc
@@ -445,7 +445,7 @@ int I420ToNV21(const uint8* src_y, int src_stride_y,
return I420ToNV12(src_y, src_stride_y,
src_v, src_stride_v,
src_u, src_stride_u,
- dst_y, src_stride_y,
+ dst_y, dst_stride_y,
dst_vu, dst_stride_vu,
width, height);
}
@@ -498,13 +498,13 @@ static int I420ToRGBAMatrix(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422TORGBAROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422TORGBAROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_rgba, 4) && IS_ALIGNED(dst_stride_rgba, 4)) {
- I422ToRGBARow = I422ToRGBARow_MIPS_DSPR2;
+ I422ToRGBARow = I422ToRGBARow_DSPR2;
}
#endif
@@ -888,12 +888,12 @@ int I420ToRGB565Dither(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422TOARGBROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2)) {
- I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ I422ToARGBRow = I422ToARGBRow_DSPR2;
}
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
diff --git a/chromium/third_party/libyuv/source/convert_from_argb.cc b/chromium/third_party/libyuv/source/convert_from_argb.cc
index 6796343c0fb..2a8682b7eb4 100644
--- a/chromium/third_party/libyuv/source/convert_from_argb.cc
+++ b/chromium/third_party/libyuv/source/convert_from_argb.cc
@@ -109,13 +109,16 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb,
uint8* dst_v, int dst_stride_v,
int width, int height) {
int y;
- void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) = ARGBToUV422Row_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
ARGBToYRow_C;
- if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ if (!src_argb ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
return -1;
}
+ // Negative height means invert the image.
if (height < 0) {
height = -height;
src_argb = src_argb + (height - 1) * src_stride_argb;
@@ -130,34 +133,22 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb,
height = 1;
src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
}
-#if defined(HAS_ARGBTOUV422ROW_SSSE3)
- if (TestCpuFlag(kCpuHasSSSE3)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_SSSE3;
- }
- }
-#endif
-#if defined(HAS_ARGBTOUV422ROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_NEON;
- }
- }
-#endif
-#if defined(HAS_ARGBTOYROW_SSSE3)
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
ARGBToYRow = ARGBToYRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
ARGBToYRow = ARGBToYRow_SSSE3;
}
}
#endif
-#if defined(HAS_ARGBTOYROW_AVX2)
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
ARGBToYRow = ARGBToYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
ARGBToYRow = ARGBToYRow_AVX2;
}
}
@@ -170,9 +161,17 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb,
}
}
#endif
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
- ARGBToUV422Row(src_argb, dst_u, dst_v, width);
+ ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
ARGBToYRow(src_argb, dst_y, width);
src_argb += src_stride_argb;
dst_y += dst_stride_y;
@@ -478,8 +477,8 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
uint8* dst_yuy2, int dst_stride_yuy2,
int width, int height) {
int y;
- void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) = ARGBToUV422Row_C;
+ void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
ARGBToYRow_C;
void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
@@ -502,34 +501,22 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
height = 1;
src_stride_argb = dst_stride_yuy2 = 0;
}
-#if defined(HAS_ARGBTOUV422ROW_SSSE3)
- if (TestCpuFlag(kCpuHasSSSE3)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_SSSE3;
- }
- }
-#endif
-#if defined(HAS_ARGBTOUV422ROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_NEON;
- }
- }
-#endif
-#if defined(HAS_ARGBTOYROW_SSSE3)
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
ARGBToYRow = ARGBToYRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
ARGBToYRow = ARGBToYRow_SSSE3;
}
}
#endif
-#if defined(HAS_ARGBTOYROW_AVX2)
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
ARGBToYRow = ARGBToYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
ARGBToYRow = ARGBToYRow_AVX2;
}
}
@@ -542,7 +529,14 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
}
}
#endif
-
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
@@ -567,7 +561,7 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
uint8* row_v = row_u + ((width + 63) & ~63) / 2;
for (y = 0; y < height; ++y) {
- ARGBToUV422Row(src_argb, row_u, row_v, width);
+ ARGBToUVRow(src_argb, 0, row_u, row_v, width);
ARGBToYRow(src_argb, row_y, width);
I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
src_argb += src_stride_argb;
@@ -585,8 +579,8 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
uint8* dst_uyvy, int dst_stride_uyvy,
int width, int height) {
int y;
- void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) = ARGBToUV422Row_C;
+ void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
ARGBToYRow_C;
void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
@@ -609,34 +603,22 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
height = 1;
src_stride_argb = dst_stride_uyvy = 0;
}
-#if defined(HAS_ARGBTOUV422ROW_SSSE3)
- if (TestCpuFlag(kCpuHasSSSE3)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_SSSE3;
- }
- }
-#endif
-#if defined(HAS_ARGBTOUV422ROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUV422Row = ARGBToUV422Row_NEON;
- }
- }
-#endif
-#if defined(HAS_ARGBTOYROW_SSSE3)
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
ARGBToYRow = ARGBToYRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
ARGBToYRow = ARGBToYRow_SSSE3;
}
}
#endif
-#if defined(HAS_ARGBTOYROW_AVX2)
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
ARGBToYRow = ARGBToYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
ARGBToYRow = ARGBToYRow_AVX2;
}
}
@@ -649,7 +631,14 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
}
}
#endif
-
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
@@ -674,7 +663,7 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
uint8* row_v = row_u + ((width + 63) & ~63) / 2;
for (y = 0; y < height; ++y) {
- ARGBToUV422Row(src_argb, row_u, row_v, width);
+ ARGBToUVRow(src_argb, 0, row_u, row_v, width);
ARGBToYRow(src_argb, row_y, width);
I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
src_argb += src_stride_argb;
@@ -1157,21 +1146,24 @@ int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
return 0;
}
-// ARGB little endian (bgra in memory) to J422
+// Convert ARGB to J422. (JPeg full range I422).
LIBYUV_API
int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
- uint8* dst_y, int dst_stride_y,
+ uint8* dst_yj, int dst_stride_yj,
uint8* dst_u, int dst_stride_u,
uint8* dst_v, int dst_stride_v,
int width, int height) {
int y;
- void (*ARGBToUVJ422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) = ARGBToUVJ422Row_C;
- void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_y, int width) =
+ void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
+ void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
ARGBToYJRow_C;
- if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ if (!src_argb ||
+ !dst_yj || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
return -1;
}
+ // Negative height means invert the image.
if (height < 0) {
height = -height;
src_argb = src_argb + (height - 1) * src_stride_argb;
@@ -1179,34 +1171,19 @@ int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
}
// Coalesce rows.
if (src_stride_argb == width * 4 &&
- dst_stride_y == width &&
+ dst_stride_yj == width &&
dst_stride_u * 2 == width &&
dst_stride_v * 2 == width) {
width *= height;
height = 1;
- src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
- }
-#if defined(HAS_ARGBTOUVJ422ROW_SSSE3)
- if (TestCpuFlag(kCpuHasSSSE3)) {
- ARGBToUVJ422Row = ARGBToUVJ422Row_Any_SSSE3;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUVJ422Row = ARGBToUVJ422Row_SSSE3;
- }
+ src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
}
-#endif
-#if defined(HAS_ARGBTOUVJ422ROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- ARGBToUVJ422Row = ARGBToUVJ422Row_Any_NEON;
- if (IS_ALIGNED(width, 16)) {
- ARGBToUVJ422Row = ARGBToUVJ422Row_NEON;
- }
- }
-#endif
-
-#if defined(HAS_ARGBTOYJROW_SSSE3)
+#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJRow = ARGBToUVJRow_SSSE3;
ARGBToYJRow = ARGBToYJRow_SSSE3;
}
}
@@ -1227,12 +1204,20 @@ int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
}
}
#endif
+#if defined(HAS_ARGBTOUVJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJRow = ARGBToUVJRow_NEON;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
- ARGBToUVJ422Row(src_argb, dst_u, dst_v, width);
- ARGBToYJRow(src_argb, dst_y, width);
+ ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
+ ARGBToYJRow(src_argb, dst_yj, width);
src_argb += src_stride_argb;
- dst_y += dst_stride_y;
+ dst_yj += dst_stride_yj;
dst_u += dst_stride_u;
dst_v += dst_stride_v;
}
diff --git a/chromium/third_party/libyuv/source/cpu_id.cc b/chromium/third_party/libyuv/source/cpu_id.cc
index ff7bdbd92d0..d64d9d56572 100644
--- a/chromium/third_party/libyuv/source/cpu_id.cc
+++ b/chromium/third_party/libyuv/source/cpu_id.cc
@@ -10,12 +10,12 @@
#include "libyuv/cpu_id.h"
-#if defined(_MSC_VER) && !defined(__clang__)
+#if defined(_MSC_VER)
#include <intrin.h> // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
!defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
- defined(_MSC_VER) && !defined(__clang__) && (_MSC_FULL_VER >= 160040219)
+ defined(_MSC_VER) && (_MSC_FULL_VER >= 160040219)
#include <immintrin.h> // For _xgetbv()
#endif
@@ -48,7 +48,7 @@ extern "C" {
!defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
-#if defined(_MSC_VER) && !defined(__clang__)
+#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if (_MSC_FULL_VER >= 160040219)
__cpuidex((int*)(cpu_info), info_eax, info_ecx);
@@ -71,7 +71,7 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
}
#endif
// GCC version uses inline x86 assembly.
-#else // defined(_MSC_VER) && !defined(__clang__)
+#else // defined(_MSC_VER)
uint32 info_ebx, info_edx;
asm volatile (
#if defined( __i386__) && defined(__PIC__)
@@ -89,7 +89,7 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
cpu_info[1] = info_ebx;
cpu_info[2] = info_ecx;
cpu_info[3] = info_edx;
-#endif // defined(_MSC_VER) && !defined(__clang__)
+#endif // defined(_MSC_VER)
}
#else // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
@@ -251,15 +251,11 @@ int InitCpuFlags(void) {
#endif
#if defined(__mips__) && defined(__linux__)
#if defined(__mips_dspr2)
- cpu_info |= kCpuHasMIPS_DSPR2;
+ cpu_info |= kCpuHasDSPR2;
#endif
cpu_info |= kCpuHasMIPS;
-
- if (getenv("LIBYUV_DISABLE_MIPS")) {
- cpu_info &= ~kCpuHasMIPS;
- }
- if (getenv("LIBYUV_DISABLE_MIPS_DSPR2")) {
- cpu_info &= ~kCpuHasMIPS_DSPR2;
+ if (getenv("LIBYUV_DISABLE_DSPR2")) {
+ cpu_info &= ~kCpuHasDSPR2;
}
#endif
#if defined(__arm__) || defined(__aarch64__)
diff --git a/chromium/third_party/libyuv/source/planar_functions.cc b/chromium/third_party/libyuv/source/planar_functions.cc
index df3041e4956..851c0fea911 100644
--- a/chromium/third_party/libyuv/source/planar_functions.cc
+++ b/chromium/third_party/libyuv/source/planar_functions.cc
@@ -255,11 +255,11 @@ void MirrorPlane(const uint8* src_y, int src_stride_y,
}
#endif
// TODO(fbarchard): Mirror on mips handle unaligned memory.
-#if defined(HAS_MIRRORROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_MIRRORROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(dst_y, 4) && IS_ALIGNED(dst_stride_y, 4)) {
- MirrorRow = MirrorRow_MIPS_DSPR2;
+ MirrorRow = MirrorRow_DSPR2;
}
#endif
@@ -986,13 +986,13 @@ static int I422ToRGBAMatrix(const uint8* src_y, int src_stride_y,
}
}
#endif
-#if defined(HAS_I422TORGBAROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+#if defined(HAS_I422TORGBAROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_rgba, 4) && IS_ALIGNED(dst_stride_rgba, 4)) {
- I422ToRGBARow = I422ToRGBARow_MIPS_DSPR2;
+ I422ToRGBARow = I422ToRGBARow_DSPR2;
}
#endif
@@ -1906,13 +1906,13 @@ int InterpolatePlane(const uint8* src0, int src_stride0,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src0, 4) && IS_ALIGNED(src_stride0, 4) &&
IS_ALIGNED(src1, 4) && IS_ALIGNED(src_stride1, 4) &&
IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4) &&
IS_ALIGNED(width, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
#endif
@@ -2424,6 +2424,9 @@ int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y,
return 0;
}
+// TODO(fbarchard): Consider if width is even Y channel can be split
+// directly. A SplitUVRow_Odd function could copy the remaining chroma.
+
LIBYUV_API
int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2,
uint8* dst_y, int dst_stride_y,
@@ -2498,22 +2501,24 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2,
{
int awidth = halfwidth * 2;
- // 2 rows of uv
- align_buffer_64(rows, awidth * 2);
+ // row of y and 2 rows of uv
+ align_buffer_64(rows, awidth * 3);
for (y = 0; y < height - 1; y += 2) {
// Split Y from UV.
- SplitUVRow(src_yuy2, dst_y, rows, awidth);
- SplitUVRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y,
- rows + awidth, awidth);
- InterpolateRow(dst_uv, rows, awidth, awidth, 128);
+ SplitUVRow(src_yuy2, rows, rows + awidth, awidth);
+ memcpy(dst_y, rows, width);
+ SplitUVRow(src_yuy2 + src_stride_yuy2, rows, rows + awidth * 2, awidth);
+ memcpy(dst_y + dst_stride_y, rows, width);
+ InterpolateRow(dst_uv, rows + awidth, awidth, awidth, 128);
src_yuy2 += src_stride_yuy2 * 2;
dst_y += dst_stride_y * 2;
dst_uv += dst_stride_uv;
}
if (height & 1) {
// Split Y from UV.
- SplitUVRow(src_yuy2, dst_y, dst_uv, awidth);
+ SplitUVRow(src_yuy2, rows, dst_uv, awidth);
+ memcpy(dst_y, rows, width);
}
free_aligned_buffer_64(rows);
}
@@ -2594,22 +2599,24 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy,
{
int awidth = halfwidth * 2;
- // 2 rows of uv
- align_buffer_64(rows, awidth * 2);
+ // row of y and 2 rows of uv
+ align_buffer_64(rows, awidth * 3);
for (y = 0; y < height - 1; y += 2) {
// Split Y from UV.
- SplitUVRow(src_uyvy, rows, dst_y, awidth);
- SplitUVRow(src_uyvy + src_stride_uyvy, rows + awidth,
- dst_y + dst_stride_y, awidth);
- InterpolateRow(dst_uv, rows, awidth, awidth, 128);
+ SplitUVRow(src_uyvy, rows + awidth, rows, awidth);
+ memcpy(dst_y, rows, width);
+ SplitUVRow(src_uyvy + src_stride_uyvy, rows + awidth * 2, rows, awidth);
+ memcpy(dst_y + dst_stride_y, rows, width);
+ InterpolateRow(dst_uv, rows + awidth, awidth, awidth, 128);
src_uyvy += src_stride_uyvy * 2;
dst_y += dst_stride_y * 2;
dst_uv += dst_stride_uv;
}
if (height & 1) {
// Split Y from UV.
- SplitUVRow(src_uyvy, dst_uv, dst_y, awidth);
+ SplitUVRow(src_uyvy, dst_uv, rows, awidth);
+ memcpy(dst_y, rows, width);
}
free_aligned_buffer_64(rows);
}
diff --git a/chromium/third_party/libyuv/source/rotate.cc b/chromium/third_party/libyuv/source/rotate.cc
index 31e04af9c0b..01ea5c40744 100644
--- a/chromium/third_party/libyuv/source/rotate.cc
+++ b/chromium/third_party/libyuv/source/rotate.cc
@@ -49,13 +49,13 @@ void TransposePlane(const uint8* src, int src_stride,
}
}
#endif
-#if defined(HAS_TRANSPOSEWX8_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+#if defined(HAS_TRANSPOSEWX8_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2)) {
if (IS_ALIGNED(width, 4) &&
IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
- TransposeWx8 = TransposeWx8_Fast_MIPS_DSPR2;
+ TransposeWx8 = TransposeWx8_Fast_DSPR2;
} else {
- TransposeWx8 = TransposeWx8_MIPS_DSPR2;
+ TransposeWx8 = TransposeWx8_DSPR2;
}
}
#endif
@@ -134,11 +134,11 @@ void RotatePlane180(const uint8* src, int src_stride,
}
#endif
// TODO(fbarchard): Mirror on mips handle unaligned memory.
-#if defined(HAS_MIRRORROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_MIRRORROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4)) {
- MirrorRow = MirrorRow_MIPS_DSPR2;
+ MirrorRow = MirrorRow_DSPR2;
}
#endif
#if defined(HAS_COPYROW_SSE2)
@@ -203,10 +203,10 @@ void TransposeUV(const uint8* src, int src_stride,
}
}
#endif
-#if defined(HAS_TRANSPOSEUVWX8_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 2) &&
+#if defined(HAS_TRANSPOSEUVWX8_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 2) &&
IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
- TransposeUVWx8 = TransposeUVWx8_MIPS_DSPR2;
+ TransposeUVWx8 = TransposeUVWx8_DSPR2;
}
#endif
@@ -267,22 +267,22 @@ void RotateUV180(const uint8* src, int src_stride,
uint8* dst_b, int dst_stride_b,
int width, int height) {
int i;
- void (*MirrorRowUV)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) =
+ void (*MirrorUVRow)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) =
MirrorUVRow_C;
#if defined(HAS_MIRRORUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
- MirrorRowUV = MirrorUVRow_NEON;
+ MirrorUVRow = MirrorUVRow_NEON;
}
#endif
-#if defined(HAS_MIRRORROW_UV_SSSE3)
+#if defined(HAS_MIRRORUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) {
- MirrorRowUV = MirrorUVRow_SSSE3;
+ MirrorUVRow = MirrorUVRow_SSSE3;
}
#endif
-#if defined(HAS_MIRRORUVROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_MIRRORUVROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
- MirrorRowUV = MirrorUVRow_MIPS_DSPR2;
+ MirrorUVRow = MirrorUVRow_DSPR2;
}
#endif
@@ -290,7 +290,7 @@ void RotateUV180(const uint8* src, int src_stride,
dst_b += dst_stride_b * (height - 1);
for (i = 0; i < height; ++i) {
- MirrorRowUV(src, dst_a, dst_b, width);
+ MirrorUVRow(src, dst_a, dst_b, width);
src += src_stride;
dst_a -= dst_stride_a;
dst_b -= dst_stride_b;
diff --git a/chromium/third_party/libyuv/source/rotate_any.cc b/chromium/third_party/libyuv/source/rotate_any.cc
index d12bad5dc77..31a74c31555 100644
--- a/chromium/third_party/libyuv/source/rotate_any.cc
+++ b/chromium/third_party/libyuv/source/rotate_any.cc
@@ -38,8 +38,8 @@ TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, 7)
#ifdef HAS_TRANSPOSEWX8_FAST_SSSE3
TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
#endif
-#ifdef HAS_TRANSPOSEWX8_MIPS_DSPR2
-TANY(TransposeWx8_Any_MIPS_DSPR2, TransposeWx8_MIPS_DSPR2, 7)
+#ifdef HAS_TRANSPOSEWX8_DSPR2
+TANY(TransposeWx8_Any_DSPR2, TransposeWx8_DSPR2, 7)
#endif
#undef TANY
@@ -64,8 +64,8 @@ TUVANY(TransposeUVWx8_Any_NEON, TransposeUVWx8_NEON, 7)
#ifdef HAS_TRANSPOSEUVWX8_SSE2
TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7)
#endif
-#ifdef HAS_TRANSPOSEUVWX8_MIPS_DSPR2
-TUVANY(TransposeUVWx8_Any_MIPS_DSPR2, TransposeUVWx8_MIPS_DSPR2, 7)
+#ifdef HAS_TRANSPOSEUVWX8_DSPR2
+TUVANY(TransposeUVWx8_Any_DSPR2, TransposeUVWx8_DSPR2, 7)
#endif
#undef TUVANY
diff --git a/chromium/third_party/libyuv/source/rotate_mips.cc b/chromium/third_party/libyuv/source/rotate_mips.cc
index efe6bd909e1..23e89fbad4c 100644
--- a/chromium/third_party/libyuv/source/rotate_mips.cc
+++ b/chromium/third_party/libyuv/source/rotate_mips.cc
@@ -22,7 +22,7 @@ extern "C" {
defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \
(_MIPS_SIM == _MIPS_SIM_ABI32)
-void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+void TransposeWx8_DSPR2(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width) {
__asm__ __volatile__ (
".set push \n"
@@ -106,7 +106,7 @@ void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
);
}
-void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride,
+void TransposeWx8_Fast_DSPR2(const uint8* src, int src_stride,
uint8* dst, int dst_stride, int width) {
__asm__ __volatile__ (
".set noat \n"
@@ -308,7 +308,7 @@ void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride,
);
}
-void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+void TransposeUVWx8_DSPR2(const uint8* src, int src_stride,
uint8* dst_a, int dst_stride_a,
uint8* dst_b, int dst_stride_b,
int width) {
diff --git a/chromium/third_party/libyuv/source/row_any.cc b/chromium/third_party/libyuv/source/row_any.cc
index 5e5f435a6fe..29b7a343d53 100644
--- a/chromium/third_party/libyuv/source/row_any.cc
+++ b/chromium/third_party/libyuv/source/row_any.cc
@@ -596,8 +596,8 @@ ANY11T(InterpolateRow_Any_SSSE3, InterpolateRow_SSSE3, 1, 1, 15)
#ifdef HAS_INTERPOLATEROW_NEON
ANY11T(InterpolateRow_Any_NEON, InterpolateRow_NEON, 1, 1, 15)
#endif
-#ifdef HAS_INTERPOLATEROW_MIPS_DSPR2
-ANY11T(InterpolateRow_Any_MIPS_DSPR2, InterpolateRow_MIPS_DSPR2, 1, 1, 3)
+#ifdef HAS_INTERPOLATEROW_DSPR2
+ANY11T(InterpolateRow_Any_DSPR2, InterpolateRow_DSPR2, 1, 1, 3)
#endif
#undef ANY11T
@@ -705,8 +705,8 @@ ANY12(SplitUVRow_Any_AVX2, SplitUVRow_AVX2, 0, 2, 0, 31)
#ifdef HAS_SPLITUVROW_NEON
ANY12(SplitUVRow_Any_NEON, SplitUVRow_NEON, 0, 2, 0, 15)
#endif
-#ifdef HAS_SPLITUVROW_MIPS_DSPR2
-ANY12(SplitUVRow_Any_MIPS_DSPR2, SplitUVRow_MIPS_DSPR2, 0, 2, 0, 15)
+#ifdef HAS_SPLITUVROW_DSPR2
+ANY12(SplitUVRow_Any_DSPR2, SplitUVRow_DSPR2, 0, 2, 0, 15)
#endif
#ifdef HAS_ARGBTOUV444ROW_SSSE3
ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15)
@@ -715,16 +715,12 @@ ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15)
ANY12(YUY2ToUV422Row_Any_AVX2, YUY2ToUV422Row_AVX2, 1, 4, 1, 31)
ANY12(UYVYToUV422Row_Any_AVX2, UYVYToUV422Row_AVX2, 1, 4, 1, 31)
#endif
-#ifdef HAS_ARGBTOUV422ROW_SSSE3
-ANY12(ARGBToUV422Row_Any_SSSE3, ARGBToUV422Row_SSSE3, 0, 4, 1, 15)
-#endif
#ifdef HAS_YUY2TOUV422ROW_SSE2
ANY12(YUY2ToUV422Row_Any_SSE2, YUY2ToUV422Row_SSE2, 1, 4, 1, 15)
ANY12(UYVYToUV422Row_Any_SSE2, UYVYToUV422Row_SSE2, 1, 4, 1, 15)
#endif
#ifdef HAS_YUY2TOUV422ROW_NEON
ANY12(ARGBToUV444Row_Any_NEON, ARGBToUV444Row_NEON, 0, 4, 0, 7)
-ANY12(ARGBToUV422Row_Any_NEON, ARGBToUV422Row_NEON, 0, 4, 1, 15)
ANY12(ARGBToUV411Row_Any_NEON, ARGBToUV411Row_NEON, 0, 4, 2, 31)
ANY12(YUY2ToUV422Row_Any_NEON, YUY2ToUV422Row_NEON, 1, 4, 1, 15)
ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15)
@@ -760,6 +756,9 @@ ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15)
#ifdef HAS_ARGBTOUVROW_AVX2
ANY12S(ARGBToUVRow_Any_AVX2, ARGBToUVRow_AVX2, 0, 4, 31)
#endif
+#ifdef HAS_ARGBTOUVJROW_AVX2
+ANY12S(ARGBToUVJRow_Any_AVX2, ARGBToUVJRow_AVX2, 0, 4, 31)
+#endif
#ifdef HAS_ARGBTOUVROW_SSSE3
ANY12S(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_SSSE3, 0, 4, 15)
ANY12S(ARGBToUVJRow_Any_SSSE3, ARGBToUVJRow_SSSE3, 0, 4, 15)
diff --git a/chromium/third_party/libyuv/source/row_common.cc b/chromium/third_party/libyuv/source/row_common.cc
index c820cdf1f87..2b80d074cea 100644
--- a/chromium/third_party/libyuv/source/row_common.cc
+++ b/chromium/third_party/libyuv/source/row_common.cc
@@ -433,28 +433,6 @@ void NAME ## ToUVJRow_C(const uint8* src_rgb0, int src_stride_rgb, \
MAKEROWYJ(ARGB, 2, 1, 0, 4)
#undef MAKEROWYJ
-void ARGBToUVJ422Row_C(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width) {
- int x;
- for (x = 0; x < width - 1; x += 2) {
- uint8 ab = (src_argb[0] + src_argb[4]) >> 1;
- uint8 ag = (src_argb[1] + src_argb[5]) >> 1;
- uint8 ar = (src_argb[2] + src_argb[6]) >> 1;
- dst_u[0] = RGBToUJ(ar, ag, ab);
- dst_v[0] = RGBToVJ(ar, ag, ab);
- src_argb += 8;
- dst_u += 1;
- dst_v += 1;
- }
- if (width & 1) {
- uint8 ab = src_argb[0];
- uint8 ag = src_argb[1];
- uint8 ar = src_argb[2];
- dst_u[0] = RGBToUJ(ar, ag, ab);
- dst_v[0] = RGBToVJ(ar, ag, ab);
- }
-}
-
void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int width) {
int x;
for (x = 0; x < width; ++x) {
@@ -658,28 +636,6 @@ void ARGBToUV444Row_C(const uint8* src_argb,
}
}
-void ARGBToUV422Row_C(const uint8* src_argb,
- uint8* dst_u, uint8* dst_v, int width) {
- int x;
- for (x = 0; x < width - 1; x += 2) {
- uint8 ab = (src_argb[0] + src_argb[4]) >> 1;
- uint8 ag = (src_argb[1] + src_argb[5]) >> 1;
- uint8 ar = (src_argb[2] + src_argb[6]) >> 1;
- dst_u[0] = RGBToU(ar, ag, ab);
- dst_v[0] = RGBToV(ar, ag, ab);
- src_argb += 8;
- dst_u += 1;
- dst_v += 1;
- }
- if (width & 1) {
- uint8 ab = src_argb[0];
- uint8 ag = src_argb[1];
- uint8 ar = src_argb[2];
- dst_u[0] = RGBToU(ar, ag, ab);
- dst_v[0] = RGBToV(ar, ag, ab);
- }
-}
-
void ARGBToUV411Row_C(const uint8* src_argb,
uint8* dst_u, uint8* dst_v, int width) {
int x;
@@ -2539,7 +2495,11 @@ void I422ToRGB565Row_AVX2(const uint8* src_y,
while (width > 0) {
int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth);
+#if defined(HAS_ARGBTORGB565ROW_AVX2)
ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth);
+#else
+ ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth);
+#endif
src_y += twidth;
src_u += twidth / 2;
src_v += twidth / 2;
@@ -2561,7 +2521,11 @@ void I422ToARGB1555Row_AVX2(const uint8* src_y,
while (width > 0) {
int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth);
+#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth);
+#else
+ ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth);
+#endif
src_y += twidth;
src_u += twidth / 2;
src_v += twidth / 2;
@@ -2583,7 +2547,11 @@ void I422ToARGB4444Row_AVX2(const uint8* src_y,
while (width > 0) {
int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth);
+#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth);
+#else
+ ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth);
+#endif
src_y += twidth;
src_u += twidth / 2;
src_v += twidth / 2;
@@ -2627,7 +2595,11 @@ void NV12ToRGB565Row_AVX2(const uint8* src_y,
while (width > 0) {
int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
NV12ToARGBRow_AVX2(src_y, src_uv, row, yuvconstants, twidth);
+#if defined(HAS_ARGBTORGB565ROW_AVX2)
ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth);
+#else
+ ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth);
+#endif
src_y += twidth;
src_uv += twidth;
dst_rgb565 += twidth * 2;
diff --git a/chromium/third_party/libyuv/source/row_gcc.cc b/chromium/third_party/libyuv/source/row_gcc.cc
index 80b2a95aa10..d5174516e71 100644
--- a/chromium/third_party/libyuv/source/row_gcc.cc
+++ b/chromium/third_party/libyuv/source/row_gcc.cc
@@ -1023,6 +1023,67 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
}
#endif // HAS_ARGBTOUVROW_AVX2
+#ifdef HAS_ARGBTOUVJROW_AVX2
+void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "vbroadcastf128 %5,%%ymm5 \n"
+ "vbroadcastf128 %6,%%ymm6 \n"
+ "vbroadcastf128 %7,%%ymm7 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "vmovdqu " MEMACCESS2(0x40,0) ",%%ymm2 \n"
+ "vmovdqu " MEMACCESS2(0x60,0) ",%%ymm3 \n"
+ VMEMOPREG(vpavgb,0x00,0,4,1,ymm0,ymm0) // vpavgb (%0,%4,1),%%ymm0,%%ymm0
+ VMEMOPREG(vpavgb,0x20,0,4,1,ymm1,ymm1)
+ VMEMOPREG(vpavgb,0x40,0,4,1,ymm2,ymm2)
+ VMEMOPREG(vpavgb,0x60,0,4,1,ymm3,ymm3)
+ "lea " MEMLEA(0x80,0) ",%0 \n"
+ "vshufps $0x88,%%ymm1,%%ymm0,%%ymm4 \n"
+ "vshufps $0xdd,%%ymm1,%%ymm0,%%ymm0 \n"
+ "vpavgb %%ymm4,%%ymm0,%%ymm0 \n"
+ "vshufps $0x88,%%ymm3,%%ymm2,%%ymm4 \n"
+ "vshufps $0xdd,%%ymm3,%%ymm2,%%ymm2 \n"
+ "vpavgb %%ymm4,%%ymm2,%%ymm2 \n"
+
+ "vpmaddubsw %%ymm7,%%ymm0,%%ymm1 \n"
+ "vpmaddubsw %%ymm7,%%ymm2,%%ymm3 \n"
+ "vpmaddubsw %%ymm6,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm6,%%ymm2,%%ymm2 \n"
+ "vphaddw %%ymm3,%%ymm1,%%ymm1 \n"
+ "vphaddw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpaddw %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpaddw %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpsraw $0x8,%%ymm1,%%ymm1 \n"
+ "vpsraw $0x8,%%ymm0,%%ymm0 \n"
+ "vpacksswb %%ymm0,%%ymm1,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpshufb %8,%%ymm0,%%ymm0 \n"
+
+ "vextractf128 $0x0,%%ymm0," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,1,ymm0,0x0,1,2,1) // vextractf128 $1,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_argb)), // %4
+ "m"(kAddUVJ128), // %5
+ "m"(kARGBToVJ), // %6
+ "m"(kARGBToUJ), // %7
+ "m"(kShufARGBToUV_AVX) // %8
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBTOUVJROW_AVX2
+
#ifdef HAS_ARGBTOUVJROW_SSSE3
void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
uint8* dst_u, uint8* dst_v, int width) {
@@ -1144,59 +1205,6 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
}
#endif // HAS_ARGBTOUV444ROW_SSSE3
-#ifdef HAS_ARGBTOUV422ROW_SSSE3
-void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
- uint8* dst_u, uint8* dst_v, int width) {
- asm volatile (
- "movdqa %4,%%xmm3 \n"
- "movdqa %5,%%xmm4 \n"
- "movdqa %6,%%xmm5 \n"
- "sub %1,%2 \n"
- LABELALIGN
- "1: \n"
- "movdqu " MEMACCESS(0) ",%%xmm0 \n"
- "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
- "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
- "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
- "lea " MEMLEA(0x40,0) ",%0 \n"
- "movdqa %%xmm0,%%xmm7 \n"
- "shufps $0x88,%%xmm1,%%xmm0 \n"
- "shufps $0xdd,%%xmm1,%%xmm7 \n"
- "pavgb %%xmm7,%%xmm0 \n"
- "movdqa %%xmm2,%%xmm7 \n"
- "shufps $0x88,%%xmm6,%%xmm2 \n"
- "shufps $0xdd,%%xmm6,%%xmm7 \n"
- "pavgb %%xmm7,%%xmm2 \n"
- "movdqa %%xmm0,%%xmm1 \n"
- "movdqa %%xmm2,%%xmm6 \n"
- "pmaddubsw %%xmm4,%%xmm0 \n"
- "pmaddubsw %%xmm4,%%xmm2 \n"
- "pmaddubsw %%xmm3,%%xmm1 \n"
- "pmaddubsw %%xmm3,%%xmm6 \n"
- "phaddw %%xmm2,%%xmm0 \n"
- "phaddw %%xmm6,%%xmm1 \n"
- "psraw $0x8,%%xmm0 \n"
- "psraw $0x8,%%xmm1 \n"
- "packsswb %%xmm1,%%xmm0 \n"
- "paddb %%xmm5,%%xmm0 \n"
- "movlps %%xmm0," MEMACCESS(1) " \n"
- MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
- "lea " MEMLEA(0x8,1) ",%1 \n"
- "sub $0x10,%3 \n"
- "jg 1b \n"
- : "+r"(src_argb0), // %0
- "+r"(dst_u), // %1
- "+r"(dst_v), // %2
- "+rm"(width) // %3
- : "m"(kARGBToV), // %4
- "m"(kARGBToU), // %5
- "m"(kAddUV128) // %6
- : "memory", "cc", NACL_R14
- "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
- );
-}
-#endif // HAS_ARGBTOUV422ROW_SSSE3
-
void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int width) {
asm volatile (
"movdqa %4,%%xmm5 \n"
@@ -1484,7 +1492,7 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
#if defined(HAS_I422TOARGBROW_SSSE3) || defined(HAS_I422TOARGBROW_AVX2)
-// Read 8 UV from 411
+// Read 8 UV from 444
#define READYUV444 \
"movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
MEMOPREG(movq, 0x00, [u_buf], [v_buf], 1, xmm1) \
@@ -1528,7 +1536,7 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
#define READYUV411_TEMP \
"movzwl " MEMACCESS([u_buf]) ",%[temp] \n" \
"movd %[temp],%%xmm0 \n" \
- MEMOPARG(movzwl,0x00,[u_buf],[v_buf],1,[temp]) " \n" \
+ MEMOPARG(movzwl, 0x00, [u_buf], [v_buf], 1, [temp]) " \n" \
"movd %[temp],%%xmm1 \n" \
"lea " MEMLEA(0x2, [u_buf]) ",%[u_buf] \n" \
"punpcklbw %%xmm1,%%xmm0 \n" \
@@ -2005,6 +2013,20 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
"vpermq $0xd8,%%ymm5,%%ymm5 \n" \
"lea " MEMLEA(0x10, [a_buf]) ",%[a_buf] \n"
+// Read 4 UV from 411, upsample to 16 UV.
+#define READYUV411_AVX2 \
+ "vmovd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
+ MEMOPREG(vmovd, 0x00, [u_buf], [v_buf], 1, xmm1) \
+ "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
+ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
+ "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
+ "vpunpckldq %%ymm0,%%ymm0,%%ymm0 \n" \
+ "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \
+ "vpermq $0xd8,%%ymm4,%%ymm4 \n" \
+ "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \
+ "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"
+
// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 \
"vmovdqu " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
@@ -2071,7 +2093,7 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
"vpackuswb %%ymm2,%%ymm2,%%ymm2 \n"
#define YUVTORGB_REGS_AVX2 \
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
-#else// Convert 16 pixels: 16 UV and 16 Y.
+#else // Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_SETUP_AVX2(yuvconstants)
#define YUVTORGB_AVX2(yuvconstants) \
"vpmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%ymm0,%%ymm2 \n" \
@@ -2120,7 +2142,7 @@ void OMITFP I444ToARGBRow_AVX2(const uint8* y_buf,
asm volatile (
YUVTORGB_SETUP_AVX2(yuvconstants)
"sub %[u_buf],%[v_buf] \n"
- "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
LABELALIGN
"1: \n"
READYUV444_AVX2
@@ -2141,6 +2163,39 @@ void OMITFP I444ToARGBRow_AVX2(const uint8* y_buf,
}
#endif // HAS_I444TOARGBROW_AVX2
+#ifdef HAS_I411TOARGBROW_AVX2
+// 16 pixels
+// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+void OMITFP I411ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ const struct YuvConstants* yuvconstants,
+ int width) {
+ asm volatile (
+ YUVTORGB_SETUP_AVX2(yuvconstants)
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV411_AVX2
+ YUVTORGB_AVX2(yuvconstants)
+ STOREARGB_AVX2
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [yuvconstants]"r"(yuvconstants) // %[yuvconstants]
+ : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_I411TOARGBROW_AVX2
+
#if defined(HAS_I422TOARGBROW_AVX2)
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
@@ -2153,7 +2208,7 @@ void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf,
asm volatile (
YUVTORGB_SETUP_AVX2(yuvconstants)
"sub %[u_buf],%[v_buf] \n"
- "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
LABELALIGN
"1: \n"
READYUV422_AVX2
@@ -2521,7 +2576,7 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
}
#endif // HAS_MIRRORROW_AVX2
-#ifdef HAS_MIRRORROW_UV_SSSE3
+#ifdef HAS_MIRRORUVROW_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static uvec8 kShuffleMirrorUV = {
14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
@@ -2552,7 +2607,7 @@ void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
"xmm0", "xmm1"
);
}
-#endif // HAS_MIRRORROW_UV_SSSE3
+#endif // HAS_MIRRORUVROW_SSSE3
#ifdef HAS_ARGBMIRRORROW_SSE2
@@ -2953,7 +3008,7 @@ void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
#ifdef HAS_SETROW_X86
void SetRow_X86(uint8* dst, uint8 v8, int width) {
size_t width_tmp = (size_t)(width >> 2);
- const uint32 v32 = v8 * 0x01010101; // Duplicate byte to all bytes.
+ const uint32 v32 = v8 * 0x01010101u; // Duplicate byte to all bytes.
asm volatile (
"rep stosl " MEMSTORESTRING(eax,0) " \n"
: "+D"(dst), // %0
diff --git a/chromium/third_party/libyuv/source/row_mips.cc b/chromium/third_party/libyuv/source/row_mips.cc
index d12cf6ab790..2c55b786b2a 100644
--- a/chromium/third_party/libyuv/source/row_mips.cc
+++ b/chromium/third_party/libyuv/source/row_mips.cc
@@ -375,12 +375,12 @@ void CopyRow_MIPS(const uint8* src, uint8* dst, int count) {
}
#endif // HAS_COPYROW_MIPS
-// MIPS DSPR2 functions
+// DSPR2 functions
#if !defined(LIBYUV_DISABLE_MIPS) && defined(__mips_dsp) && \
(__mips_dsp_rev >= 2) && \
(_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6)
-void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+void SplitUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width) {
__asm__ __volatile__ (
".set push \n"
@@ -446,7 +446,7 @@ void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
);
}
-void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) {
+void MirrorRow_DSPR2(const uint8* src, uint8* dst, int width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -496,7 +496,7 @@ void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) {
);
}
-void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+void MirrorUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
int width) {
int x = 0;
int y = 0;
@@ -653,7 +653,7 @@ void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
"addu.ph $t1, $t1, $s5 \n"
// TODO(fbarchard): accept yuv conversion constants.
-void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf,
+void I422ToARGBRow_DSPR2(const uint8* y_buf,
const uint8* u_buf,
const uint8* v_buf,
uint8* rgb_buf,
@@ -716,7 +716,7 @@ void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf,
}
// Bilinear filter 8x2 -> 8x1
-void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+void InterpolateRow_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) {
int y0_fraction = 256 - source_y_fraction;
diff --git a/chromium/third_party/libyuv/source/row_neon.cc b/chromium/third_party/libyuv/source/row_neon.cc
index 5b4ff3b5a88..91d6aa857b6 100644
--- a/chromium/third_party/libyuv/source/row_neon.cc
+++ b/chromium/third_party/libyuv/source/row_neon.cc
@@ -317,16 +317,11 @@ void I422ToRGB24Row_NEON(const uint8* src_y,
}
#define ARGBTORGB565 \
- "vshr.u8 d20, d20, #3 \n" /* B */ \
- "vshr.u8 d21, d21, #2 \n" /* G */ \
- "vshr.u8 d22, d22, #3 \n" /* R */ \
- "vmovl.u8 q8, d20 \n" /* B */ \
- "vmovl.u8 q9, d21 \n" /* G */ \
- "vmovl.u8 q10, d22 \n" /* R */ \
- "vshl.u16 q9, q9, #5 \n" /* G */ \
- "vshl.u16 q10, q10, #11 \n" /* R */ \
- "vorr q0, q8, q9 \n" /* BG */ \
- "vorr q0, q0, q10 \n" /* BGR */
+ "vshll.u8 q0, d22, #8 \n" /* R */ \
+ "vshll.u8 q8, d21, #8 \n" /* G */ \
+ "vshll.u8 q9, d20, #8 \n" /* B */ \
+ "vsri.16 q0, q8, #5 \n" /* RG */ \
+ "vsri.16 q0, q9, #11 \n" /* RGB */
void I422ToRGB565Row_NEON(const uint8* src_y,
const uint8* src_u,
@@ -359,19 +354,13 @@ void I422ToRGB565Row_NEON(const uint8* src_y,
}
#define ARGBTOARGB1555 \
- "vshr.u8 q10, q10, #3 \n" /* B */ \
- "vshr.u8 d22, d22, #3 \n" /* R */ \
- "vshr.u8 d23, d23, #7 \n" /* A */ \
- "vmovl.u8 q8, d20 \n" /* B */ \
- "vmovl.u8 q9, d21 \n" /* G */ \
- "vmovl.u8 q10, d22 \n" /* R */ \
- "vmovl.u8 q11, d23 \n" /* A */ \
- "vshl.u16 q9, q9, #5 \n" /* G */ \
- "vshl.u16 q10, q10, #10 \n" /* R */ \
- "vshl.u16 q11, q11, #15 \n" /* A */ \
- "vorr q0, q8, q9 \n" /* BG */ \
- "vorr q1, q10, q11 \n" /* RA */ \
- "vorr q0, q0, q1 \n" /* BGRA */
+ "vshll.u8 q0, d23, #8 \n" /* A */ \
+ "vshll.u8 q8, d22, #8 \n" /* R */ \
+ "vshll.u8 q9, d21, #8 \n" /* G */ \
+ "vshll.u8 q10, d20, #8 \n" /* B */ \
+ "vsri.16 q0, q8, #1 \n" /* AR */ \
+ "vsri.16 q0, q9, #6 \n" /* ARG */ \
+ "vsri.16 q0, q10, #11 \n" /* ARGB */
void I422ToARGB1555Row_NEON(const uint8* src_y,
const uint8* src_u,
@@ -1374,55 +1363,6 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
);
}
-// 16x1 pixels -> 8x1. width is number of argb pixels. e.g. 16.
-void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) {
- asm volatile (
- "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
- "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
- "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
- "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
- "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
- "vmov.u16 q15, #0x8080 \n" // 128.5
- "1: \n"
- MEMACCESS(0)
- "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
- MEMACCESS(0)
- "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
-
- "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
- "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
- "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
-
- "subs %3, %3, #16 \n" // 16 processed per loop.
- "vmul.s16 q8, q0, q10 \n" // B
- "vmls.s16 q8, q1, q11 \n" // G
- "vmls.s16 q8, q2, q12 \n" // R
- "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
-
- "vmul.s16 q9, q2, q10 \n" // R
- "vmls.s16 q9, q1, q14 \n" // G
- "vmls.s16 q9, q0, q13 \n" // B
- "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
-
- "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
- "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
-
- MEMACCESS(1)
- "vst1.8 {d0}, [%1]! \n" // store 8 pixels U.
- MEMACCESS(2)
- "vst1.8 {d1}, [%2]! \n" // store 8 pixels V.
- "bgt 1b \n"
- : "+r"(src_argb), // %0
- "+r"(dst_u), // %1
- "+r"(dst_v), // %2
- "+r"(width) // %3
- :
- : "cc", "memory", "q0", "q1", "q2", "q3",
- "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
- );
-}
-
// 32x1 pixels -> 8x1. width is number of argb pixels. e.g. 32.
void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
int width) {
diff --git a/chromium/third_party/libyuv/source/row_neon64.cc b/chromium/third_party/libyuv/source/row_neon64.cc
index 6fe5a108030..ee42af12e37 100644
--- a/chromium/third_party/libyuv/source/row_neon64.cc
+++ b/chromium/third_party/libyuv/source/row_neon64.cc
@@ -323,8 +323,8 @@ void I422ToRGB24Row_NEON(const uint8* src_y,
#define ARGBTORGB565 \
"shll v0.8h, v22.8b, #8 \n" /* R */ \
- "shll v20.8h, v20.8b, #8 \n" /* B */ \
"shll v21.8h, v21.8b, #8 \n" /* G */ \
+ "shll v20.8h, v20.8b, #8 \n" /* B */ \
"sri v0.8h, v21.8h, #5 \n" /* RG */ \
"sri v0.8h, v20.8h, #11 \n" /* RGB */
@@ -363,8 +363,8 @@ void I422ToRGB565Row_NEON(const uint8* src_y,
#define ARGBTOARGB1555 \
"shll v0.8h, v23.8b, #8 \n" /* A */ \
"shll v22.8h, v22.8b, #8 \n" /* R */ \
- "shll v20.8h, v20.8b, #8 \n" /* B */ \
"shll v21.8h, v21.8b, #8 \n" /* G */ \
+ "shll v20.8h, v20.8b, #8 \n" /* B */ \
"sri v0.8h, v22.8h, #1 \n" /* AR */ \
"sri v0.8h, v21.8h, #6 \n" /* ARG */ \
"sri v0.8h, v20.8h, #11 \n" /* ARGB */
@@ -1477,50 +1477,6 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
"movi v24.8h, #47, lsl #0 \n" /* VG coefficient (-0.7344) / 2 */ \
"movi v25.16b, #0x80 \n" /* 128.5 (0x8080 in 16-bit) */
-// 16x1 pixels -> 8x1. width is number of argb pixels. e.g. 16.
-#ifdef HAS_ARGBTOUV422ROW_NEON
-void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
- int width) {
- asm volatile (
- RGBTOUV_SETUP_REG
- "1: \n"
- MEMACCESS(0)
- "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
-
- "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
- "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
- "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
-
- "subs %w3, %w3, #16 \n" // 16 processed per loop.
- "mul v3.8h, v0.8h, v20.8h \n" // B
- "mls v3.8h, v1.8h, v21.8h \n" // G
- "mls v3.8h, v2.8h, v22.8h \n" // R
- "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned
-
- "mul v4.8h, v2.8h, v20.8h \n" // R
- "mls v4.8h, v1.8h, v24.8h \n" // G
- "mls v4.8h, v0.8h, v23.8h \n" // B
- "add v4.8h, v4.8h, v25.8h \n" // +128 -> unsigned
-
- "uqshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit U
- "uqshrn v1.8b, v4.8h, #8 \n" // 16 bit to 8 bit V
-
- MEMACCESS(1)
- "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U.
- MEMACCESS(2)
- "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V.
- "b.gt 1b \n"
- : "+r"(src_argb), // %0
- "+r"(dst_u), // %1
- "+r"(dst_v), // %2
- "+r"(width) // %3
- :
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
- "v20", "v21", "v22", "v23", "v24", "v25"
- );
-}
-#endif // HAS_ARGBTOUV422ROW_NEON
-
// 32x1 pixels -> 8x1. width is number of argb pixels. e.g. 32.
#ifdef HAS_ARGBTOUV411ROW_NEON
void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
diff --git a/chromium/third_party/libyuv/source/row_win.cc b/chromium/third_party/libyuv/source/row_win.cc
index 5cb5d1e4f5d..a8c16c3c1ef 100644
--- a/chromium/third_party/libyuv/source/row_win.cc
+++ b/chromium/third_party/libyuv/source/row_win.cc
@@ -1505,7 +1505,7 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
pmaddubsw xmm3, xmm6
phaddw xmm0, xmm2
phaddw xmm1, xmm3
- paddw xmm0, xmm5 // +.5 rounding -> unsigned
+ paddw xmm0, xmm5 // +.5 rounding -> unsigned
paddw xmm1, xmm5
psraw xmm0, 8
psraw xmm1, 8
@@ -1590,6 +1590,73 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
}
#endif // HAS_ARGBTOUVROW_AVX2
+#ifdef HAS_ARGBTOUVJROW_AVX2
+__declspec(naked)
+void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ vbroadcastf128 ymm5, xmmword ptr kAddUV128
+ vbroadcastf128 ymm6, xmmword ptr kARGBToV
+ vbroadcastf128 ymm7, xmmword ptr kARGBToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 32x2 argb pixels to 16x1 */
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vmovdqu ymm2, [eax + 64]
+ vmovdqu ymm3, [eax + 96]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ vpavgb ymm2, ymm2, [eax + esi + 64]
+ vpavgb ymm3, ymm3, [eax + esi + 96]
+ lea eax, [eax + 128]
+ vshufps ymm4, ymm0, ymm1, 0x88
+ vshufps ymm0, ymm0, ymm1, 0xdd
+ vpavgb ymm0, ymm0, ymm4 // mutated by vshufps
+ vshufps ymm4, ymm2, ymm3, 0x88
+ vshufps ymm2, ymm2, ymm3, 0xdd
+ vpavgb ymm2, ymm2, ymm4 // mutated by vshufps
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 32 different pixels, its 16 pixels of U and 16 of V
+ vpmaddubsw ymm1, ymm0, ymm7 // U
+ vpmaddubsw ymm3, ymm2, ymm7
+ vpmaddubsw ymm0, ymm0, ymm6 // V
+ vpmaddubsw ymm2, ymm2, ymm6
+ vphaddw ymm1, ymm1, ymm3 // mutates
+ vphaddw ymm0, ymm0, ymm2
+ vpaddw ymm1, ymm1, ymm5 // +.5 rounding -> unsigned
+ vpaddw ymm0, ymm0, ymm5
+ vpsraw ymm1, ymm1, 8
+ vpsraw ymm0, ymm0, 8
+ vpacksswb ymm0, ymm1, ymm0 // mutates
+ vpermq ymm0, ymm0, 0xd8 // For vpacksswb
+ vpshufb ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX // for vshufps/vphaddw
+
+ // step 3 - store 16 U and 16 V values
+ vextractf128 [edx], ymm0, 0 // U
+ vextractf128 [edx + edi], ymm0, 1 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOUVJROW_AVX2
+
__declspec(naked)
void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
uint8* dst_u, uint8* dst_v, int width) {
@@ -1648,64 +1715,6 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
}
__declspec(naked)
-void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
- uint8* dst_u, uint8* dst_v, int width) {
- __asm {
- push edi
- mov eax, [esp + 4 + 4] // src_argb
- mov edx, [esp + 4 + 8] // dst_u
- mov edi, [esp + 4 + 12] // dst_v
- mov ecx, [esp + 4 + 16] // width
- movdqa xmm5, xmmword ptr kAddUV128
- movdqa xmm6, xmmword ptr kARGBToV
- movdqa xmm7, xmmword ptr kARGBToU
- sub edi, edx // stride from u to v
-
- convertloop:
- /* step 1 - subsample 16x2 argb pixels to 8x1 */
- movdqu xmm0, [eax]
- movdqu xmm1, [eax + 16]
- movdqu xmm2, [eax + 32]
- movdqu xmm3, [eax + 48]
- lea eax, [eax + 64]
- movdqa xmm4, xmm0
- shufps xmm0, xmm1, 0x88
- shufps xmm4, xmm1, 0xdd
- pavgb xmm0, xmm4
- movdqa xmm4, xmm2
- shufps xmm2, xmm3, 0x88
- shufps xmm4, xmm3, 0xdd
- pavgb xmm2, xmm4
-
- // step 2 - convert to U and V
- // from here down is very similar to Y code except
- // instead of 16 different pixels, its 8 pixels of U and 8 of V
- movdqa xmm1, xmm0
- movdqa xmm3, xmm2
- pmaddubsw xmm0, xmm7 // U
- pmaddubsw xmm2, xmm7
- pmaddubsw xmm1, xmm6 // V
- pmaddubsw xmm3, xmm6
- phaddw xmm0, xmm2
- phaddw xmm1, xmm3
- psraw xmm0, 8
- psraw xmm1, 8
- packsswb xmm0, xmm1
- paddb xmm0, xmm5 // -> unsigned
-
- // step 3 - store 8 U and 8 V values
- movlps qword ptr [edx], xmm0 // U
- movhps qword ptr [edx + edi], xmm0 // V
- lea edx, [edx + 8]
- sub ecx, 16
- jg convertloop
-
- pop edi
- ret
- }
-}
-
-__declspec(naked)
void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
uint8* dst_u, uint8* dst_v, int width) {
__asm {
@@ -3154,7 +3163,7 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
}
#endif // HAS_MIRRORROW_AVX2
-#ifdef HAS_MIRRORROW_UV_SSSE3
+#ifdef HAS_MIRRORUVROW_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static const uvec8 kShuffleMirrorUV = {
14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
@@ -3187,7 +3196,7 @@ void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
ret
}
}
-#endif // HAS_MIRRORROW_UV_SSSE3
+#endif // HAS_MIRRORUVROW_SSSE3
#ifdef HAS_ARGBMIRRORROW_SSE2
__declspec(naked)
diff --git a/chromium/third_party/libyuv/source/scale.cc b/chromium/third_party/libyuv/source/scale.cc
index 595314f35cb..36e3fe52813 100644
--- a/chromium/third_party/libyuv/source/scale.cc
+++ b/chromium/third_party/libyuv/source/scale.cc
@@ -85,12 +85,12 @@ static void ScalePlaneDown2(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN2_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) &&
+#if defined(HAS_SCALEROWDOWN2_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_ptr, 4) &&
IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
ScaleRowDown2 = filtering ?
- ScaleRowDown2Box_MIPS_DSPR2 : ScaleRowDown2_MIPS_DSPR2;
+ ScaleRowDown2Box_DSPR2 : ScaleRowDown2_DSPR2;
}
#endif
@@ -135,12 +135,12 @@ static void ScalePlaneDown2_16(int src_width, int src_height,
ScaleRowDown2Box_16_SSE2);
}
#endif
-#if defined(HAS_SCALEROWDOWN2_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) &&
+#if defined(HAS_SCALEROWDOWN2_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_ptr, 4) &&
IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
ScaleRowDown2 = filtering ?
- ScaleRowDown2Box_16_MIPS_DSPR2 : ScaleRowDown2_16_MIPS_DSPR2;
+ ScaleRowDown2Box_16_DSPR2 : ScaleRowDown2_16_DSPR2;
}
#endif
@@ -200,12 +200,12 @@ static void ScalePlaneDown4(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN4_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) &&
+#if defined(HAS_SCALEROWDOWN4_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(row_stride, 4) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
ScaleRowDown4 = filtering ?
- ScaleRowDown4Box_MIPS_DSPR2 : ScaleRowDown4_MIPS_DSPR2;
+ ScaleRowDown4Box_DSPR2 : ScaleRowDown4_DSPR2;
}
#endif
@@ -245,12 +245,12 @@ static void ScalePlaneDown4_16(int src_width, int src_height,
ScaleRowDown4_16_SSE2;
}
#endif
-#if defined(HAS_SCALEROWDOWN4_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) &&
+#if defined(HAS_SCALEROWDOWN4_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(row_stride, 4) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
ScaleRowDown4 = filtering ?
- ScaleRowDown4Box_16_MIPS_DSPR2 : ScaleRowDown4_16_MIPS_DSPR2;
+ ScaleRowDown4Box_16_DSPR2 : ScaleRowDown4_16_DSPR2;
}
#endif
@@ -325,16 +325,16 @@ static void ScalePlaneDown34(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN34_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) &&
+#if defined(HAS_SCALEROWDOWN34_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 24 == 0) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
if (!filtering) {
- ScaleRowDown34_0 = ScaleRowDown34_MIPS_DSPR2;
- ScaleRowDown34_1 = ScaleRowDown34_MIPS_DSPR2;
+ ScaleRowDown34_0 = ScaleRowDown34_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_DSPR2;
} else {
- ScaleRowDown34_0 = ScaleRowDown34_0_Box_MIPS_DSPR2;
- ScaleRowDown34_1 = ScaleRowDown34_1_Box_MIPS_DSPR2;
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_DSPR2;
}
}
#endif
@@ -404,16 +404,16 @@ static void ScalePlaneDown34_16(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN34_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) &&
+#if defined(HAS_SCALEROWDOWN34_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 24 == 0) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
if (!filtering) {
- ScaleRowDown34_0 = ScaleRowDown34_16_MIPS_DSPR2;
- ScaleRowDown34_1 = ScaleRowDown34_16_MIPS_DSPR2;
+ ScaleRowDown34_0 = ScaleRowDown34_16_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_16_DSPR2;
} else {
- ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_MIPS_DSPR2;
- ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_MIPS_DSPR2;
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_DSPR2;
}
}
#endif
@@ -517,16 +517,16 @@ static void ScalePlaneDown38(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN38_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) &&
+#if defined(HAS_SCALEROWDOWN38_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 12 == 0) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
if (!filtering) {
- ScaleRowDown38_3 = ScaleRowDown38_MIPS_DSPR2;
- ScaleRowDown38_2 = ScaleRowDown38_MIPS_DSPR2;
+ ScaleRowDown38_3 = ScaleRowDown38_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_DSPR2;
} else {
- ScaleRowDown38_3 = ScaleRowDown38_3_Box_MIPS_DSPR2;
- ScaleRowDown38_2 = ScaleRowDown38_2_Box_MIPS_DSPR2;
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_DSPR2;
}
}
#endif
@@ -595,16 +595,16 @@ static void ScalePlaneDown38_16(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_SCALEROWDOWN38_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) &&
+#if defined(HAS_SCALEROWDOWN38_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 12 == 0) &&
IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
if (!filtering) {
- ScaleRowDown38_3 = ScaleRowDown38_16_MIPS_DSPR2;
- ScaleRowDown38_2 = ScaleRowDown38_16_MIPS_DSPR2;
+ ScaleRowDown38_3 = ScaleRowDown38_16_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_16_DSPR2;
} else {
- ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_MIPS_DSPR2;
- ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_MIPS_DSPR2;
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_DSPR2;
}
}
#endif
@@ -659,7 +659,6 @@ static void ScaleAddCols2_C(int dst_width, int boxheight, int x, int dx,
int i;
int scaletbl[2];
int minboxwidth = dx >> 16;
- int* scaleptr = scaletbl - minboxwidth;
int boxwidth;
scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight);
scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight);
@@ -667,7 +666,8 @@ static void ScaleAddCols2_C(int dst_width, int boxheight, int x, int dx,
int ix = x >> 16;
x += dx;
boxwidth = MIN1((x >> 16) - ix);
- *dst_ptr++ = SumPixels(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16;
+ *dst_ptr++ = SumPixels(boxwidth, src_ptr + ix) *
+ scaletbl[boxwidth - minboxwidth] >> 16;
}
}
@@ -676,7 +676,6 @@ static void ScaleAddCols2_16_C(int dst_width, int boxheight, int x, int dx,
int i;
int scaletbl[2];
int minboxwidth = dx >> 16;
- int* scaleptr = scaletbl - minboxwidth;
int boxwidth;
scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight);
scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight);
@@ -684,8 +683,8 @@ static void ScaleAddCols2_16_C(int dst_width, int boxheight, int x, int dx,
int ix = x >> 16;
x += dx;
boxwidth = MIN1((x >> 16) - ix);
- *dst_ptr++ =
- SumPixels_16(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16;
+ *dst_ptr++ = SumPixels_16(boxwidth, src_ptr + ix) *
+ scaletbl[boxwidth - minboxwidth] >> 16;
}
}
@@ -899,11 +898,11 @@ void ScalePlaneBilinearDown(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
- InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2)) {
+ InterpolateRow = InterpolateRow_Any_DSPR2;
if (IS_ALIGNED(src_width, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
}
#endif
@@ -1003,11 +1002,11 @@ void ScalePlaneBilinearDown_16(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
- InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+#if defined(HAS_INTERPOLATEROW_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2)) {
+ InterpolateRow = InterpolateRow_Any_16_DSPR2;
if (IS_ALIGNED(src_width, 4)) {
- InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_16_DSPR2;
}
}
#endif
@@ -1088,11 +1087,11 @@ void ScalePlaneBilinearUp(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
- InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2)) {
+ InterpolateRow = InterpolateRow_Any_DSPR2;
if (IS_ALIGNED(dst_width, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
}
#endif
@@ -1227,11 +1226,11 @@ void ScalePlaneBilinearUp_16(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
- InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+#if defined(HAS_INTERPOLATEROW_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2)) {
+ InterpolateRow = InterpolateRow_Any_16_DSPR2;
if (IS_ALIGNED(dst_width, 4)) {
- InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_16_DSPR2;
}
}
#endif
diff --git a/chromium/third_party/libyuv/source/scale_argb.cc b/chromium/third_party/libyuv/source/scale_argb.cc
index adddf9db5a4..17f51ae9bf8 100644
--- a/chromium/third_party/libyuv/source/scale_argb.cc
+++ b/chromium/third_party/libyuv/source/scale_argb.cc
@@ -234,12 +234,12 @@ static void ScaleARGBBilinearDown(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4)) {
- InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_Any_DSPR2;
if (IS_ALIGNED(clip_src_width, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
}
#endif
@@ -324,10 +324,10 @@ static void ScaleARGBBilinearUp(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
#endif
if (src_width >= 32768) {
@@ -465,13 +465,13 @@ static void ScaleYUVToARGBBilinearUp(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_width, 4) &&
+#if defined(HAS_I422TOARGBROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
- I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ I422ToARGBRow = I422ToARGBRow_DSPR2;
}
#endif
@@ -502,10 +502,10 @@ static void ScaleYUVToARGBBilinearUp(int src_width, int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
#endif
@@ -835,7 +835,6 @@ int YUVToARGBScaleClip(const uint8* src_y, int src_stride_y,
int dst_width, int dst_height,
int clip_x, int clip_y, int clip_width, int clip_height,
enum FilterMode filtering) {
-
uint8* argb_buffer = (uint8*)malloc(src_width * src_height * 4);
int r;
I420ToARGB(src_y, src_stride_y,
diff --git a/chromium/third_party/libyuv/source/scale_common.cc b/chromium/third_party/libyuv/source/scale_common.cc
index 30ff18c5075..d3992df2e6a 100644
--- a/chromium/third_party/libyuv/source/scale_common.cc
+++ b/chromium/third_party/libyuv/source/scale_common.cc
@@ -922,13 +922,13 @@ void ScalePlaneVertical(int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
- InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_Any_DSPR2;
if (IS_ALIGNED(dst_width_bytes, 4)) {
- InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_DSPR2;
}
}
#endif
@@ -996,13 +996,13 @@ void ScalePlaneVertical_16(int src_height,
}
}
#endif
-#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
- if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+#if defined(HAS_INTERPOLATEROW_16_DSPR2)
+ if (TestCpuFlag(kCpuHasDSPR2) &&
IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) &&
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
- InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_Any_16_DSPR2;
if (IS_ALIGNED(dst_width_bytes, 4)) {
- InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ InterpolateRow = InterpolateRow_16_DSPR2;
}
}
#endif
diff --git a/chromium/third_party/libyuv/source/scale_mips.cc b/chromium/third_party/libyuv/source/scale_mips.cc
index 2298a74b956..ae953073fa8 100644
--- a/chromium/third_party/libyuv/source/scale_mips.cc
+++ b/chromium/third_party/libyuv/source/scale_mips.cc
@@ -21,8 +21,8 @@ extern "C" {
defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \
(_MIPS_SIM == _MIPS_SIM_ABI32)
-void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown2_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
__asm__ __volatile__(
".set push \n"
".set noreorder \n"
@@ -77,8 +77,8 @@ void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown2Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
const uint8* t = src_ptr + src_stride;
__asm__ __volatile__ (
@@ -176,8 +176,8 @@ void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown4_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -231,8 +231,8 @@ void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown4Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
intptr_t stride = src_stride;
const uint8* s1 = src_ptr + stride;
const uint8* s2 = s1 + stride;
@@ -310,8 +310,8 @@ void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown34_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -356,8 +356,8 @@ void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* d, int dst_width) {
+void ScaleRowDown34_0_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -412,8 +412,8 @@ void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* d, int dst_width) {
+void ScaleRowDown34_1_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -464,8 +464,8 @@ void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst, int dst_width) {
+void ScaleRowDown38_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
__asm__ __volatile__ (
".set push \n"
".set noreorder \n"
@@ -510,8 +510,8 @@ void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
- uint8* dst_ptr, int dst_width) {
+void ScaleRowDown38_2_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
intptr_t stride = src_stride;
const uint8* t = src_ptr + stride;
const int c = 0x2AAA;
@@ -563,9 +563,9 @@ void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
);
}
-void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr,
- ptrdiff_t src_stride,
- uint8* dst_ptr, int dst_width) {
+void ScaleRowDown38_3_Box_DSPR2(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
intptr_t stride = src_stride;
const uint8* s1 = src_ptr + stride;
stride += stride;
diff --git a/chromium/third_party/libyuv/source/scale_win.cc b/chromium/third_party/libyuv/source/scale_win.cc
index 5ab4fa0ccc2..21b1ed923fa 100644
--- a/chromium/third_party/libyuv/source/scale_win.cc
+++ b/chromium/third_party/libyuv/source/scale_win.cc
@@ -289,7 +289,7 @@ void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
vpmaddubsw ymm3, ymm3, ymm4
vpaddw ymm0, ymm0, ymm2 // vertical add
vpaddw ymm1, ymm1, ymm3
- vpsrlw ymm0, ymm0, 1
+ vpsrlw ymm0, ymm0, 1 // (x + 2) / 4 = (x / 2 + 1) / 2
vpsrlw ymm1, ymm1, 1
vpavgw ymm0, ymm0, ymm5 // (x + 1) / 2
vpavgw ymm1, ymm1, ymm5
diff --git a/chromium/third_party/libyuv/third_party/gflags/BUILD.gn b/chromium/third_party/libyuv/third_party/gflags/BUILD.gn
index 2b8d9c8d2a6..913c5587541 100644
--- a/chromium/third_party/libyuv/third_party/gflags/BUILD.gn
+++ b/chromium/third_party/libyuv/third_party/gflags/BUILD.gn
@@ -16,7 +16,7 @@ if (is_win) {
}
config("gflags_config") {
- include_dirs = [
+ include_dirs = [
"$gflags_gen_arch_root/include", # For configured files.
"src", # For everything else.
]
@@ -28,6 +28,13 @@ config("gflags_config") {
"GFLAGS_DLL_DECLARE_FLAG=",
"GFLAGS_DLL_DEFINE_FLAG=",
]
+
+ # GN orders flags on a target before flags from configs. The default config
+ # adds -Wall, and this flag have to be after -Wall -- so they need to
+ # come from a config and can't be on the target directly.
+ if (is_clang) {
+ cflags = [ "-Wno-unused-local-typedef" ]
+ }
}
source_set("gflags") {
@@ -45,9 +52,7 @@ source_set("gflags") {
]
}
- include_dirs = [
- "$gflags_gen_arch_root/include/private", # For config.h
- ]
+ include_dirs = [ "$gflags_gen_arch_root/include/private" ] # For config.h
public_configs = [ ":gflags_config" ]
@@ -60,3 +65,4 @@ source_set("gflags") {
configs -= [ "//build/config/clang:extra_warnings" ]
}
}
+
diff --git a/chromium/third_party/libyuv/third_party/gflags/gflags.gyp b/chromium/third_party/libyuv/third_party/gflags/gflags.gyp
index 57567fa3ba0..7ce3f80f6cb 100644
--- a/chromium/third_party/libyuv/third_party/gflags/gflags.gyp
+++ b/chromium/third_party/libyuv/third_party/gflags/gflags.gyp
@@ -76,8 +76,10 @@
},
}],
['clang==1', {
+ 'cflags': ['-Wno-unused-local-typedef',],
'cflags!': ['-Wheader-hygiene',],
'xcode_settings': {
+ 'WARNING_CFLAGS': ['-Wno-unused-local-typedef',],
'WARNING_CFLAGS!': ['-Wheader-hygiene',],
},
}],
@@ -85,3 +87,4 @@
},
],
}
+
diff --git a/chromium/third_party/libyuv/tools/OWNERS b/chromium/third_party/libyuv/tools/OWNERS
new file mode 100644
index 00000000000..aca046d45e3
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/OWNERS
@@ -0,0 +1 @@
+kjellander@chromium.org
diff --git a/chromium/third_party/libyuv/tools/msan/OWNERS b/chromium/third_party/libyuv/tools/msan/OWNERS
new file mode 100644
index 00000000000..60351e7ea2a
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/msan/OWNERS
@@ -0,0 +1,3 @@
+pbos@chromium.org
+kjellander@chromium.org
+
diff --git a/chromium/third_party/libyuv/tools/msan/blacklist.txt b/chromium/third_party/libyuv/tools/msan/blacklist.txt
new file mode 100644
index 00000000000..8b5e42a7b3d
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/msan/blacklist.txt
@@ -0,0 +1,9 @@
+# The rules in this file are only applied at compile time.
+# Because the Chrome buildsystem does not automatically touch the files
+# mentioned here, changing this file requires clobbering all MSan bots.
+#
+# Please think twice before you add or remove these rules.
+
+# This is a stripped down copy of Chromium's blacklist.txt, to enable
+# adding libyuv-specific blacklist entries.
+
diff --git a/chromium/third_party/libyuv/tools/ubsan/OWNERS b/chromium/third_party/libyuv/tools/ubsan/OWNERS
new file mode 100644
index 00000000000..b608519abf6
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/ubsan/OWNERS
@@ -0,0 +1,4 @@
+pbos@webrtc.org
+kjellander@webrtc.org
+fbarchard@chromium.org
+
diff --git a/chromium/third_party/libyuv/tools/ubsan/blacklist.txt b/chromium/third_party/libyuv/tools/ubsan/blacklist.txt
new file mode 100644
index 00000000000..8bcb29073bc
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/ubsan/blacklist.txt
@@ -0,0 +1,15 @@
+#############################################################################
+# UBSan blacklist.
+# Please think twice before you add or remove these rules.
+
+# This is a stripped down copy of Chromium's blacklist.txt, to enable
+# adding WebRTC-specific blacklist entries.
+
+#############################################################################
+# YASM does some funny things that UBsan doesn't like.
+# https://crbug.com/489901
+src:*/third_party/yasm/*
+
+#############################################################################
+# Ignore system libraries.
+src:*/usr/*
diff --git a/chromium/third_party/libyuv/tools/ubsan/vptr_blacklist.txt b/chromium/third_party/libyuv/tools/ubsan/vptr_blacklist.txt
new file mode 100644
index 00000000000..8ed070c05d2
--- /dev/null
+++ b/chromium/third_party/libyuv/tools/ubsan/vptr_blacklist.txt
@@ -0,0 +1,21 @@
+#############################################################################
+# UBSan vptr blacklist.
+# Function and type based blacklisting use a mangled name, and it is especially
+# tricky to represent C++ types. For now, any possible changes by name manglings
+# are simply represented as wildcard expressions of regexp, and thus it might be
+# over-blacklisted.
+#
+# Please think twice before you add or remove these rules.
+#
+# This is a stripped down copy of Chromium's vptr_blacklist.txt, to enable
+# adding libyuv-specific blacklist entries.
+
+#############################################################################
+# Using raw pointer values.
+#
+# A raw pointer value (16) is used to infer the field offset by
+# GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET.
+
+# Example:
+# src:*/third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc
+
diff --git a/chromium/third_party/libyuv/unit_test/color_test.cc b/chromium/third_party/libyuv/unit_test/color_test.cc
index 8bc324dcdd1..555413f79a4 100644
--- a/chromium/third_party/libyuv/unit_test/color_test.cc
+++ b/chromium/third_party/libyuv/unit_test/color_test.cc
@@ -81,7 +81,19 @@ namespace libyuv {
} \
p += HN; \
} \
- \
+ if ((benchmark_height_ & 1) && HS == 2) { \
+ for (int x = 0; x < benchmark_width_ - 1; x += 2) { \
+ uint8 r = static_cast<uint8>(fastrand()); \
+ p[0] = r; \
+ p[1] = r; \
+ p += 2; \
+ } \
+ if (benchmark_width_ & 1) { \
+ uint8 r = static_cast<uint8>(fastrand()); \
+ p[0] = r; \
+ p += 1; \
+ } \
+ } \
/* Start with YUV converted to ARGB. */ \
YUVTOARGB(orig_y, benchmark_width_, \
orig_u, (benchmark_width_ + 1) / 2, \
diff --git a/chromium/third_party/libyuv/unit_test/convert_test.cc b/chromium/third_party/libyuv/unit_test/convert_test.cc
index 1d736bdfed0..c4d264a48eb 100644
--- a/chromium/third_party/libyuv/unit_test/convert_test.cc
+++ b/chromium/third_party/libyuv/unit_test/convert_test.cc
@@ -766,8 +766,10 @@ TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
// arm version subsamples by summing 4 pixels then multiplying by matrix with
// 4x smaller coefficients which are rounded to nearest integer.
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
+TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 4)
#else
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
+TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 0)
#endif
TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
@@ -788,12 +790,12 @@ TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2)
TESTATOPLANAR(J400, 1, 1, J420, 2, 2, 2)
-#define TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
- W1280, N, NEG, OFF) \
+#define TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,\
+ W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
- const int kStride = (kWidth * 8 * BPP_A + 7) / 8; \
+ const int kStride = SUBSAMPLE(kWidth, SUB_A) * BPP_A; \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
align_buffer_64(src_argb, kStride * kHeight + OFF); \
align_buffer_64(dst_y_c, kWidth * kHeight); \
@@ -847,20 +849,20 @@ TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
free_aligned_buffer_64(src_argb); \
}
-#define TESTATOBIPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
- TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
+#define TESTATOBIPLANAR(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
+ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, _Any, +, 0) \
- TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
+ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Unaligned, +, 1) \
- TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
+ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Invert, -, 0) \
- TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
+ TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Opt, +, 0)
-TESTATOBIPLANAR(ARGB, 4, NV12, 2, 2)
-TESTATOBIPLANAR(ARGB, 4, NV21, 2, 2)
-TESTATOBIPLANAR(YUY2, 2, NV12, 2, 2)
-TESTATOBIPLANAR(UYVY, 2, NV12, 2, 2)
+TESTATOBIPLANAR(ARGB, 1, 4, NV12, 2, 2)
+TESTATOBIPLANAR(ARGB, 1, 4, NV21, 2, 2)
+TESTATOBIPLANAR(YUY2, 2, 4, NV12, 2, 2)
+TESTATOBIPLANAR(UYVY, 2, 4, NV12, 2, 2)
#define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
@@ -1346,7 +1348,7 @@ TEST_F(LibYUVConvertTest, MJPGToARGB) {
#endif // HAVE_JPEG
-TEST_F(LibYUVConvertTest, CropNV12) {
+TEST_F(LibYUVConvertTest, NV12Crop) {
const int SUBSAMP_X = 2;
const int SUBSAMP_Y = 2;
const int kWidth = benchmark_width_;
@@ -1682,12 +1684,12 @@ TEST_F(LibYUVConvertTest, NAME) { \
TESTPTOB(TestYUY2ToNV12, YUY2ToI420, YUY2ToNV12)
TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12)
-#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
W1280, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
- const int kStrideB = kWidth * BPP_B; \
+ const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
@@ -1735,58 +1737,56 @@ TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
free_aligned_buffer_64(dst_argb_bc); \
}
-#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
FMT_C, BPP_C) \
- TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C) \
- TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C) \
- TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Invert, -, 0, FMT_C, BPP_C) \
- TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
-TESTPLANARTOE(I420, 2, 2, ARGB, 4, ABGR, 4)
-TESTPLANARTOE(J420, 2, 2, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(J420, 2, 2, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(H420, 2, 2, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(H420, 2, 2, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, BGRA, 4, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, RGBA, 4, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, RGB24, 3, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, RAW, 3, RGB24, 3)
-TESTPLANARTOE(I420, 2, 2, RGB24, 3, RAW, 3)
-TESTPLANARTOE(I420, 2, 2, ARGB, 4, RAW, 3)
-TESTPLANARTOE(I420, 2, 2, RAW, 3, ARGB, 4)
-TESTPLANARTOE(I420, 2, 2, ARGB, 4, RGB565, 2)
-TESTPLANARTOE(I420, 2, 2, ARGB, 4, ARGB1555, 2)
-TESTPLANARTOE(I420, 2, 2, ARGB, 4, ARGB4444, 2)
-TESTPLANARTOE(I422, 2, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(J422, 2, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(J422, 2, 1, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(H422, 2, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(H422, 2, 1, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(I422, 2, 1, BGRA, 4, ARGB, 4)
-TESTPLANARTOE(I422, 2, 1, ABGR, 4, ARGB, 4)
-TESTPLANARTOE(I422, 2, 1, RGBA, 4, ARGB, 4)
-TESTPLANARTOE(I411, 4, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(I444, 1, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(J444, 1, 1, ARGB, 4, ARGB, 4)
-TESTPLANARTOE(I444, 1, 1, ABGR, 4, ARGB, 4)
-// TESTPLANARTOE(I420, 2, 2, YUY2, 2, ARGB, 4)
-// TESTPLANARTOE(I420, 2, 2, UYVY, 2, ARGB, 4)
-TESTPLANARTOE(I422, 2, 1, YUY2, 2, ARGB, 4)
-TESTPLANARTOE(I422, 2, 1, UYVY, 2, ARGB, 4)
-// TESTPLANARTOE(I420, 2, 2, ARGB, 4, I400, 1)
-// TESTPLANARTOE(J420, 2, 2, ARGB, 4, J400, 1)
-
-#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4)
+TESTPLANARTOE(J420, 2, 2, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(J420, 2, 2, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(H420, 2, 2, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
+TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
+TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
+TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
+TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
+TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
+TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(J422, 2, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(J422, 2, 1, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(H422, 2, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(H422, 2, 1, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
+TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
+TESTPLANARTOE(I411, 4, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(I444, 1, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(J444, 1, 1, ARGB, 1, 4, ARGB, 4)
+TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
+TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
+TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
+TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)
+
+#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
- const int kStrideB = kWidth * BPP_B; \
+ const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
const int kSizeUV = \
SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y); \
align_buffer_64(src_y, kWidth * kHeight + OFF); \
@@ -1840,20 +1840,20 @@ TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
free_aligned_buffer_64(dst_argb_bc); \
}
-#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
FMT_C, BPP_C) \
- TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C, 0) \
- TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C, 0) \
- TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0) \
- TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0) \
- TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
+ TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1)
-TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 4, ABGR, 4)
-TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 4, ARGB, 4)
+TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
+TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
} // namespace libyuv
diff --git a/chromium/third_party/libyuv/unit_test/cpu_test.cc b/chromium/third_party/libyuv/unit_test/cpu_test.cc
index 020205a3c3f..5933ee442d8 100644
--- a/chromium/third_party/libyuv/unit_test/cpu_test.cc
+++ b/chromium/third_party/libyuv/unit_test/cpu_test.cc
@@ -48,8 +48,8 @@ TEST_F(LibYUVBaseTest, TestCpuHas) {
printf("Has AVX3 %x\n", has_avx3);
int has_mips = TestCpuFlag(kCpuHasMIPS);
printf("Has MIPS %x\n", has_mips);
- int has_mips_dspr2 = TestCpuFlag(kCpuHasMIPS_DSPR2);
- printf("Has MIPS DSPR2 %x\n", has_mips_dspr2);
+ int has_dspr2 = TestCpuFlag(kCpuHasDSPR2);
+ printf("Has DSPR2 %x\n", has_dspr2);
}
TEST_F(LibYUVBaseTest, TestCpuCompilerEnabled) {
diff --git a/chromium/third_party/libyuv/unit_test/planar_test.cc b/chromium/third_party/libyuv/unit_test/planar_test.cc
index 3740c078397..9146c9a4550 100644
--- a/chromium/third_party/libyuv/unit_test/planar_test.cc
+++ b/chromium/third_party/libyuv/unit_test/planar_test.cc
@@ -1149,111 +1149,6 @@ TEST_F(LibYUVPlanarTest, ARGBBlend_Opt) {
EXPECT_LE(max_diff, 1);
}
-#ifdef HAS_BLENDPLANEROW_AVX2
-static void TestBlendPlaneRow(int width, int height, int benchmark_iterations,
- int invert, int off) {
- int has_ssse3 = TestCpuFlag(kCpuHasSSSE3);
- int has_avx2 = TestCpuFlag(kCpuHasAVX2);
- width = width * height;
- height = 1;
- if (width < 256) {
- width = 256;
- }
- const int kBpp = 1;
- const int kStride = width * kBpp;
- align_buffer_64(src_argb_a, kStride * height + off);
- align_buffer_64(src_argb_b, kStride * height + off);
- align_buffer_64(src_argb_alpha, kStride * height + off);
- align_buffer_64(dst_argb_c, kStride * height + off);
- align_buffer_64(dst_argb_opt, kStride * height + off);
- memset(dst_argb_c, 255, kStride * height + off);
- memset(dst_argb_opt, 255, kStride * height + off);
-
- if (has_ssse3) {
- // Test source is maintained exactly if alpha is 255.
- for (int i = 0; i < 256; ++i) {
- src_argb_a[i + off] = i;
- src_argb_b[i + off] = 255 - i;
- src_argb_alpha[i + off] = 255;
- }
- BlendPlaneRow_SSSE3(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_opt + off,
- 256);
- for (int i = 0; i < 256; ++i) {
- EXPECT_EQ(src_argb_a[i + off], dst_argb_opt[i + off]);
- }
- // Test destination is maintained exactly if alpha is 0.
- for (int i = 0; i < 256; ++i) {
- src_argb_a[i + off] = i;
- src_argb_b[i + off] = 255 - i;
- src_argb_alpha[i + off] = 0;
- }
- BlendPlaneRow_SSSE3(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_opt + off,
- 256);
- for (int i = 0; i < 256; ++i) {
- EXPECT_EQ(src_argb_b[i + off], dst_argb_opt[i + off]);
- }
- }
- for (int i = 0; i < kStride * height; ++i) {
- src_argb_a[i + off] = (fastrand() & 0xff);
- src_argb_b[i + off] = (fastrand() & 0xff);
- src_argb_alpha[i + off] = (fastrand() & 0xff);
- }
-
- BlendPlaneRow_C(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_c + off,
- width * height);
- for (int i = 0; i < benchmark_iterations; ++i) {
- if (has_avx2) {
- BlendPlaneRow_AVX2(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_opt + off,
- width * height);
- } else {
- if (has_ssse3) {
- BlendPlaneRow_SSSE3(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_opt + off,
- width * height);
- } else {
- BlendPlaneRow_C(src_argb_a + off,
- src_argb_b + off,
- src_argb_alpha + off,
- dst_argb_opt + off,
- width * height);
- }
- }
- }
- for (int i = 0; i < kStride * height; ++i) {
- EXPECT_EQ(dst_argb_c[i + off], dst_argb_opt[i + off]);
- }
- free_aligned_buffer_64(src_argb_a);
- free_aligned_buffer_64(src_argb_b);
- free_aligned_buffer_64(src_argb_alpha);
- free_aligned_buffer_64(dst_argb_c);
- free_aligned_buffer_64(dst_argb_opt);
- return;
-}
-
-TEST_F(LibYUVPlanarTest, BlendPlaneRow_Opt) {
- TestBlendPlaneRow(benchmark_width_, benchmark_height_, benchmark_iterations_,
- +1, 0);
-}
-TEST_F(LibYUVPlanarTest, BlendPlaneRow_Unaligned) {
- TestBlendPlaneRow(benchmark_width_, benchmark_height_, benchmark_iterations_,
- +1, 1);
-}
-#endif
-
static void TestBlendPlane(int width, int height, int benchmark_iterations,
int disable_cpu_flags, int benchmark_cpu_info,
int invert, int off) {
@@ -1435,7 +1330,9 @@ TEST_F(LibYUVPlanarTest, I420Blend_Unaligned) {
TestI420Blend(benchmark_width_, benchmark_height_, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_, +1, 1);
}
-TEST_F(LibYUVPlanarTest, I420Blend_Any) {
+
+// TODO(fbarchard): DISABLED because _Any uses C. Avoid C and re-enable.
+TEST_F(LibYUVPlanarTest, DISABLED_I420Blend_Any) {
TestI420Blend(benchmark_width_ - 4, benchmark_height_, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_, +1, 0);
}
diff --git a/chromium/third_party/libyuv/unit_test/rotate_argb_test.cc b/chromium/third_party/libyuv/unit_test/rotate_argb_test.cc
index d557a19b367..24640800a19 100644
--- a/chromium/third_party/libyuv/unit_test/rotate_argb_test.cc
+++ b/chromium/third_party/libyuv/unit_test/rotate_argb_test.cc
@@ -98,62 +98,34 @@ static void ARGBTestRotate(int src_width, int src_height,
disable_cpu_flags, benchmark_cpu_info, 4);
}
-TEST_F(LibYUVRotateTest, ARGBRotate0) {
+TEST_F(LibYUVRotateTest, ARGBRotate0_Opt) {
ARGBTestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, ARGBRotate90) {
+TEST_F(LibYUVRotateTest, ARGBRotate90_Opt) {
ARGBTestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, ARGBRotate180) {
+TEST_F(LibYUVRotateTest, ARGBRotate180_Opt) {
ARGBTestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, ARGBRotate270) {
+TEST_F(LibYUVRotateTest, ARGBRotate270_Opt) {
ARGBTestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate270, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, ARGBRotate0_Odd) {
- ARGBTestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
- benchmark_width_ - 3, benchmark_height_ - 1,
- kRotate0, benchmark_iterations_,
- disable_cpu_flags_, benchmark_cpu_info_);
-}
-
-TEST_F(LibYUVRotateTest, ARGBRotate90_Odd) {
- ARGBTestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
- benchmark_height_ - 1, benchmark_width_ - 3,
- kRotate90, benchmark_iterations_,
- disable_cpu_flags_, benchmark_cpu_info_);
-}
-
-TEST_F(LibYUVRotateTest, ARGBRotate180_Odd) {
- ARGBTestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
- benchmark_width_ - 3, benchmark_height_ - 1,
- kRotate180, benchmark_iterations_,
- disable_cpu_flags_, benchmark_cpu_info_);
-}
-
-TEST_F(LibYUVRotateTest, ARGBRotate270_Odd) {
- ARGBTestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
- benchmark_height_ - 1, benchmark_width_ - 3,
- kRotate270, benchmark_iterations_,
- disable_cpu_flags_, benchmark_cpu_info_);
-}
-
static void TestRotatePlane(int src_width, int src_height,
int dst_width, int dst_height,
libyuv::RotationMode mode,
@@ -166,56 +138,56 @@ static void TestRotatePlane(int src_width, int src_height,
disable_cpu_flags, benchmark_cpu_info, 1);
}
-TEST_F(LibYUVRotateTest, RotatePlane0) {
+TEST_F(LibYUVRotateTest, RotatePlane0_Opt) {
TestRotatePlane(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane90) {
+TEST_F(LibYUVRotateTest, RotatePlane90_Opt) {
TestRotatePlane(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane180) {
+TEST_F(LibYUVRotateTest, RotatePlane180_Opt) {
TestRotatePlane(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane270) {
+TEST_F(LibYUVRotateTest, RotatePlane270_Opt) {
TestRotatePlane(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate270, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane0_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_RotatePlane0_Odd) {
TestRotatePlane(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane90_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_RotatePlane90_Odd) {
TestRotatePlane(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane180_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_RotatePlane180_Odd) {
TestRotatePlane(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, RotatePlane270_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_RotatePlane270_Odd) {
TestRotatePlane(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate270, benchmark_iterations_,
diff --git a/chromium/third_party/libyuv/unit_test/rotate_test.cc b/chromium/third_party/libyuv/unit_test/rotate_test.cc
index c97cc208bb4..1f5b86e95a0 100644
--- a/chromium/third_party/libyuv/unit_test/rotate_test.cc
+++ b/chromium/third_party/libyuv/unit_test/rotate_test.cc
@@ -83,56 +83,59 @@ static void I420TestRotate(int src_width, int src_height,
free_aligned_buffer_64(src_i420);
}
-TEST_F(LibYUVRotateTest, I420Rotate0) {
+TEST_F(LibYUVRotateTest, I420Rotate0_Opt) {
I420TestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate90) {
+TEST_F(LibYUVRotateTest, I420Rotate90_Opt) {
I420TestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate180) {
+TEST_F(LibYUVRotateTest, I420Rotate180_Opt) {
I420TestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate270) {
+TEST_F(LibYUVRotateTest, I420Rotate270_Opt) {
I420TestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate270, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate0_Odd) {
+// TODO(fbarchard): Remove odd width tests.
+// Odd width tests work but disabled because they use C code and can be
+// tested by passing an odd width command line or environment variable.
+TEST_F(LibYUVRotateTest, DISABLED_I420Rotate0_Odd) {
I420TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate90_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_I420Rotate90_Odd) {
I420TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate180_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_I420Rotate180_Odd) {
I420TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, I420Rotate270_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_I420Rotate270_Odd) {
I420TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate270, benchmark_iterations_,
@@ -203,84 +206,84 @@ static void NV12TestRotate(int src_width, int src_height,
free_aligned_buffer_64(src_nv12);
}
-TEST_F(LibYUVRotateTest, NV12Rotate0) {
+TEST_F(LibYUVRotateTest, NV12Rotate0_Opt) {
NV12TestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate90) {
+TEST_F(LibYUVRotateTest, NV12Rotate90_Opt) {
NV12TestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate180) {
+TEST_F(LibYUVRotateTest, NV12Rotate180_Opt) {
NV12TestRotate(benchmark_width_, benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate270) {
+TEST_F(LibYUVRotateTest, NV12Rotate270_Opt) {
NV12TestRotate(benchmark_width_, benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate270, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate0_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_NV12Rotate0_Odd) {
NV12TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate90_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_NV12Rotate90_Odd) {
NV12TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate180_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_NV12Rotate180_Odd) {
NV12TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_width_ - 3, benchmark_height_ - 1,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate270_Odd) {
+TEST_F(LibYUVRotateTest, DISABLED_NV12Rotate270_Odd) {
NV12TestRotate(benchmark_width_ - 3, benchmark_height_ - 1,
benchmark_height_ - 1, benchmark_width_ - 3,
kRotate270, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate0_Inverted) {
+TEST_F(LibYUVRotateTest, NV12Rotate0_Invert) {
NV12TestRotate(benchmark_width_, -benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate0, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate90_Inverted) {
+TEST_F(LibYUVRotateTest, NV12Rotate90_Invert) {
NV12TestRotate(benchmark_width_, -benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate90, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate180_Inverted) {
+TEST_F(LibYUVRotateTest, NV12Rotate180_Invert) {
NV12TestRotate(benchmark_width_, -benchmark_height_,
benchmark_width_, benchmark_height_,
kRotate180, benchmark_iterations_,
disable_cpu_flags_, benchmark_cpu_info_);
}
-TEST_F(LibYUVRotateTest, NV12Rotate270_Inverted) {
+TEST_F(LibYUVRotateTest, NV12Rotate270_Invert) {
NV12TestRotate(benchmark_width_, -benchmark_height_,
benchmark_height_, benchmark_width_,
kRotate270, benchmark_iterations_,
diff --git a/chromium/third_party/libyuv/unit_test/unit_test.cc b/chromium/third_party/libyuv/unit_test/unit_test.cc
index c3fec6b39fa..c98c285cbfe 100644
--- a/chromium/third_party/libyuv/unit_test/unit_test.cc
+++ b/chromium/third_party/libyuv/unit_test/unit_test.cc
@@ -20,7 +20,7 @@
// TODO(fbarchard): Add command line parsing to pass this as option.
#define BENCHMARK_ITERATIONS 1
-int fastrand_seed = 0xfb;
+unsigned int fastrand_seed = 0xfb;
DEFINE_int32(libyuv_width, 0, "width of test image.");
DEFINE_int32(libyuv_height, 0, "height of test image.");
@@ -125,7 +125,7 @@ LibYUVColorTest::LibYUVColorTest() :
benchmark_cpu_info_ = atoi(cpu_flags); // NOLINT
}
if (FLAGS_libyuv_cpu_info) {
- benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
+ benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
}
benchmark_pixels_div256_ = static_cast<int>((
static_cast<double>(Abs(benchmark_width_)) *
@@ -177,7 +177,7 @@ LibYUVScaleTest::LibYUVScaleTest() :
benchmark_cpu_info_ = atoi(cpu_flags); // NOLINT
}
if (FLAGS_libyuv_cpu_info) {
- benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
+ benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
}
benchmark_pixels_div256_ = static_cast<int>((
static_cast<double>(Abs(benchmark_width_)) *
@@ -229,7 +229,7 @@ LibYUVRotateTest::LibYUVRotateTest() :
benchmark_cpu_info_ = atoi(cpu_flags); // NOLINT
}
if (FLAGS_libyuv_cpu_info) {
- benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
+ benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
}
benchmark_pixels_div256_ = static_cast<int>((
static_cast<double>(Abs(benchmark_width_)) *
@@ -281,7 +281,7 @@ LibYUVPlanarTest::LibYUVPlanarTest() :
benchmark_cpu_info_ = atoi(cpu_flags); // NOLINT
}
if (FLAGS_libyuv_cpu_info) {
- benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
+ benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
}
benchmark_pixels_div256_ = static_cast<int>((
static_cast<double>(Abs(benchmark_width_)) *
@@ -333,7 +333,7 @@ LibYUVBaseTest::LibYUVBaseTest() :
benchmark_cpu_info_ = atoi(cpu_flags); // NOLINT
}
if (FLAGS_libyuv_cpu_info) {
- benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
+ benchmark_cpu_info_ = FLAGS_libyuv_cpu_info;
}
benchmark_pixels_div256_ = static_cast<int>((
static_cast<double>(Abs(benchmark_width_)) *
diff --git a/chromium/third_party/libyuv/unit_test/unit_test.h b/chromium/third_party/libyuv/unit_test/unit_test.h
index f816ec711ac..009ff62abf6 100644
--- a/chromium/third_party/libyuv/unit_test/unit_test.h
+++ b/chromium/third_party/libyuv/unit_test/unit_test.h
@@ -55,10 +55,10 @@ static inline double get_time() {
}
#endif
-extern int fastrand_seed;
+extern unsigned int fastrand_seed;
inline int fastrand() {
- fastrand_seed = fastrand_seed * 214013 + 2531011;
- return (fastrand_seed >> 16) & 0xffff;
+ fastrand_seed = fastrand_seed * 214013u + 2531011u;
+ return static_cast<int>((fastrand_seed >> 16) & 0xffff);
}
static inline void MemRandomize(uint8* dst, int64 len) {
diff --git a/chromium/third_party/libyuv/unit_test/version_test.cc b/chromium/third_party/libyuv/unit_test/version_test.cc
deleted file mode 100644
index 667d984e9ce..00000000000
--- a/chromium/third_party/libyuv/unit_test/version_test.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2012 The LibYuv Project Authors. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "libyuv/basic_types.h"
-#include "libyuv/version.h"
-#include "../unit_test/unit_test.h"
-
-namespace libyuv {
-
-// Tests SVN version against include/libyuv/version.h
-// SVN version is bumped by documentation changes as well as code.
-// Although the versions should match, once checked in, a tolerance is allowed.
-TEST_F(LibYUVBaseTest, DISABLED_TestVersion) {
- EXPECT_GE(LIBYUV_VERSION, 169); // 169 is first version to support version.
- printf("LIBYUV_VERSION %d\n", LIBYUV_VERSION);
-#ifdef LIBYUV_SVNREVISION
- const char *ver = strchr(LIBYUV_SVNREVISION, ':');
- if (ver) {
- ++ver;
- } else {
- ver = LIBYUV_SVNREVISION;
- }
- int svn_revision = atoi(ver); // NOLINT
- printf("LIBYUV_SVNREVISION %d\n", svn_revision);
- EXPECT_NEAR(LIBYUV_VERSION, svn_revision, 20); // Allow version to be close.
- if (LIBYUV_VERSION != svn_revision) {
- printf("WARNING - Versions do not match.\n");
- }
-#else
- printf("WARNING - SVN Version unavailable. Test not run.\n");
-#endif
-}
-
-} // namespace libyuv
diff --git a/chromium/third_party/libyuv/util/cpuid.c b/chromium/third_party/libyuv/util/cpuid.c
index 322cdf611cf..94e245b11d0 100644
--- a/chromium/third_party/libyuv/util/cpuid.c
+++ b/chromium/third_party/libyuv/util/cpuid.c
@@ -66,8 +66,8 @@ int main(int argc, const char* argv[]) {
printf("Has NEON %x\n", has_neon);
}
if (has_mips) {
- int has_mips_dspr2 = TestCpuFlag(kCpuHasMIPS_DSPR2);
- printf("Has MIPS DSPR2 %x\n", has_mips_dspr2);
+ int has_dspr2 = TestCpuFlag(kCpuHasDSPR2);
+ printf("Has DSPR2 %x\n", has_dspr2);
}
if (has_x86) {
int has_sse2 = TestCpuFlag(kCpuHasSSE2);