author     Dale Curtis <dalecurtis@chromium.org>  2022-12-16 22:37:46 +0000
committer  Michael Brüning <michael.bruning@qt.io>  2023-03-27 08:12:03 +0000
commit     c885ec409f9b6ffa25e03851729b1bc2ad2005b3 (patch)
tree       0c9f205efc231ede87d2704b2780d1569caf5111 /chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c
parent     0d63fc949d16f3e37ed7ab43d335b9d81cc6fdf7 (diff)
download   qtwebengine-chromium-102-based.tar.gz
[Backport] Security bug 1401571
Manual update of libdav1d to match the version introduced by patch
https://chromium-review.googlesource.com/c/chromium/src/+/4114163:

Roll src/third_party/dav1d/libdav1d/ 87f9a81cd..ed63a7459 (104 commits)

This roll required a few changes to get working:
- "properties" => "built in options" crossfile configuration change due to Meson deprecation.
- generic config creation never worked, so fixed.
- PPC64 configs were never checked in, so switched to generic.
- copyright header changes for generate_sources.
- Updated readme.chromium with potential issues that can arise.

https://chromium.googlesource.com/external/github.com/videolan/dav1d.git/+log/87f9a81cd770..ed63a7459376

$ git log 87f9a81cd..ed63a7459 --date=short --no-merges --format='%ad %ae %s'
2022-12-09 jamrial dav1d: add an option to skip decoding some frame types
2022-12-08 jamrial picture: support creating and freeing refs without tile data
2022-12-07 gramner x86: Add 10bpc 8x32/32x8 itx AVX-512 (Ice Lake) asm
2022-12-07 gramner x86: Add minor DC-only IDCT optimizations
2022-12-13 gramner getbits: Fix assertion failure
2022-12-13 gramner checkasm: Fix integer overflow in refmvs test
2022-01-26 gramner dav1dplay: Update to new libplacebo API
2022-12-09 gramner Add minor getbits improvements
2022-12-09 gramner Add a separate getbits function for getting a single bit
2022-12-09 gramner Remove redundant zeroing in sequence header parsing
2022-12-09 gramner Set the correct default value of initial_display_delay
2022-12-09 jamrial tools: remove the null last entry in inloop_filters_tbl
2022-12-04 lu_zero Do not assume the picture allocation starts as the left edge
2022-11-21 lu_zero ppc: Allocate the correct temp buffer size
2022-11-21 lu_zero ppc: Do not use static const with vec_splats
2022-11-02 charlie.c.hayden Add info to dav1d_send_data docs
2022-10-30 jbeich build: drop -D_DARWIN_C_SOURCE on macOS/iOS after 6b611d36acab
2022-10-30 jbeich build: drop -D_POSIX_C_SOURCE on non-Linux after 6b611d36acab
2022-06-28 victorien threading: Add a pending list for async task insertion
2022-10-26 martin Implement atomic_compare_exchange_strong in the atomic compat headers
2022-10-06 victorien threading: Fix a race around frame completion (frame-mt)
2022-10-07 sebastian Handle host_machine.system() 'ios' and 'tvos' the same way as 'darwin'
2022-09-23 gramner x86: Add 10-bit 8x8/8x16/16x8/16x16 itx AVX-512 (Ice Lake) asm
2022-09-30 gramner Specify hidden visibility for global data symbol declarations
2022-09-28 gramner build: strip() the result of cc.get_define()
2022-09-26 gramner checkasm: Move printf format string to .rodata on x86
2022-09-26 gramner checkasm: Improve 32-bit parameter clobbering on x86-64
2022-09-26 gramner x86: Fix incorrect 32-bit parameter usage in high bit-depth AVX-512 mc
2022-09-09 martin arm: itx: Add clipping to row_clip_min/max in the 10 bpc codepaths
2022-09-15 gramner x86: Fix overflows in 12bpc AVX2 IDCT/IADST
2022-09-15 gramner x86: Fix overflows in 12bpc AVX2 DC-only IDCT
2022-09-15 gramner x86: Fix clipping in high bit-depth AVX2 4x16 IDCT
2022-03-21 martin Don't use gas-preprocessor with clang-cl for arm targets
2022-06-07 david_conrad Fix checking the reference dimesions for the projection process
2022-06-07 david_conrad Fix calculation of OBMC lap dimensions
2022-06-07 david_conrad Support film grain application whose only effect is clipping to video range
2022-06-07 david_conrad Ignore T.35 metadata if the OBU contains no payload
2022-06-07 david_conrad Fix chroma deblock filter size calculation for lossless
2022-06-07 david_conrad Fix rounding in the calculation of initialSubpelX
2022-06-07 david_conrad Fix overflow when saturating dequantized coefficients clipped to 0
2022-06-08 david_conrad Fix overflow in 8-bit NEON ADST
2022-09-14 martin tools: Allocate the priv structs with proper alignment
2022-09-08 gramner x86: Fix clipping in 10bpc SSE4.1 IDCT asm
2022-09-08 gramner build: Improve Windows linking options
2022-09-08 gramner tools: Improve demuxer probing
2022-08-30 code CI: Disable trimming on some tests
2022-08-30 code CI: Remove git 'safe.directory' config
2022-08-30 code gcovr: Ignore parsing errors
2022-08-30 code crossfiles: Update Android toolchains
2022-08-30 code CI: Update images (...)
2022-09-01 victorien checkasm: Add short options
2022-09-01 victorien checkasm: Add pattern matching to --test
2022-09-01 victorien checkasm: Remove pattern matching from --bench
2022-08-29 victorien checkasm: Add a --function option
2022-08-30 victorien threading: Fix copy_lpf_progress initialization
2022-08-19 jamrial data: don't overwrite the Dav1dDataProps size value
2022-07-18 gramner Adjust inlining attributes on some functions
2022-07-19 gramner x86: Remove leftover instruction in loopfilter AVX2 asm
2022-06-07 david_conrad Enable pointer authentication in assembly when building arm64e
2022-06-07 david_conrad Don't trash the return stack buffer in the NEON loop filter
2022-07-03 thresh CI: Removed snap package generation
2022-07-06 gramner Eliminate unused C DSP functions at compile time
2022-07-06 gramner cpu: Inline dav1d_get_cpu_flags()
2022-06-22 gramner x86: Add minor loopfilter asm improvements
2022-06-15 gramner checkasm: Speed up signal handling
2022-06-15 gramner checkasm: Improve seed generation on Windows
2022-06-20 gramner ci: Don't specify a specific MacOS version
2022-06-14 gramner x86: Add high bit-depth loopfilter AVX-512 (Ice Lake) asm
2022-06-13 victorien checkasm/lpf: Use operating dimensions
2022-06-03 gramner checkasm: Print the cpu model and cpuid signature on x86
2022-06-03 gramner checkasm: Add a vzeroupper check on x86
2022-06-02 gramner x86: Add a workaround for quirky AVX-512 hardware behavior
2022-05-31 victorien checkasm: Fix uninitialized variable
2022-05-14 code CI: Update coverage collecting
2022-05-05 code CI: Add a build with the minimum requirements
2022-05-05 code CI: Deactivate git 'safe.directory'
2022-03-24 code CI: Update images
2022-05-25 victorien Fix typo
2022-05-19 gramner x86: Add high bit-depth cdef_filter AVX-512 (Ice Lake) asm
2022-05-20 gramner checkasm: Print --help message to stderr instead of stdout
2022-05-20 gramner checkasm: Split cdef test into separate pri/sec/pri+sec parts
2022-05-20 gramner checkasm: Improve benchmarking of functions that modify their input
2022-05-18 b x86/itx_avx2: fix typo
2022-04-22 code CI: Add gcc12 and clang14 builds with mold linker
2022-04-26 code CI: Trigger documentation rebuild if configuration changes
2022-04-24 code meson/doc: Fix doxygen config
2022-04-28 gramner Use a relaxed memory ordering in dav1d_ref_inc()
2022-04-28 gramner Remove redundant code in dav1d_cdf_thread_unref()
2022-04-28 gramner Inline dav1d_ref_inc()
2022-04-24 code x86/itx: Add 32x8 12bpc AVX2 transforms
2022-04-24 code x86/itx: Add 8x32 12bpc AVX2 transforms
2022-04-24 code x86/itx: Deduplicate dconly code
2022-04-23 code lib: Fix typo in documentation
2022-04-07 jamrial obu: don't output invisible but showable key frames more than once
2022-04-07 jamrial obu: check that the frame referenced by existing_frame_idx is showable
2022-04-07 jamrial obu: check refresh_frame_flags is not equal to allFrames on Intra Only frames
2022-03-29 robux4 remove multipass wait from dav1d_decode_frame
2022-04-07 jamrial picture: ensure the new seq header and op param info flags are attached to the next visible picture in display order
2022-03-31 jamrial lib: add a function to query the decoder frame delay
2022-03-31 jamrial lib: split calculating thread count to its own function

Created with:
  roll-dep src/third_party/dav1d/libdav1d

Fixed: 1401571
Change-Id: Ic3cef540a87a2cf411abe6071fd4c9963ea61f75
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4114163
Reviewed-by: Wan-Teh Chang <wtc@google.com>
Commit-Queue: Dale Curtis <dalecurtis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1084574}
Reviewed-on: https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/468619
Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c')
-rw-r--r--  chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c  487
1 file changed, 487 insertions(+), 0 deletions(-)
diff --git a/chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c b/chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c
new file mode 100644
index 00000000000..e2e759810f7
--- /dev/null
+++ b/chromium/third_party/dav1d/libdav1d/src/ppc/cdef_tmpl.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright © 2019, Luca Barbato
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/ppc/dav1d_types.h"
+#include "src/ppc/cdef.h"
+
+#if BITDEPTH == 8
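+// Vector version of the CDEF constrain() primitive: per 16-bit lane it returns
+// sign(diff) * min(|diff|, max(0, threshold - (|diff| >> shift))), with
+// shift = max(0, damping - log2(threshold)); a zero threshold yields zero.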
+static inline i16x8 vconstrain(const i16x8 diff, const int16_t threshold,
+ const int damping)
+{
+ const i16x8 zero = vec_splat_s16(0);
+ if (!threshold) return zero;
+ const uint16_t shift = imax(0, damping - ulog2(threshold));
+ const i16x8 abs_diff = vec_abs(diff);
+ const b16x8 mask = vec_cmplt(diff, zero);
+ const i16x8 thr = vec_splats(threshold);
+ const i16x8 sub = vec_sub(thr, vec_sra(abs_diff, vec_splats(shift)));
+ const i16x8 max = vec_max(zero, sub);
+ const i16x8 min = vec_min(abs_diff, max);
+ const i16x8 neg = vec_sub(zero, min);
+ return vec_sel(min, neg, mask);
+}
+
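+// copy4xN() assembles the 16-bit working block for 4-pixel-wide filtering: the
+// w x h source pixels plus a 2-pixel border on every side, read from
+// src/left/top/bottom when the corresponding CDEF_HAVE_* edge flag is set and
+// filled with INT16_MAX otherwise, so the filter can ignore missing samples.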
+static inline void copy4xN(uint16_t *tmp, const ptrdiff_t tmp_stride,
+ const uint8_t *src, const ptrdiff_t src_stride,
+ const uint8_t (*left)[2], const uint8_t *const top,
+ const uint8_t *const bottom, const int w, const int h,
+ const enum CdefEdgeFlags edges)
+{
+ const u16x8 fill = vec_splats((uint16_t)INT16_MAX);
+
+ u16x8 l0;
+ u16x8 l1;
+
+ int y_start = -2, y_end = h + 2;
+
+ // Copy top and bottom first
+ if (!(edges & CDEF_HAVE_TOP)) {
+ l0 = fill;
+ l1 = fill;
+ y_start = 0;
+ } else {
+ l0 = u8h_to_u16(vec_vsx_ld(0, top + 0 * src_stride - 2));
+ l1 = u8h_to_u16(vec_vsx_ld(0, top + 1 * src_stride - 2));
+ }
+
+ vec_st(l0, 0, tmp - 2 * 8);
+ vec_st(l1, 0, tmp - 1 * 8);
+
+ if (!(edges & CDEF_HAVE_BOTTOM)) {
+ l0 = fill;
+ l1 = fill;
+ y_end -= 2;
+ } else {
+ l0 = u8h_to_u16(vec_vsx_ld(0, bottom + 0 * src_stride - 2));
+ l1 = u8h_to_u16(vec_vsx_ld(0, bottom + 1 * src_stride - 2));
+ }
+
+ vec_st(l0, 0, tmp + (h + 0) * 8);
+ vec_st(l1, 0, tmp + (h + 1) * 8);
+
+ int y_with_left_edge = 0;
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ u16x8 l = u8h_to_u16(vec_vsx_ld(0, src));
+ vec_vsx_st(l, 0, tmp + 2);
+
+ y_with_left_edge = 1;
+ }
+
+ for (int y = y_with_left_edge; y < h; y++) {
+ u16x8 l = u8h_to_u16(vec_vsx_ld(0, src - 2 + y * src_stride));
+ vec_st(l, 0, tmp + y * 8);
+ }
+
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[y * 8] = INT16_MAX;
+ tmp[1 + y * 8] = INT16_MAX;
+ }
+ } else {
+ for (int y = 0; y < h; y++) {
+ tmp[y * 8] = left[y][0];
+ tmp[1 + y * 8] = left[y][1];
+ }
+ }
+ if (!(edges & CDEF_HAVE_RIGHT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[- 2 + (y + 1) * 8] = INT16_MAX;
+ tmp[- 1 + (y + 1) * 8] = INT16_MAX;
+ }
+ }
+}
+
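+// copy8xN() is the 8-pixel-wide variant of copy4xN(): each 16-byte source load
+// is widened into a high and a low u16x8 half, and rows are stored 16 uint16_t
+// apart in tmp.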
+static inline void copy8xN(uint16_t *tmp, const ptrdiff_t tmp_stride,
+ const uint8_t *src, const ptrdiff_t src_stride,
+ const uint8_t (*left)[2], const uint8_t *const top,
+ const uint8_t *const bottom, const int w, const int h,
+ const enum CdefEdgeFlags edges)
+{
+ const u16x8 fill = vec_splats((uint16_t)INT16_MAX);
+
+ u16x8 l0h, l0l;
+ u16x8 l1h, l1l;
+
+ int y_start = -2, y_end = h + 2;
+
+ // Copy top and bottom first
+ if (!(edges & CDEF_HAVE_TOP)) {
+ l0h = fill;
+ l0l = fill;
+ l1h = fill;
+ l1l = fill;
+ y_start = 0;
+ } else {
+ u8x16 l0 = vec_vsx_ld(0, top + 0 * src_stride - 2);
+ u8x16 l1 = vec_vsx_ld(0, top + 1 * src_stride - 2);
+ l0h = u8h_to_u16(l0);
+ l0l = u8l_to_u16(l0);
+ l1h = u8h_to_u16(l1);
+ l1l = u8l_to_u16(l1);
+ }
+
+ vec_st(l0h, 0, tmp - 4 * 8);
+ vec_st(l0l, 0, tmp - 3 * 8);
+ vec_st(l1h, 0, tmp - 2 * 8);
+ vec_st(l1l, 0, tmp - 1 * 8);
+
+ if (!(edges & CDEF_HAVE_BOTTOM)) {
+ l0h = fill;
+ l0l = fill;
+ l1h = fill;
+ l1l = fill;
+ y_end -= 2;
+ } else {
+ u8x16 l0 = vec_vsx_ld(0, bottom + 0 * src_stride - 2);
+ u8x16 l1 = vec_vsx_ld(0, bottom + 1 * src_stride - 2);
+ l0h = u8h_to_u16(l0);
+ l0l = u8l_to_u16(l0);
+ l1h = u8h_to_u16(l1);
+ l1l = u8l_to_u16(l1);
+ }
+
+ vec_st(l0h, 0, tmp + (h + 0) * 16);
+ vec_st(l0l, 0, tmp + (h + 0) * 16 + 8);
+ vec_st(l1h, 0, tmp + (h + 1) * 16);
+ vec_st(l1l, 0, tmp + (h + 1) * 16 + 8);
+
+ int y_with_left_edge = 0;
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ u8x16 l = vec_vsx_ld(0, src);
+ u16x8 lh = u8h_to_u16(l);
+ u16x8 ll = u8l_to_u16(l);
+ vec_vsx_st(lh, 0, tmp + 2);
+ vec_vsx_st(ll, 0, tmp + 8 + 2);
+
+ y_with_left_edge = 1;
+ }
+
+ for (int y = y_with_left_edge; y < h; y++) {
+ u8x16 l = vec_vsx_ld(0, src - 2 + y * src_stride);
+ u16x8 lh = u8h_to_u16(l);
+ u16x8 ll = u8l_to_u16(l);
+ vec_st(lh, 0, tmp + y * 16);
+ vec_st(ll, 0, tmp + 8 + y * 16);
+ }
+
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[y * 16] = INT16_MAX;
+ tmp[1 + y * 16] = INT16_MAX;
+ }
+ } else {
+ for (int y = 0; y < h; y++) {
+ tmp[y * 16] = left[y][0];
+ tmp[1 + y * 16] = left[y][1];
+ }
+ }
+ if (!(edges & CDEF_HAVE_RIGHT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[- 6 + (y + 1) * 16] = INT16_MAX;
+ tmp[- 5 + (y + 1) * 16] = INT16_MAX;
+ }
+ }
+}
+
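+// max() that treats INT16_MAX lanes in a as unavailable padding (the fill value
+// written by copy4xN/copy8xN): such lanes are replaced by the current maximum b
+// before comparing, so padding never raises the clamping range.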
+static inline i16x8 max_mask(i16x8 a, i16x8 b) {
+ const i16x8 I16X8_INT16_MAX = vec_splats((int16_t)INT16_MAX);
+
+ const b16x8 mask = vec_cmpeq(a, I16X8_INT16_MAX);
+
+ const i16x8 val = vec_sel(a, b, mask);
+
+ return vec_max(val, b);
+}
+
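+// Helper macros for the filter loops below: LOAD_PIX/LOAD_PIX4 load the centre
+// pixels (LOAD_PIX4 packs two 4-pixel rows into one vector via vec_xxpermdi),
+// LOAD_DIR/LOAD_DIR4 fetch the four taps of a pass at offsets +-o0 and +-o1,
+// CONSTRAIN applies vconstrain() to every tap difference, MIN_MAX tracks the
+// clamping range, PRI_0/PRI_1 and SEC_0 apply the tap weights as shifts, and
+// UPDATE_SUM accumulates the weighted, constrained differences.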
+#define LOAD_PIX(addr) \
+ const i16x8 px = (i16x8)vec_vsx_ld(0, addr); \
+ i16x8 max = px; \
+ i16x8 min = px; \
+ i16x8 sum = vec_splat_s16(0);
+
+#define LOAD_PIX4(addr) \
+ const i16x8 a = (i16x8)vec_vsx_ld(0, addr); \
+ const i16x8 b = (i16x8)vec_vsx_ld(0, addr + tmp_stride); \
+ const i16x8 px = vec_xxpermdi(a, b, 0); \
+ i16x8 max = px; \
+ i16x8 min = px; \
+ i16x8 sum = vec_splat_s16(0);
+
+#define LOAD_DIR(p, addr, o0, o1) \
+ const i16x8 p ## 0 = (i16x8)vec_vsx_ld(0, addr + o0); \
+ const i16x8 p ## 1 = (i16x8)vec_vsx_ld(0, addr - o0); \
+ const i16x8 p ## 2 = (i16x8)vec_vsx_ld(0, addr + o1); \
+ const i16x8 p ## 3 = (i16x8)vec_vsx_ld(0, addr - o1);
+
+#define LOAD_DIR4(p, addr, o0, o1) \
+ LOAD_DIR(p ## a, addr, o0, o1) \
+ LOAD_DIR(p ## b, addr + tmp_stride, o0, o1) \
+ const i16x8 p ## 0 = vec_xxpermdi(p ## a ## 0, p ## b ## 0, 0); \
+ const i16x8 p ## 1 = vec_xxpermdi(p ## a ## 1, p ## b ## 1, 0); \
+ const i16x8 p ## 2 = vec_xxpermdi(p ## a ## 2, p ## b ## 2, 0); \
+ const i16x8 p ## 3 = vec_xxpermdi(p ## a ## 3, p ## b ## 3, 0);
+
+#define CONSTRAIN(p, strength) \
+ const i16x8 p ## _d0 = vec_sub(p ## 0, px); \
+ const i16x8 p ## _d1 = vec_sub(p ## 1, px); \
+ const i16x8 p ## _d2 = vec_sub(p ## 2, px); \
+ const i16x8 p ## _d3 = vec_sub(p ## 3, px); \
+\
+ i16x8 p ## _c0 = vconstrain(p ## _d0, strength, damping); \
+ i16x8 p ## _c1 = vconstrain(p ## _d1, strength, damping); \
+ i16x8 p ## _c2 = vconstrain(p ## _d2, strength, damping); \
+ i16x8 p ## _c3 = vconstrain(p ## _d3, strength, damping);
+
+#define MIN_MAX(p) \
+ max = max_mask(p ## 0, max); \
+ min = vec_min(p ## 0, min); \
+ max = max_mask(p ## 1, max); \
+ min = vec_min(p ## 1, min); \
+ max = max_mask(p ## 2, max); \
+ min = vec_min(p ## 2, min); \
+ max = max_mask(p ## 3, max); \
+ min = vec_min(p ## 3, min);
+
+#define PRI_0(p) \
+ p ## _c0 = vec_add(vec_sl(p ## _c0, vec_splat_u16(1)), vec_sl(p ## _c0, vec_splats(tap_even))); \
+ p ## _c1 = vec_add(vec_sl(p ## _c1, vec_splat_u16(1)), vec_sl(p ## _c1, vec_splats(tap_even)));
+
+#define PRI_1(p) \
+ p ## _c2 = vec_sub(vec_sl(p ## _c2, vec_splat_u16(2)), vec_sl(p ## _c2, vec_splats(tap_even))); \
+ p ## _c3 = vec_sub(vec_sl(p ## _c3, vec_splat_u16(2)), vec_sl(p ## _c3, vec_splats(tap_even)));
+
+#define SEC_0(p) \
+ p ## _c0 = vec_sl(p ## _c0, vec_splat_u16(1)); \
+ p ## _c1 = vec_sl(p ## _c1, vec_splat_u16(1)); \
+ p ## _c2 = vec_sl(p ## _c2, vec_splat_u16(1)); \
+ p ## _c3 = vec_sl(p ## _c3, vec_splat_u16(1));
+
+#define UPDATE_SUM(p) \
+ const i16x8 p ## sum0 = vec_add(p ## _c0, p ## _c1); \
+ const i16x8 p ## sum1 = vec_add(p ## _c2, p ## _c3); \
+ sum = vec_add(sum, p ## sum0); \
+ sum = vec_add(sum, p ## sum1);
+
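+// filter_4xN() processes two 4-pixel rows per iteration: one primary pass along
+// direction dir and two secondary passes along dir +- 2, followed by rounding
+// ((sum + 8 - (sum < 0)) >> 4) and clamping to the [min, max] range of the
+// centre pixel and the taps. tap_even selects between the {4,2} (even primary
+// strength) and {3,3} (odd) primary tap weights, implemented as shifts in
+// PRI_0/PRI_1; the secondary weights are {2,1}.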
+static inline void
+filter_4xN(pixel *dst, const ptrdiff_t dst_stride,
+ const pixel (*left)[2], const pixel *const top,
+ const pixel *const bottom, const int w, const int h,
+ const int pri_strength, const int sec_strength, const int dir,
+ const int damping, const enum CdefEdgeFlags edges,
+ const ptrdiff_t tmp_stride, uint16_t *tmp)
+{
+ const int8_t cdef_directions[8 /* dir */][2 /* pass */] = {
+ { -1 * tmp_stride + 1, -2 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, -1 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 0 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 1 * tmp_stride + 2 },
+ { 1 * tmp_stride + 1, 2 * tmp_stride + 2 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 1 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 0 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride - 1 }
+ };
+ const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+ const uint16_t tap_even = !((pri_strength >> bitdepth_min_8) & 1);
+ const int off1 = cdef_directions[dir][0];
+ const int off1_1 = cdef_directions[dir][1];
+
+ const int off2 = cdef_directions[(dir + 2) & 7][0];
+ const int off3 = cdef_directions[(dir + 6) & 7][0];
+
+ const int off2_1 = cdef_directions[(dir + 2) & 7][1];
+ const int off3_1 = cdef_directions[(dir + 6) & 7][1];
+
+ copy4xN(tmp - 2, tmp_stride, dst, dst_stride, left, top, bottom, w, h, edges);
+
+ for (int y = 0; y < h / 2; y++) {
+ LOAD_PIX4(tmp)
+
+ // Primary pass
+ LOAD_DIR4(p, tmp, off1, off1_1)
+
+ CONSTRAIN(p, pri_strength)
+
+ MIN_MAX(p)
+
+ PRI_0(p)
+ PRI_1(p)
+
+ UPDATE_SUM(p)
+
+ // Secondary pass 1
+ LOAD_DIR4(s, tmp, off2, off3)
+
+ CONSTRAIN(s, sec_strength)
+
+ MIN_MAX(s)
+
+ SEC_0(s)
+
+ UPDATE_SUM(s)
+
+ // Secondary pass 2
+ LOAD_DIR4(s2, tmp, off2_1, off3_1)
+
+ CONSTRAIN(s2, sec_strength)
+
+ MIN_MAX(s2)
+
+ UPDATE_SUM(s2)
+
+ // Store
+ i16x8 bias = vec_and((i16x8)vec_cmplt(sum, vec_splat_s16(0)), vec_splat_s16(1));
+ bias = vec_sub(vec_splat_s16(8), bias);
+ i16x8 unclamped = vec_add(px, vec_sra(vec_add(sum, bias), vec_splat_u16(4)));
+ i16x8 vdst = vec_max(vec_min(unclamped, max), min);
+
+ dst[0] = vdst[0];
+ dst[1] = vdst[1];
+ dst[2] = vdst[2];
+ dst[3] = vdst[3];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ dst[0] = vdst[4];
+ dst[1] = vdst[5];
+ dst[2] = vdst[6];
+ dst[3] = vdst[7];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ }
+}
+
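+// filter_8xN() has the same structure as filter_4xN() but handles a full
+// 8-pixel row per iteration, so no row pairing is needed.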
+static inline void
+filter_8xN(pixel *dst, const ptrdiff_t dst_stride,
+ const pixel (*left)[2], const pixel *const top,
+ const pixel *const bottom, const int w, const int h,
+ const int pri_strength, const int sec_strength, const int dir,
+ const int damping, const enum CdefEdgeFlags edges,
+ const ptrdiff_t tmp_stride, uint16_t *tmp)
+{
+ const int8_t cdef_directions[8 /* dir */][2 /* pass */] = {
+ { -1 * tmp_stride + 1, -2 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, -1 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 0 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 1 * tmp_stride + 2 },
+ { 1 * tmp_stride + 1, 2 * tmp_stride + 2 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 1 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 0 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride - 1 }
+ };
+ const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+
+
+ const uint16_t tap_even = !((pri_strength >> bitdepth_min_8) & 1);
+ const int off1 = cdef_directions[dir][0];
+ const int off1_1 = cdef_directions[dir][1];
+
+ const int off2 = cdef_directions[(dir + 2) & 7][0];
+ const int off3 = cdef_directions[(dir + 6) & 7][0];
+
+ const int off2_1 = cdef_directions[(dir + 2) & 7][1];
+ const int off3_1 = cdef_directions[(dir + 6) & 7][1];
+
+ copy8xN(tmp - 2, tmp_stride, dst, dst_stride, left, top, bottom, w, h, edges);
+
+ for (int y = 0; y < h; y++) {
+ LOAD_PIX(tmp)
+
+ // Primary pass
+ LOAD_DIR(p, tmp, off1, off1_1)
+
+ CONSTRAIN(p, pri_strength)
+
+ MIN_MAX(p)
+
+ PRI_0(p)
+ PRI_1(p)
+
+ UPDATE_SUM(p)
+
+ // Secondary pass 1
+ LOAD_DIR(s, tmp, off2, off3)
+
+ CONSTRAIN(s, sec_strength)
+
+ MIN_MAX(s)
+
+ SEC_0(s)
+
+ UPDATE_SUM(s)
+
+ // Secondary pass 2
+ LOAD_DIR(s2, tmp, off2_1, off3_1)
+
+ CONSTRAIN(s2, sec_strength)
+
+ MIN_MAX(s2)
+
+ UPDATE_SUM(s2)
+
+ // Store
+ i16x8 bias = vec_and((i16x8)vec_cmplt(sum, vec_splat_s16(0)), vec_splat_s16(1));
+ bias = vec_sub(vec_splat_s16(8), bias);
+ i16x8 unclamped = vec_add(px, vec_sra(vec_add(sum, bias), vec_splat_u16(4)));
+ i16x8 vdst = vec_max(vec_min(unclamped, max), min);
+
+ dst[0] = vdst[0];
+ dst[1] = vdst[1];
+ dst[2] = vdst[2];
+ dst[3] = vdst[3];
+ dst[4] = vdst[4];
+ dst[5] = vdst[5];
+ dst[6] = vdst[6];
+ dst[7] = vdst[7];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ }
+
+}
+
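+// cdef_fn() instantiates the exported VSX entry points (dav1d_cdef_filter_4x4_vsx,
+// 4x8 and 8x8). Each allocates a stack tmp buffer with room for two rows and two
+// columns of padding on every side (tmp_stride 8 for 4-wide blocks, 16 for
+// 8-wide) and points tmp past that padding before calling filter_WxN().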
+#define cdef_fn(w, h, tmp_stride) \
+void dav1d_cdef_filter_##w##x##h##_vsx(pixel *const dst, \
+ const ptrdiff_t dst_stride, \
+ const pixel (*left)[2], \
+ const pixel *const top, \
+ const pixel *const bottom, \
+ const int pri_strength, \
+ const int sec_strength, \
+ const int dir, \
+ const int damping, \
+ const enum CdefEdgeFlags edges) \
+{ \
+ ALIGN_STK_16(uint16_t, tmp_buf, 12 * tmp_stride + 8,); \
+ uint16_t *tmp = tmp_buf + 2 * tmp_stride + 2; \
+ filter_##w##xN(dst, dst_stride, left, top, bottom, w, h, pri_strength, \
+ sec_strength, dir, damping, edges, tmp_stride, tmp); \
+}
+
+cdef_fn(4, 4, 8);
+cdef_fn(4, 8, 8);
+cdef_fn(8, 8, 16);
+#endif