author     Johann <johannkoenig@google.com>  2016-09-02 11:29:20 -0700
committer  Johann <johannkoenig@google.com>  2016-09-15 14:56:47 -0700
commit     d9dce2f48eed1368a44c368fa87a506bd89ffec5 (patch)
tree       224fa01a75dc5534e65a691388af99f997397202 /vp8/common/arm
parent     4d1540f8ce1c9965bc89674ba4e46e332f52599d (diff)
download   libvpx-d9dce2f48eed1368a44c368fa87a506bd89ffec5.tar.gz
Restore vp8_sixtap_predict4x4_neon
This function was removed when clang started introducing alignment hints,
which caused the 32-bit vld1_lane_u32/vst1_lane_u32 intrinsics to fail:
https://llvm.org/bugs/show_bug.cgi?id=24421
The load has been made safe with a _u8 implementation that over-reads just a
touch; the performance difference is indiscernible (a standalone sketch of the
load follows this message).
The store now takes an unaligned-safe path that is ~25% slower, but this only
matters when xoffset = 0 (second pass filter only). When the first pass filter
(or both) is in play, the new version is almost identical in speed (a sketch
of the store path follows the diff).
Even in the worst case (both filters, unaligned stores), the new code is
roughly 3-4x faster than C.
BUG=webm:817
BUG=webm:1273
Change-Id: I1e490e94453e0872151fe0dafb05557463f6247d
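
For reference, a minimal standalone sketch of the load described above, assuming ARM NEON and a compiler providing arm_neon.h (the name load_4_and_shift is illustrative; the patch itself adds this logic as the load_and_shift() helper):

#include <arm_neon.h>

// Load 4 pixels without any alignment assumption by reading a full 8 bytes
// (the over-read) and shifting bytes 0-3 into lanes 4-7 of the register.
// The caller must guarantee that at least 8 bytes are readable at 'a'.
static inline uint8x8_t load_4_and_shift(const unsigned char *a) {
  return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
}

Unlike vld1_lane_u32, vld1_u8 carries no 32-bit alignment expectation, so clang cannot attach an alignment hint that faults on unaligned src pointers.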
Diffstat (limited to 'vp8/common/arm')
-rw-r--r--  vp8/common/arm/neon/sixtappredict_neon.c  394
1 file changed, 394 insertions, 0 deletions
diff --git a/vp8/common/arm/neon/sixtappredict_neon.c b/vp8/common/arm/neon/sixtappredict_neon.c
index 622baa3c5..623ab0241 100644
--- a/vp8/common/arm/neon/sixtappredict_neon.c
+++ b/vp8/common/arm/neon/sixtappredict_neon.c
@@ -9,6 +9,8 @@
  */
 
 #include <arm_neon.h>
+#include <string.h>
+#include "./vpx_config.h"
 #include "vpx_ports/mem.h"
 
 static const int8_t vp8_sub_pel_filters[8][8] = {
@@ -22,6 +24,398 @@ static const int8_t vp8_sub_pel_filters[8][8] = {
   { 0, -1, 12, 123, -6, 0, 0, 0 },
 };
 
+// This table is derived from vp8/common/filter.c:vp8_sub_pel_filters.
+// Apply abs() to all the values. Elements 0, 2, 3, and 5 are always positive.
+// Elements 1 and 4 are either 0 or negative. The code accounts for this with
+// multiply/accumulates which either add or subtract as needed. The other
+// functions will be updated to use this table later.
+// It is also expanded to 8 elements to allow loading into 64 bit neon
+// registers.
+static const uint8_t abs_filters[8][8] = {
+  { 0, 0, 128, 0, 0, 0, 0, 0 },   { 0, 6, 123, 12, 1, 0, 0, 0 },
+  { 2, 11, 108, 36, 8, 1, 0, 0 }, { 0, 9, 93, 50, 6, 0, 0, 0 },
+  { 3, 16, 77, 77, 16, 3, 0, 0 }, { 0, 6, 50, 93, 9, 0, 0, 0 },
+  { 1, 8, 36, 108, 11, 2, 0, 0 }, { 0, 1, 12, 123, 6, 0, 0, 0 },
+};
+
+static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
+  return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
+}
+
+static INLINE void store4x4(unsigned char *dst, int dst_stride,
+                            const uint8x8_t a0, const uint8x8_t a1) {
+  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
+    dst += dst_stride;
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
+    dst += dst_stride;
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
+    dst += dst_stride;
+    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
+  } else {
+    // Store to the aligned local buffer and memcpy instead of vget_lane_u8
+    // which is really really slow.
+    uint32_t output_buffer[4];
+    vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0);
+    vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1);
+    vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0);
+    vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1);
+
+    memcpy(dst, output_buffer, 4);
+    dst += dst_stride;
+    memcpy(dst, output_buffer + 1, 4);
+    dst += dst_stride;
+    memcpy(dst, output_buffer + 2, 4);
+    dst += dst_stride;
+    memcpy(dst, output_buffer + 3, 4);
+  }
+}
+
+static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b,
+                                         const uint8x8_t filter, uint16x8_t *c,
+                                         uint16x8_t *d) {
+  const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
+                                       vreinterpret_u32_u8(vget_high_u8(a)));
+  const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
+                                       vreinterpret_u32_u8(vget_high_u8(b)));
+  *c = vmlal_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
+  *d = vmlal_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
+}
+
+static INLINE void filter_sub_accumulate(const uint8x16_t a, const uint8x16_t b,
+                                         const uint8x8_t filter, uint16x8_t *c,
+                                         uint16x8_t *d) {
+  const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
+                                       vreinterpret_u32_u8(vget_high_u8(a)));
+  const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
+                                       vreinterpret_u32_u8(vget_high_u8(b)));
+  *c = vmlsl_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
+  *d = vmlsl_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
+}
+
+static INLINE void yonly4x4(const unsigned char *src, int src_stride,
+                            int filter_offset, unsigned char *dst,
+                            int dst_stride) {
+  uint8x8_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
+  uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
+  uint16x8_t c0, c1, c2, c3;
+  uint8x8_t d0, d1;
+
+  const uint8x8_t filter = vld1_u8(abs_filters[filter_offset]);
+  const uint8x8_t filter0 = vdup_lane_u8(filter, 0);
+  const uint8x8_t filter1 = vdup_lane_u8(filter, 1);
+  const uint8x8_t filter2 = vdup_lane_u8(filter, 2);
+  const uint8x8_t filter3 = vdup_lane_u8(filter, 3);
+  const uint8x8_t filter4 = vdup_lane_u8(filter, 4);
+  const uint8x8_t filter5 = vdup_lane_u8(filter, 5);
+
+  src -= src_stride * 2;
+  // Shift the even rows to allow using 'vext' to combine the vectors. armv8
+  // has vcopy_lane which would be interesting. This started as just a
+  // horrible workaround for clang adding alignment hints to 32bit loads:
+  // https://llvm.org/bugs/show_bug.cgi?id=24421
+  // But it turns out it is almost identical to casting the loads.
+  a0 = load_and_shift(src);
+  src += src_stride;
+  a1 = vld1_u8(src);
+  src += src_stride;
+  a2 = load_and_shift(src);
+  src += src_stride;
+  a3 = vld1_u8(src);
+  src += src_stride;
+  a4 = load_and_shift(src);
+  src += src_stride;
+  a5 = vld1_u8(src);
+  src += src_stride;
+  a6 = load_and_shift(src);
+  src += src_stride;
+  a7 = vld1_u8(src);
+  src += src_stride;
+  a8 = vld1_u8(src);
+
+  // Combine the rows so we can operate on 8 at a time.
+  b0 = vext_u8(a0, a1, 4);
+  b2 = vext_u8(a2, a3, 4);
+  b4 = vext_u8(a4, a5, 4);
+  b6 = vext_u8(a6, a7, 4);
+  b8 = a8;
+
+  // To keep with the 8-at-a-time theme, combine *alternate* rows. This
+  // allows combining the odd rows with the even.
+  b1 = vext_u8(b0, b2, 4);
+  b3 = vext_u8(b2, b4, 4);
+  b5 = vext_u8(b4, b6, 4);
+  b7 = vext_u8(b6, b8, 4);
+
+  // Multiply and expand to 16 bits.
+  c0 = vmull_u8(b0, filter0);
+  c1 = vmull_u8(b2, filter0);
+  c2 = vmull_u8(b5, filter5);
+  c3 = vmull_u8(b7, filter5);
+
+  // Multiply, subtract and accumulate for filters 1 and 4 (the negative
+  // ones).
+  c0 = vmlsl_u8(c0, b4, filter4);
+  c1 = vmlsl_u8(c1, b6, filter4);
+  c2 = vmlsl_u8(c2, b1, filter1);
+  c3 = vmlsl_u8(c3, b3, filter1);
+
+  // Add more positive ones. vmlal should really return a signed type.
+  // It's doing signed math internally, as evidenced by the fact we can do
+  // subtractions followed by more additions. Ideally we could use
+  // vqmlal/sl but that instruction doesn't exist. Might be able to
+  // shoehorn vqdmlal/vqdmlsl in here but it would take some effort.
+  c0 = vmlal_u8(c0, b2, filter2);
+  c1 = vmlal_u8(c1, b4, filter2);
+  c2 = vmlal_u8(c2, b3, filter3);
+  c3 = vmlal_u8(c3, b5, filter3);
+
+  // Use signed saturation math because vmlsl may have left some negative
+  // numbers in there.
+  c0 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0)));
+  c1 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1)));
+
+  // Shift and narrow.
+  d0 = vqrshrn_n_u16(c0, 7);
+  d1 = vqrshrn_n_u16(c1, 7);
+
+  store4x4(dst, dst_stride, d0, d1);
+}
+
+void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
+                                int xoffset, int yoffset,
+                                unsigned char *dst_ptr, int dst_pitch) {
+  uint8x16_t s0, s1, s2, s3, s4;
+  uint64x2_t s01, s23;
+  // Variables to hold src[] elements for the given filter[]
+  uint8x8_t s0_f5, s1_f5, s2_f5, s3_f5, s4_f5;
+  uint8x8_t s4_f1, s4_f2, s4_f3, s4_f4;
+  uint8x16_t s01_f0, s23_f0;
+  uint64x2_t s01_f3, s23_f3;
+  uint32x2x2_t s01_f3_q, s23_f3_q, s01_f5_q, s23_f5_q;
+  // Accumulator variables.
+  uint16x8_t d0123, d4567, d89;
+  uint16x8_t d0123_a, d4567_a, d89_a;
+  // Second pass intermediates.
+  uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
+  uint16x8_t c0, c1, c2, c3;
+  uint8x8_t d0, d1;
+  uint8x8_t filter, filter0, filter1, filter2, filter3, filter4, filter5;
+
+  if (xoffset == 0) {  // Second pass only.
+    yonly4x4(src_ptr, src_pixels_per_line, yoffset, dst_ptr, dst_pitch);
+    return;
+  }
+
+  if (yoffset == 0) {  // First pass only.
+    src_ptr -= 2;
+  } else {  // Add context for the second pass. 2 extra lines on top.
+    src_ptr -= 2 + (src_pixels_per_line * 2);
+  }
+
+  filter = vld1_u8(abs_filters[xoffset]);
+  filter0 = vdup_lane_u8(filter, 0);
+  filter1 = vdup_lane_u8(filter, 1);
+  filter2 = vdup_lane_u8(filter, 2);
+  filter3 = vdup_lane_u8(filter, 3);
+  filter4 = vdup_lane_u8(filter, 4);
+  filter5 = vdup_lane_u8(filter, 5);
+
+  // 2 bytes of context, 4 bytes of src values, 3 bytes of context, 7 bytes of
+  // garbage. So much effort for that last single bit.
+  // The low values of each pair are for filter0.
+  s0 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s1 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s2 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s3 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+
+  // Shift to extract values for filter[5]
+  // If src[] is 0, this puts:
+  //  3 4 5 6 7 8 9 10 in s0_f5
+  // Can't use vshr.u64 because it crosses the double word boundary.
+  s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
+  s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
+  s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
+  s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
+
+  s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
+  s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
+
+  s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
+  s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
+  d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
+  d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
+
+  // Keep original src data as 64 bits to simplify shifting and extracting.
+  s01 = vreinterpretq_u64_u8(s01_f0);
+  s23 = vreinterpretq_u64_u8(s23_f0);
+
+  // 3 4 5 6 * filter0
+  filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
+
+  // Shift over one to use -1, 0, 1, 2 for filter1
+  // -1 0 1 2 * filter1
+  filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
+                        &d0123, &d4567);
+
+  // 2 3 4 5 * filter4
+  filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
+                        &d0123, &d4567);
+
+  // 0 1 2 3 * filter2
+  filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
+                        &d0123, &d4567);
+
+  // 1 2 3 4 * filter3
+  s01_f3 = vshrq_n_u64(s01, 24);
+  s23_f3 = vshrq_n_u64(s23, 24);
+  s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
+                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));
+  s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
+                      vreinterpret_u32_u64(vget_high_u64(s23_f3)));
+  // Accumulate into different registers so it can use saturated addition.
+  d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
+  d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
+
+  d0123 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a)));
+  d4567 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a)));
+
+  // Shift and narrow.
+  b0 = vqrshrn_n_u16(d0123, 7);
+  b2 = vqrshrn_n_u16(d4567, 7);
+
+  if (yoffset == 0) {  // firstpass_filter4x4_only
+    store4x4(dst_ptr, dst_pitch, b0, b2);
+    return;
+  }
+
+  // Load additional context when doing both filters.
+  s0 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s1 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s2 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s3 = vld1q_u8(src_ptr);
+  src_ptr += src_pixels_per_line;
+  s4 = vld1q_u8(src_ptr);
+
+  s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
+  s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
+  s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
+  s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
+  s4_f5 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 5);
+
+  // 3 4 5 6 * filter0
+  s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
+  s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
+
+  s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
+  s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
+  // But this time instead of 16 pixels to filter, there are 20. So an extra
+  // run with a doubleword register.
+  d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
+  d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
+  d89 = vmull_u8(s4_f5, filter5);
+
+  // Save a copy as u64 for shifting.
+  s01 = vreinterpretq_u64_u8(s01_f0);
+  s23 = vreinterpretq_u64_u8(s23_f0);
+
+  filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
+  d89 = vmlal_u8(d89, vget_low_u8(s4), filter0);
+
+  filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
+                        &d0123, &d4567);
+  s4_f1 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 1);
+  d89 = vmlsl_u8(d89, s4_f1, filter1);
+
+  filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
+                        &d0123, &d4567);
+  s4_f4 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 4);
+  d89 = vmlsl_u8(d89, s4_f4, filter4);
+
+  filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
+                        vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
+                        &d0123, &d4567);
+  s4_f2 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 2);
+  d89 = vmlal_u8(d89, s4_f2, filter2);
+
+  s01_f3 = vshrq_n_u64(s01, 24);
+  s23_f3 = vshrq_n_u64(s23, 24);
+  s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
+                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));
+  s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
+                      vreinterpret_u32_u64(vget_high_u64(s23_f3)));
+  s4_f3 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 3);
+  d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
+  d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
+  d89_a = vmull_u8(s4_f3, filter3);
+
+  d0123 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a)));
+  d4567 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a)));
+  d89 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(d89), vreinterpretq_s16_u16(d89_a)));
+
+  b4 = vqrshrn_n_u16(d0123, 7);
+  b6 = vqrshrn_n_u16(d4567, 7);
+  b8 = vqrshrn_n_u16(d89, 7);
+
+  // Second pass: 4x4
+  filter = vld1_u8(abs_filters[yoffset]);
+  filter0 = vdup_lane_u8(filter, 0);
+  filter1 = vdup_lane_u8(filter, 1);
+  filter2 = vdup_lane_u8(filter, 2);
+  filter3 = vdup_lane_u8(filter, 3);
+  filter4 = vdup_lane_u8(filter, 4);
+  filter5 = vdup_lane_u8(filter, 5);
+
+  b1 = vext_u8(b0, b2, 4);
+  b3 = vext_u8(b2, b4, 4);
+  b5 = vext_u8(b4, b6, 4);
+  b7 = vext_u8(b6, b8, 4);
+
+  c0 = vmull_u8(b0, filter0);
+  c1 = vmull_u8(b2, filter0);
+  c2 = vmull_u8(b5, filter5);
+  c3 = vmull_u8(b7, filter5);
+
+  c0 = vmlsl_u8(c0, b4, filter4);
+  c1 = vmlsl_u8(c1, b6, filter4);
+  c2 = vmlsl_u8(c2, b1, filter1);
+  c3 = vmlsl_u8(c3, b3, filter1);
+
+  c0 = vmlal_u8(c0, b2, filter2);
+  c1 = vmlal_u8(c1, b4, filter2);
+  c2 = vmlal_u8(c2, b3, filter3);
+  c3 = vmlal_u8(c3, b5, filter3);
+
+  c0 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0)));
+  c1 = vreinterpretq_u16_s16(
+      vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1)));
+
+  d0 = vqrshrn_n_u16(c0, 7);
+  d1 = vqrshrn_n_u16(c1, 7);
+
+  store4x4(dst_ptr, dst_pitch, d0, d1);
+}
+
 void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
                                 int xoffset, int yoffset,
                                 unsigned char *dst_ptr, int dst_pitch) {
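
For reference, a minimal standalone sketch of the store strategy described in the commit message, assuming ARM NEON (the name store_4x4_safe is illustrative; the patch implements this as the store4x4() helper): take the vst1_lane_u32 fast path only when both the destination pointer and the stride are 4-byte aligned, otherwise spill to an aligned scratch buffer and memcpy 4 bytes per destination row.

#include <arm_neon.h>
#include <stdint.h>
#include <string.h>

static inline void store_4x4_safe(unsigned char *dst, int dst_stride,
                                  const uint8x8_t a0, const uint8x8_t a1) {
  // a0 holds output rows 0 and 1, a1 holds rows 2 and 3 (4 pixels each).
  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
    // Aligned fast path: store one 32-bit lane per row.
    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
    dst += dst_stride;
    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
    dst += dst_stride;
    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
    dst += dst_stride;
    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
  } else {
    // Unaligned-safe path (~25% slower in the second-pass-only case):
    // store to an aligned local buffer, then copy 4 bytes per row.
    uint32_t scratch[4];
    vst1_lane_u32(scratch, vreinterpret_u32_u8(a0), 0);
    vst1_lane_u32(scratch + 1, vreinterpret_u32_u8(a0), 1);
    vst1_lane_u32(scratch + 2, vreinterpret_u32_u8(a1), 0);
    vst1_lane_u32(scratch + 3, vreinterpret_u32_u8(a1), 1);
    memcpy(dst, scratch, 4);
    dst += dst_stride;
    memcpy(dst, scratch + 1, 4);
    dst += dst_stride;
    memcpy(dst, scratch + 2, 4);
    dst += dst_stride;
    memcpy(dst, scratch + 3, 4);
  }
}

The fallback avoids extracting bytes with vget_lane_u8, which the patch comments note is far slower than storing to an aligned scratch buffer and copying.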