author    Cedric BAIL <cedric@osg.samsung.com>    2015-04-28 23:40:04 +0200
committer Cedric BAIL <cedric@osg.samsung.com>    2015-05-07 09:53:11 +0200
commit    ad1076525a519ccc53be8a2a6992c870b857bf4a (patch)
tree      1e67af59a759daed5468af5905e83d8caf6d1125
parent    2c2983aadb8cd7351d821c0a5b01efafd445665d (diff)
download  efl-ad1076525a519ccc53be8a2a6992c870b857bf4a.tar.gz
evas: implement _op_blend_rel_mas_c_dp_neon using NEON intrinsics
Summary: NEON intrinsics can be built for both armv7 and armv8.

Reviewers: raster, cedric

Reviewed By: cedric

Subscribers: cedric

Projects: #efl

Differential Revision: https://phab.enlightenment.org/D2442

Signed-off-by: Cedric BAIL <cedric@osg.samsung.com>
-rw-r--r--  src/lib/evas/common/evas_op_blend/op_blend_mask_color_neon.c  149
1 file changed, 136 insertions(+), 13 deletions(-)
diff --git a/src/lib/evas/common/evas_op_blend/op_blend_mask_color_neon.c b/src/lib/evas/common/evas_op_blend/op_blend_mask_color_neon.c
index e492bb057b..2c0fad78ec 100644
--- a/src/lib/evas/common/evas_op_blend/op_blend_mask_color_neon.c
+++ b/src/lib/evas/common/evas_op_blend/op_blend_mask_color_neon.c
@@ -1,8 +1,6 @@
#ifdef BUILD_NEON
-#ifdef BUILD_NEON_INTRINSICS
#include <arm_neon.h>
#endif
-#endif
#define NEONDEBUG 0
@@ -689,19 +687,144 @@ init_blend_mask_color_pt_funcs_neon(void)
#ifdef BUILD_NEON
static void
_op_blend_rel_mas_c_dp_neon(DATA32 *s EINA_UNUSED, DATA8 *m, DATA32 c, DATA32 *d, int l) {
- DATA32 *e;
- int alpha;
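+ /* NEON temporaries; the suffix encodes the vector shape,
+  * e.g. _16x8 is eight 16-bit lanes and _8x16 is sixteen 8-bit lanes. */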
+ uint16x8_t dc0_16x8;
+ uint16x8_t dc1_16x8;
+ uint16x8_t m_16x8;
+ uint16x8_t mc0_16x8;
+ uint16x8_t mc1_16x8;
+ uint16x8_t temp0_16x8;
+ uint16x8_t temp1_16x8;
+ uint16x8_t x255_16x8;
+ uint32x2_t c_32x2;
+ uint32x2_t m_32x2 = vdup_n_u32(0); // zero-initialised: vld1_lane_u32() below only writes lane 0
+ uint32x4_t a_32x4;
+ uint32x4_t ad_32x4;
+ uint32x4_t cond_32x4;
+ uint32x4_t d_32x4;
+ uint32x4_t dc_32x4;
+ uint32x4_t m_32x4;
+ uint32x4_t temp_32x4;
+ uint32x4_t x0_32x4;
+ uint32x4_t x1_32x4;
+ uint8x16_t a_8x16;
+ uint8x16_t d_8x16;
+ uint8x16_t dc_8x16;
+ uint8x16_t m_8x16;
+ uint8x16_t mc_8x16;
+ uint8x16_t temp_8x16;
+ uint8x16_t x0_8x16;
+ uint8x16_t x1_8x16;
+ uint8x8_t a0_8x8;
+ uint8x8_t a1_8x8;
+ uint8x8_t c_8x8;
+ uint8x8_t d0_8x8;
+ uint8x8_t d1_8x8;
+ uint8x8_t dc0_8x8;
+ uint8x8_t dc1_8x8;
+ uint8x8_t m0_8x8;
+ uint8x8_t m1_8x8;
+ uint8x8_t m_8x8;
+ uint8x8_t mc0_8x8;
+ uint8x8_t mc1_8x8;
+ uint8x8_t temp0_8x8;
+ uint8x8_t temp1_8x8;
- DEBUG_FNCOUNT("not");
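+ // set up loop-invariant constants: the colour c splatted per byte,
+ // 0x01010101 (x1) for replicating a byte across a 32-bit lane,
+ // the 0xff rounding bias used by MUL_SYM, and zero for the compares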
+ c_32x2 = vdup_n_u32(c);
+ c_8x8 = vreinterpret_u8_u32(c_32x2);
+ x1_8x16 = vdupq_n_u8(0x1);
+ x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
+ x255_16x8 = vdupq_n_u16(0xff);
+ x0_8x16 = vdupq_n_u8(0x0);
+ x0_32x4 = vreinterpretq_u32_u8(x0_8x16);
- UNROLL8_PLD_WHILE(d, l, e,
- {
- DATA32 mc = MUL_SYM(*m, c);
- alpha = 256 - (mc >> 24);
- *d = MUL_SYM(*d >> 24, mc) + MUL_256(alpha, *d);
- d++;
- m++;
- });
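+ // main loop: handle four ARGB pixels (and four mask bytes) per iteration;
+ // any 1-3 leftover pixels fall through to the scalar tail loop below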
+ DATA32 *end = d + (l & ~3);
+ while (d < end)
+ {
+ // load 4 elements from d
+ d_32x4 = vld1q_u32(d);
+ d_8x16 = vreinterpretq_u8_u32(d_32x4);
+ d0_8x8 = vget_low_u8(d_8x16);
+ d1_8x8 = vget_high_u8(d_8x16);
+
+ // load 4 elements from m
+ m_32x2 = vld1_lane_u32((DATA32*)m, m_32x2, 0);
+ m_8x8 = vreinterpret_u8_u32(m_32x2);
+ m_16x8 = vmovl_u8(m_8x8);
+ m_8x16 = vreinterpretq_u8_u16(m_16x8);
+ m_8x8 = vget_low_u8(m_8x16);
+ m_16x8 = vmovl_u8(m_8x8);
+ m_32x4 = vreinterpretq_u32_u16(m_16x8);
+
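+ // the vmovl steps above widened each mask byte into its own 32-bit lane;
+ // multiplying by 0x01010101 replicates that byte into all four bytes of
+ // the lane so every colour channel is scaled by the same mask value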
+ m_32x4 = vmulq_u32(m_32x4, x1_32x4);
+ m_8x16 = vreinterpretq_u8_u32(m_32x4);
+ m0_8x8 = vget_low_u8(m_8x16);
+ m1_8x8 = vget_high_u8(m_8x16);
+
+ // multiply MUL_SYM(*m, c)
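+ // widening multiply, +0xff bias and narrowing shift by 8 compute
+ // (m * c + 255) >> 8 per channel, the per-channel form of MUL_SYM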
+ mc0_16x8 = vmull_u8(m0_8x8, c_8x8);
+ mc1_16x8 = vmull_u8(m1_8x8, c_8x8);
+ mc0_16x8 = vaddq_u16(mc0_16x8, x255_16x8);
+ mc1_16x8 = vaddq_u16(mc1_16x8, x255_16x8);
+ mc0_8x8 = vshrn_n_u16(mc0_16x8, 8);
+ mc1_8x8 = vshrn_n_u16(mc1_16x8, 8);
+ mc_8x16 = vcombine_u8(mc0_8x8, mc1_8x8);
+
+ // calculate alpha = 256 - (mc >> 24)
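+ // byte-wise 0 - mc wraps the scalar value 256 to 0; that case is
+ // compensated for by the bitselect below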
+ a_8x16 = vsubq_u8(x0_8x16, mc_8x16);
+ a_32x4 = vreinterpretq_u32_u8(a_8x16);
+ a_32x4 = vshrq_n_u32(a_32x4, 24);
+ a_32x4 = vmulq_u32(a_32x4, x1_32x4);
+ a_8x16 = vreinterpretq_u8_u32(a_32x4);
+ a0_8x8 = vget_low_u8(a_8x16);
+ a1_8x8 = vget_high_u8(a_8x16);
+
+ // multiply MUL_256(alpha, *d)
+ temp0_16x8 = vmull_u8(a0_8x8, d0_8x8);
+ temp1_16x8 = vmull_u8(a1_8x8, d1_8x8);
+ temp0_8x8 = vshrn_n_u16(temp0_16x8, 8);
+ temp1_8x8 = vshrn_n_u16(temp1_16x8, 8);
+ temp_8x16 = vcombine_u8(temp0_8x8, temp1_8x8);
+ temp_32x4 = vreinterpretq_u32_u8(temp_8x16);
+
+ // select d where alpha == 0
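+ // a wrapped alpha of 0 corresponds to a scalar alpha of 256, where
+ // MUL_256(256, *d) is just *d, so keep the untouched destination there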
+ cond_32x4 = vceqq_u32(a_32x4, x0_32x4);
+ ad_32x4 = vbslq_u32(cond_32x4, d_32x4, temp_32x4);
+
+ // shift (*d >> 24)
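+ // extract the destination alpha and replicate it across the pixel's four
+ // bytes (multiply by 0x01010101) for the channel-wise multiply with mc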
+ dc_32x4 = vshrq_n_u32(d_32x4, 24);
+ dc_32x4 = vmulq_u32(x1_32x4, dc_32x4);
+ dc_8x16 = vreinterpretq_u8_u32(dc_32x4);
+ dc0_8x8 = vget_low_u8(dc_8x16);
+ dc1_8x8 = vget_high_u8(dc_8x16);
+
+ // multiply MUL_SYM(*d >> 24, mc)
+ dc0_16x8 = vmull_u8(dc0_8x8, mc0_8x8);
+ dc1_16x8 = vmull_u8(dc1_8x8, mc1_8x8);
+ dc0_16x8 = vaddq_u16(dc0_16x8, x255_16x8);
+ dc1_16x8 = vaddq_u16(dc1_16x8, x255_16x8);
+ dc0_8x8 = vshrn_n_u16(dc0_16x8, 8);
+ dc1_8x8 = vshrn_n_u16(dc1_16x8, 8);
+ dc_8x16 = vcombine_u8(dc0_8x8, dc1_8x8);
+
+ // add up everything
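+ // the sum of both products matches the scalar
+ // *d = MUL_SYM(*d >> 24, mc) + MUL_256(alpha, *d)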
+ dc_32x4 = vreinterpretq_u32_u8(dc_8x16);
+ d_32x4 = vaddq_u32(dc_32x4, ad_32x4);
+
+ // save result
+ vst1q_u32(d, d_32x4);
+ d += 4;
+ m += 4;
+ }
+
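+ // scalar tail: process the remaining (l & 3) pixels with the original per-pixel code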
+ end += (l & 3);
+ while (d < end)
+ {
+ DATA32 mc = MUL_SYM(*m, c);
+ int alpha = 256 - (mc >> 24);
+ *d = MUL_SYM(*d >> 24, mc) + MUL_256(alpha, *d);
+ d++;
+ m++;
+ }
}
#define _op_blend_rel_mas_cn_dp_neon _op_blend_rel_mas_c_dp_neon