diff options
author | Shiyou Yin <yinshiyou-hf@loongson.cn> | 2019-01-21 18:10:24 +0800 |
---|---|---|
committer | Michael Niedermayer <michael@niedermayer.cc> | 2019-01-22 00:46:36 +0100 |
commit | 6d1916481137a9608d95fc3aa8f3d874b7aaea3c (patch) | |
tree | 04914f3e1ea9a920e5d1d262c063e4502fa1b663 /libavcodec/mips | |
parent | 8133921ad2d80b8b2fe4e0bf6e0abd40afe3e235 (diff) | |
download | ffmpeg-6d1916481137a9608d95fc3aa8f3d874b7aaea3c.tar.gz |
avcodec/mips: [loongson] optimize put_hevc_qpel_hv_8 with mmi.
Optimize put_hevc_qpel_hv_8 with mmi for the cases width=4/8/12/16/24/32/48/64.
This optimization improved HEVC decoding performance by 11% (1.81x to 2.01x, tested on Loongson 3A3000).
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Diffstat (limited to 'libavcodec/mips')
-rw-r--r-- | libavcodec/mips/hevcdsp_init_mips.c | 9 | ||||
-rw-r--r-- | libavcodec/mips/hevcdsp_mips.h | 37 | ||||
-rw-r--r-- | libavcodec/mips/hevcdsp_mmi.c | 195 |
3 files changed, 231 insertions, 10 deletions
diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c index 41c9001ed9..e5e0588e5c 100644 --- a/libavcodec/mips/hevcdsp_init_mips.c +++ b/libavcodec/mips/hevcdsp_init_mips.c @@ -25,6 +25,15 @@ static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c, const int bit_depth) { if (8 == bit_depth) { + c->put_hevc_qpel[1][1][1] = ff_hevc_put_hevc_qpel_hv4_8_mmi; + c->put_hevc_qpel[3][1][1] = ff_hevc_put_hevc_qpel_hv8_8_mmi; + c->put_hevc_qpel[4][1][1] = ff_hevc_put_hevc_qpel_hv12_8_mmi; + c->put_hevc_qpel[5][1][1] = ff_hevc_put_hevc_qpel_hv16_8_mmi; + c->put_hevc_qpel[6][1][1] = ff_hevc_put_hevc_qpel_hv24_8_mmi; + c->put_hevc_qpel[7][1][1] = ff_hevc_put_hevc_qpel_hv32_8_mmi; + c->put_hevc_qpel[8][1][1] = ff_hevc_put_hevc_qpel_hv48_8_mmi; + c->put_hevc_qpel[9][1][1] = ff_hevc_put_hevc_qpel_hv64_8_mmi; + c->put_hevc_qpel_bi[3][0][0] = ff_hevc_put_hevc_pel_bi_pixels8_8_mmi; c->put_hevc_qpel_bi[5][0][0] = ff_hevc_put_hevc_pel_bi_pixels16_8_mmi; c->put_hevc_qpel_bi[6][0][0] = ff_hevc_put_hevc_pel_bi_pixels24_8_mmi; diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h index ff9401c2ca..2351c9b7c3 100644 --- a/libavcodec/mips/hevcdsp_mips.h +++ b/libavcodec/mips/hevcdsp_mips.h @@ -480,16 +480,33 @@ void ff_hevc_addblk_32x32_msa(uint8_t *dst, int16_t *pi16Coeffs, void ff_hevc_idct_luma_4x4_msa(int16_t *pi16Coeffs); /* Loongson optimization */ -#define L_BI_MC(PEL, DIR, WIDTH, TYPE) \ -void ff_hevc_put_hevc_##PEL##_bi_##DIR##WIDTH##_8_##TYPE(uint8_t *dst, \ - ptrdiff_t dst_stride, \ - uint8_t *src, \ - ptrdiff_t src_stride, \ - int16_t *src_16bit, \ - int height, \ - intptr_t mx, \ - intptr_t my, \ - int width) +#define L_MC(PEL, DIR, WIDTH, TYPE) \ +void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_##TYPE(int16_t *dst, \ + uint8_t *src, \ + ptrdiff_t src_stride, \ + int height, \ + intptr_t mx, \ + intptr_t my, \ + int width) +L_MC(qpel, hv, 4, mmi); +L_MC(qpel, hv, 8, mmi); +L_MC(qpel, hv, 12, mmi); +L_MC(qpel, hv, 16, mmi); 
+L_MC(qpel, hv, 24, mmi); +L_MC(qpel, hv, 32, mmi); +L_MC(qpel, hv, 48, mmi); +L_MC(qpel, hv, 64, mmi); + +#define L_BI_MC(PEL, DIR, WIDTH, TYPE) \ +void ff_hevc_put_hevc_##PEL##_bi_##DIR##WIDTH##_8_##TYPE(uint8_t *dst, \ + ptrdiff_t dst_stride, \ + uint8_t *src, \ + ptrdiff_t src_stride, \ + int16_t *src_16bit, \ + int height, \ + intptr_t mx, \ + intptr_t my, \ + int width) L_BI_MC(pel, pixels, 8, mmi); L_BI_MC(pel, pixels, 16, mmi); diff --git a/libavcodec/mips/hevcdsp_mmi.c b/libavcodec/mips/hevcdsp_mmi.c index 60b9c189b5..e776a13d5c 100644 --- a/libavcodec/mips/hevcdsp_mmi.c +++ b/libavcodec/mips/hevcdsp_mmi.c @@ -18,10 +18,205 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavcodec/hevcdec.h" #include "libavcodec/bit_depth_template.c" #include "libavcodec/mips/hevcdsp_mips.h" #include "libavutil/mips/mmiutils.h" +#define PUT_HEVC_QPEL_HV(w, x_step, src_step, dst_step) \ +void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \ + ptrdiff_t _srcstride, \ + int height, intptr_t mx, \ + intptr_t my, int width) \ +{ \ + int x, y; \ + const int8_t *filter; \ + pixel *src = (pixel*)_src; \ + ptrdiff_t srcstride = _srcstride / sizeof(pixel); \ + int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE]; \ + int16_t *tmp = tmp_array; \ + uint64_t ftmp[15]; \ + uint64_t rtmp[1]; \ + \ + src -= (QPEL_EXTRA_BEFORE * srcstride + 3); \ + filter = ff_hevc_qpel_filters[mx - 1]; \ + x = x_step; \ + y = height + QPEL_EXTRA; \ + __asm__ volatile( \ + MMI_LDC1(%[ftmp1], %[filter], 0x00) \ + "li %[rtmp0], 0x08 \n\t" \ + "dmtc1 %[rtmp0], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \ + "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \ + "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \ + "psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \ + "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \ + \ + "1: \n\t" \ + "2: \n\t" \ + "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \ + "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \ + "gsldlc1 
%[ftmp4], 0x08(%[src]) \n\t" \ + "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \ + "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \ + "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \ + "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \ + "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \ + "punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \ + "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \ + "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \ + "paddh %[ftmp3], %[ftmp7], %[ftmp8] \n\t" \ + "punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t" \ + "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \ + "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \ + "paddh %[ftmp4], %[ftmp7], %[ftmp8] \n\t" \ + "punpcklbh %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp8], %[ftmp5], %[ftmp0] \n\t" \ + "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \ + "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \ + "paddh %[ftmp5], %[ftmp7], %[ftmp8] \n\t" \ + "punpcklbh %[ftmp7], %[ftmp6], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp8], %[ftmp6], %[ftmp0] \n\t" \ + "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \ + "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \ + "paddh %[ftmp6], %[ftmp7], %[ftmp8] \n\t" \ + TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \ + %[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \ + "paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \ + "paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \ + "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \ + "gssdlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \ + "gssdrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \ + \ + "daddi %[x], %[x], -0x01 \n\t" \ + PTR_ADDIU "%[src], %[src], 0x04 \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \ + "bnez %[x], 2b \n\t" \ + \ + "daddi %[y], %[y], -0x01 \n\t" \ + "li %[x], " #x_step " \n\t" \ + PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], " #dst_step " \n\t" \ + PTR_ADDU "%[src], %[src], %[stride] \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "bnez %[y], 1b \n\t" \ + : [ftmp0]"=&f"(ftmp[0]), 
[ftmp1]"=&f"(ftmp[1]), \ + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \ + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \ + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \ + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \ + [ftmp10]"=&f"(ftmp[10]), [rtmp0]"=&r"(rtmp[0]), \ + [src]"+&r"(src), [tmp]"+&r"(tmp), [y]"+&r"(y), \ + [x]"+&r"(x) \ + : [filter]"r"(filter), [stride]"r"(srcstride) \ + : "memory" \ + ); \ + \ + tmp = tmp_array + QPEL_EXTRA_BEFORE * 4 -12; \ + filter = ff_hevc_qpel_filters[my - 1]; \ + x = x_step; \ + y = height; \ + __asm__ volatile( \ + MMI_LDC1(%[ftmp1], %[filter], 0x00) \ + "li %[rtmp0], 0x08 \n\t" \ + "dmtc1 %[rtmp0], %[ftmp0] \n\t" \ + "punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \ + "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \ + "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \ + "psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \ + "li %[rtmp0], 0x06 \n\t" \ + "dmtc1 %[rtmp0], %[ftmp0] \n\t" \ + \ + "1: \n\t" \ + "2: \n\t" \ + "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp7], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp7], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp8], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp8], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp9], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp9], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "gsldlc1 %[ftmp10], 0x07(%[tmp]) \n\t" \ + "gsldrc1 %[ftmp10], 0x00(%[tmp]) \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], -0x380 \n\t" \ + TRANSPOSE_4H(%[ftmp3], 
%[ftmp4], %[ftmp5], %[ftmp6], \ + %[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \ + TRANSPOSE_4H(%[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10], \ + %[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \ + "pmaddhw %[ftmp11], %[ftmp3], %[ftmp1] \n\t" \ + "pmaddhw %[ftmp12], %[ftmp7], %[ftmp2] \n\t" \ + "pmaddhw %[ftmp13], %[ftmp4], %[ftmp1] \n\t" \ + "pmaddhw %[ftmp14], %[ftmp8], %[ftmp2] \n\t" \ + "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t" \ + "paddw %[ftmp13], %[ftmp13], %[ftmp14] \n\t" \ + TRANSPOSE_2W(%[ftmp11], %[ftmp13], %[ftmp3], %[ftmp4]) \ + "paddw %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \ + "psraw %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \ + "pmaddhw %[ftmp11], %[ftmp5], %[ftmp1] \n\t" \ + "pmaddhw %[ftmp12], %[ftmp9], %[ftmp2] \n\t" \ + "pmaddhw %[ftmp13], %[ftmp6], %[ftmp1] \n\t" \ + "pmaddhw %[ftmp14], %[ftmp10], %[ftmp2] \n\t" \ + "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t" \ + "paddw %[ftmp13], %[ftmp13], %[ftmp14] \n\t" \ + TRANSPOSE_2W(%[ftmp11], %[ftmp13], %[ftmp5], %[ftmp6]) \ + "paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \ + "psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \ + "packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \ + "gssdlc1 %[ftmp3], 0x07(%[dst]) \n\t" \ + "gssdrc1 %[ftmp3], 0x00(%[dst]) \n\t" \ + \ + "daddi %[x], %[x], -0x01 \n\t" \ + PTR_ADDIU "%[dst], %[dst], 0x08 \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \ + "bnez %[x], 2b \n\t" \ + \ + "daddi %[y], %[y], -0x01 \n\t" \ + "li %[x], " #x_step " \n\t" \ + PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], " #dst_step " \n\t" \ + PTR_ADDIU "%[dst], %[dst], 0x80 \n\t" \ + PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \ + "bnez %[y], 1b \n\t" \ + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \ + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \ + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \ + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \ + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \ + [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), \ + [ftmp12]"=&f"(ftmp[12]), [ftmp13]"=&f"(ftmp[13]), \ 
+ [ftmp14]"=&f"(ftmp[14]), [rtmp0]"=&r"(rtmp[0]), \ + [dst]"+&r"(dst), [tmp]"+&r"(tmp), [y]"+&r"(y), \ + [x]"+&r"(x) \ + : [filter]"r"(filter), [stride]"r"(srcstride) \ + : "memory" \ + ); \ +} + +PUT_HEVC_QPEL_HV(4, 1, -4, -8); +PUT_HEVC_QPEL_HV(8, 2, -8, -16); +PUT_HEVC_QPEL_HV(12, 3, -12, -24); +PUT_HEVC_QPEL_HV(16, 4, -16, -32); +PUT_HEVC_QPEL_HV(24, 6, -24, -48); +PUT_HEVC_QPEL_HV(32, 8, -32, -64); +PUT_HEVC_QPEL_HV(48, 12, -48, -96); +PUT_HEVC_QPEL_HV(64, 16, -64, -128); + #define PUT_HEVC_PEL_BI_PIXELS(w, x_step, src_step, dst_step, src2_step) \ void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \ ptrdiff_t _dststride, \ |