author     Diego Biurrun <diego@biurrun.de>          2014-01-24 01:41:12 -0800
committer  Diego Biurrun <diego@biurrun.de>          2014-06-23 09:58:17 -0700
commit     fab9df63a3156ffe1f9490aafaea41e03ef60ddf (patch)
tree       f7f363e0676f471c373dc8aa90dbc6b3071065b7 /libavcodec/x86
parent     f23d26a6864128001b03876b0b92fffe131f2060 (diff)
download   ffmpeg-fab9df63a3156ffe1f9490aafaea41e03ef60ddf.tar.gz
dsputil: Split off global motion compensation bits into a separate context
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/Makefile          3
-rw-r--r--  libavcodec/x86/dsputil_init.c    2
-rw-r--r--  libavcodec/x86/dsputil_mmx.c   114
-rw-r--r--  libavcodec/x86/dsputil_x86.h     5
-rw-r--r--  libavcodec/x86/mpegvideodsp.c  153
5 files changed, 155 insertions, 122 deletions
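
The shape of the change: the gmc() function pointer leaves DSPContext and moves into the dedicated MpegVideoDSPContext, and the x86 MMX implementation is now installed by ff_mpegvideodsp_init_x86() (new file below) rather than by dsputil_init_mmx(). A minimal sketch of how this is expected to be wired up follows; the generic ff_mpegvideodsp_init() entry point and the ff_gmc_c default shown here are assumptions made for illustration, not part of this commit.

    /* Hypothetical wiring sketch. MpegVideoDSPContext, ff_mpegvideodsp_init_x86()
     * and ff_gmc_c() all appear in the diff below; the generic init function and
     * the default assignment are assumed by analogy with other DSP contexts. */
    #include "libavcodec/mpegvideodsp.h"

    void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
    {
        c->gmc = ff_gmc_c;             /* portable C fallback (assumed default) */

    #if ARCH_X86
        ff_mpegvideodsp_init_x86(c);   /* replaces c->gmc with gmc_mmx when
                                          inline MMX assembly is usable */
    #endif
    }
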
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 587ff39bf4..13f9affdb2 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -20,7 +20,8 @@ OBJS-$(CONFIG_HUFFYUVDSP)              += x86/huffyuvdsp_init.o
 OBJS-$(CONFIG_HUFFYUVENCDSP)           += x86/huffyuvencdsp_mmx.o
 OBJS-$(CONFIG_LPC)                     += x86/lpc.o
 OBJS-$(CONFIG_MPEGAUDIODSP)            += x86/mpegaudiodsp.o
-OBJS-$(CONFIG_MPEGVIDEO)               += x86/mpegvideo.o
+OBJS-$(CONFIG_MPEGVIDEO)               += x86/mpegvideo.o              \
+                                          x86/mpegvideodsp.o
 OBJS-$(CONFIG_MPEGVIDEOENC)            += x86/mpegvideoenc.o
 OBJS-$(CONFIG_QPELDSP)                 += x86/qpeldsp_init.o
 OBJS-$(CONFIG_VIDEODSP)                += x86/videodsp_init.o
diff --git a/libavcodec/x86/dsputil_init.c b/libavcodec/x86/dsputil_init.c
index e69db8e9f0..90e62715db 100644
--- a/libavcodec/x86/dsputil_init.c
+++ b/libavcodec/x86/dsputil_init.c
@@ -52,8 +52,6 @@ static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
             break;
         }
     }
-
-    c->gmc = ff_gmc_mmx;
 #endif /* HAVE_MMX_INLINE */
 }
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index fe43804428..5fa047da7b 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -260,118 +260,4 @@ void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
     }
 }
 
-void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
-                int stride, int h, int ox, int oy,
-                int dxx, int dxy, int dyx, int dyy,
-                int shift, int r, int width, int height)
-{
-    const int w    = 8;
-    const int ix   = ox  >> (16 + shift);
-    const int iy   = oy  >> (16 + shift);
-    const int oxs  = ox  >> 4;
-    const int oys  = oy  >> 4;
-    const int dxxs = dxx >> 4;
-    const int dxys = dxy >> 4;
-    const int dyxs = dyx >> 4;
-    const int dyys = dyy >> 4;
-    const uint16_t r4[4]   = { r, r, r, r };
-    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
-    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
-    const uint64_t shift2  = 2 * shift;
-    int x, y;
-
-    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
-    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
-    const int dxh = dxy * (h - 1);
-    const int dyw = dyx * (w - 1);
-
-    if ( // non-constant fullpel offset (3% of blocks)
-        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
-         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
-        // uses more than 16 bits of subpel mv (only at huge resolution)
-        (dxx | dxy | dyx | dyy) & 15 ||
-        (unsigned) ix >= width  - w ||
-        (unsigned) iy >= height - h) {
-        // FIXME could still use mmx for some of the rows
-        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
-                 shift, r, width, height);
-        return;
-    }
-
-    src += ix + iy * stride;
-
-    __asm__ volatile (
-        "movd         %0, %%mm6        \n\t"
-        "pxor      %%mm7, %%mm7        \n\t"
-        "punpcklwd %%mm6, %%mm6        \n\t"
-        "punpcklwd %%mm6, %%mm6        \n\t"
-        :: "r" (1 << shift));
-
-    for (x = 0; x < w; x += 4) {
-        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
-                            oxs - dxys + dxxs * (x + 1),
-                            oxs - dxys + dxxs * (x + 2),
-                            oxs - dxys + dxxs * (x + 3) };
-        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
-                            oys - dyys + dyxs * (x + 1),
-                            oys - dyys + dyxs * (x + 2),
-                            oys - dyys + dyxs * (x + 3) };
-
-        for (y = 0; y < h; y++) {
-            __asm__ volatile (
-                "movq      %0, %%mm4   \n\t"
-                "movq      %1, %%mm5   \n\t"
-                "paddw     %2, %%mm4   \n\t"
-                "paddw     %3, %%mm5   \n\t"
-                "movq   %%mm4, %0      \n\t"
-                "movq   %%mm5, %1      \n\t"
-                "psrlw    $12, %%mm4   \n\t"
-                "psrlw    $12, %%mm5   \n\t"
-                : "+m" (*dx4), "+m" (*dy4)
-                : "m" (*dxy4), "m" (*dyy4));
-
-            __asm__ volatile (
-                "movq      %%mm6, %%mm2 \n\t"
-                "movq      %%mm6, %%mm1 \n\t"
-                "psubw     %%mm4, %%mm2 \n\t"
-                "psubw     %%mm5, %%mm1 \n\t"
-                "movq      %%mm2, %%mm0 \n\t"
-                "movq      %%mm4, %%mm3 \n\t"
-                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
-                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
-                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
-                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
-
-                "movd         %4, %%mm5 \n\t"
-                "movd         %3, %%mm4 \n\t"
-                "punpcklbw %%mm7, %%mm5 \n\t"
-                "punpcklbw %%mm7, %%mm4 \n\t"
-                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
-                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
-
-                "movd         %2, %%mm5 \n\t"
-                "movd         %1, %%mm4 \n\t"
-                "punpcklbw %%mm7, %%mm5 \n\t"
-                "punpcklbw %%mm7, %%mm4 \n\t"
-                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
-                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
-                "paddw        %5, %%mm1 \n\t"
-                "paddw     %%mm3, %%mm2 \n\t"
-                "paddw     %%mm1, %%mm0 \n\t"
-                "paddw     %%mm2, %%mm0 \n\t"
-
-                "psrlw        %6, %%mm0 \n\t"
-                "packuswb  %%mm0, %%mm0 \n\t"
-                "movd      %%mm0, %0    \n\t"
-
-                : "=m" (dst[x + y * stride])
-                : "m" (src[0]), "m" (src[1]),
-                  "m" (src[stride]), "m" (src[stride + 1]),
-                  "m" (*r4), "m" (shift2));
-            src += stride;
-        }
-        src += 4 - h * stride;
-    }
-}
-
 #endif /* HAVE_INLINE_ASM */
diff --git a/libavcodec/x86/dsputil_x86.h b/libavcodec/x86/dsputil_x86.h
index eeb9ca6114..4beb6c11ca 100644
--- a/libavcodec/x86/dsputil_x86.h
+++ b/libavcodec/x86/dsputil_x86.h
@@ -41,9 +41,4 @@ void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
 void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                        int w, int h, int sides);
 
-void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
-                int stride, int h, int ox, int oy,
-                int dxx, int dxy, int dyx, int dyy,
-                int shift, int r, int width, int height);
-
 #endif /* AVCODEC_X86_DSPUTIL_X86_H */
diff --git a/libavcodec/x86/mpegvideodsp.c b/libavcodec/x86/mpegvideodsp.c
new file mode 100644
index 0000000000..0e5dd0f153
--- /dev/null
+++ b/libavcodec/x86/mpegvideodsp.c
@@ -0,0 +1,153 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavcodec/mpegvideodsp.h"
+
+#if HAVE_INLINE_ASM
+
+static void gmc_mmx(uint8_t *dst, uint8_t *src,
+                    int stride, int h, int ox, int oy,
+                    int dxx, int dxy, int dyx, int dyy,
+                    int shift, int r, int width, int height)
+{
+    const int w    = 8;
+    const int ix   = ox  >> (16 + shift);
+    const int iy   = oy  >> (16 + shift);
+    const int oxs  = ox  >> 4;
+    const int oys  = oy  >> 4;
+    const int dxxs = dxx >> 4;
+    const int dxys = dxy >> 4;
+    const int dyxs = dyx >> 4;
+    const int dyys = dyy >> 4;
+    const uint16_t r4[4]   = { r, r, r, r };
+    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
+    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
+    const uint64_t shift2  = 2 * shift;
+    int x, y;
+
+    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
+    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
+    const int dxh = dxy * (h - 1);
+    const int dyw = dyx * (w - 1);
+
+    if ( // non-constant fullpel offset (3% of blocks)
+        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
+         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
+        // uses more than 16 bits of subpel mv (only at huge resolution)
+        (dxx | dxy | dyx | dyy) & 15 ||
+        (unsigned) ix >= width  - w ||
+        (unsigned) iy >= height - h) {
+        // FIXME could still use mmx for some of the rows
+        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
+                 shift, r, width, height);
+        return;
+    }
+
+    src += ix + iy * stride;
+
+    __asm__ volatile (
+        "movd         %0, %%mm6        \n\t"
+        "pxor      %%mm7, %%mm7        \n\t"
+        "punpcklwd %%mm6, %%mm6        \n\t"
+        "punpcklwd %%mm6, %%mm6        \n\t"
+        :: "r" (1 << shift));
+
+    for (x = 0; x < w; x += 4) {
+        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
+                            oxs - dxys + dxxs * (x + 1),
+                            oxs - dxys + dxxs * (x + 2),
+                            oxs - dxys + dxxs * (x + 3) };
+        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
+                            oys - dyys + dyxs * (x + 1),
+                            oys - dyys + dyxs * (x + 2),
+                            oys - dyys + dyxs * (x + 3) };
+
+        for (y = 0; y < h; y++) {
+            __asm__ volatile (
+                "movq      %0, %%mm4   \n\t"
+                "movq      %1, %%mm5   \n\t"
+                "paddw     %2, %%mm4   \n\t"
+                "paddw     %3, %%mm5   \n\t"
+                "movq   %%mm4, %0      \n\t"
+                "movq   %%mm5, %1      \n\t"
+                "psrlw    $12, %%mm4   \n\t"
+                "psrlw    $12, %%mm5   \n\t"
+                : "+m" (*dx4), "+m" (*dy4)
+                : "m" (*dxy4), "m" (*dyy4));
+
+            __asm__ volatile (
+                "movq      %%mm6, %%mm2 \n\t"
+                "movq      %%mm6, %%mm1 \n\t"
+                "psubw     %%mm4, %%mm2 \n\t"
+                "psubw     %%mm5, %%mm1 \n\t"
+                "movq      %%mm2, %%mm0 \n\t"
+                "movq      %%mm4, %%mm3 \n\t"
+                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
+                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
+                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
+                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
+
+                "movd         %4, %%mm5 \n\t"
+                "movd         %3, %%mm4 \n\t"
+                "punpcklbw %%mm7, %%mm5 \n\t"
+                "punpcklbw %%mm7, %%mm4 \n\t"
+                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
+                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
+
+                "movd         %2, %%mm5 \n\t"
+                "movd         %1, %%mm4 \n\t"
+                "punpcklbw %%mm7, %%mm5 \n\t"
+                "punpcklbw %%mm7, %%mm4 \n\t"
+                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
+                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
+                "paddw        %5, %%mm1 \n\t"
+                "paddw     %%mm3, %%mm2 \n\t"
+                "paddw     %%mm1, %%mm0 \n\t"
+                "paddw     %%mm2, %%mm0 \n\t"
+
+                "psrlw        %6, %%mm0 \n\t"
+                "packuswb  %%mm0, %%mm0 \n\t"
+                "movd      %%mm0, %0    \n\t"
+
+                : "=m" (dst[x + y * stride])
+                : "m" (src[0]), "m" (src[1]),
+                  "m" (src[stride]), "m" (src[stride + 1]),
+                  "m" (*r4), "m" (shift2));
+            src += stride;
+        }
+        src += 4 - h * stride;
+    }
+}
+
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
+{
+#if HAVE_INLINE_ASM
+    int cpu_flags = av_get_cpu_flags();
+
+    if (INLINE_MMX(cpu_flags))
+        c->gmc = gmc_mmx;
+#endif /* HAVE_INLINE_ASM */
+}