Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/h264.c                      542
-rw-r--r--   libavcodec/h264_mb_template.c          379
-rw-r--r--   libavcodec/h264_mc_template.c          160
-rw-r--r--   libavcodec/mpegvideo_common.h            3
-rw-r--r--   libavcodec/qdm2.c                       48
-rw-r--r--   libavcodec/x86/h264_intrapred.asm      290
-rw-r--r--   libavcodec/x86/h264_intrapred_init.c    24
-rw-r--r--   libavcodec/x86/vp8dsp-init.c             2
-rw-r--r--   libavcodec/x86/vp8dsp.asm               21
9 files changed, 721 insertions(+), 748 deletions(-)
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 5cf8d54f51..0e211a6b36 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -729,33 +729,6 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
}
}
-static av_always_inline void mc_part(H264Context *h, int n, int square,
- int height, int delta,
- uint8_t *dest_y, uint8_t *dest_cb,
- uint8_t *dest_cr,
- int x_offset, int y_offset,
- qpel_mc_func *qpix_put,
- h264_chroma_mc_func chroma_put,
- qpel_mc_func *qpix_avg,
- h264_chroma_mc_func chroma_avg,
- h264_weight_func *weight_op,
- h264_biweight_func *weight_avg,
- int list0, int list1,
- int pixel_shift, int chroma_idc)
-{
- if ((h->use_weight == 2 && list0 && list1 &&
- (h->implicit_weight[h->ref_cache[0][scan8[n]]][h->ref_cache[1][scan8[n]]][h->s.mb_y & 1] != 32)) ||
- h->use_weight == 1)
- mc_part_weighted(h, n, square, height, delta, dest_y, dest_cb, dest_cr,
- x_offset, y_offset, qpix_put, chroma_put,
- weight_op[0], weight_op[1], weight_avg[0],
- weight_avg[1], list0, list1, pixel_shift, chroma_idc);
- else
- mc_part_std(h, n, square, height, delta, dest_y, dest_cb, dest_cr,
- x_offset, y_offset, qpix_put, chroma_put, qpix_avg,
- chroma_avg, list0, list1, pixel_shift, chroma_idc);
-}
-
static av_always_inline void prefetch_motion(H264Context *h, int list,
int pixel_shift, int chroma_idc)
{
@@ -781,146 +754,6 @@ static av_always_inline void prefetch_motion(H264Context *h, int list,
}
}
-static av_always_inline void hl_motion(H264Context *h, uint8_t *dest_y,
- uint8_t *dest_cb, uint8_t *dest_cr,
- qpel_mc_func(*qpix_put)[16],
- h264_chroma_mc_func(*chroma_put),
- qpel_mc_func(*qpix_avg)[16],
- h264_chroma_mc_func(*chroma_avg),
- h264_weight_func *weight_op,
- h264_biweight_func *weight_avg,
- int pixel_shift, int chroma_idc)
-{
- MpegEncContext *const s = &h->s;
- const int mb_xy = h->mb_xy;
- const int mb_type = s->current_picture.f.mb_type[mb_xy];
-
- assert(IS_INTER(mb_type));
-
- if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- await_references(h);
- prefetch_motion(h, 0, pixel_shift, chroma_idc);
-
- if (IS_16X16(mb_type)) {
- mc_part(h, 0, 1, 16, 0, dest_y, dest_cb, dest_cr, 0, 0,
- qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
- weight_op, weight_avg,
- IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
- pixel_shift, chroma_idc);
- } else if (IS_16X8(mb_type)) {
- mc_part(h, 0, 0, 8, 8 << pixel_shift, dest_y, dest_cb, dest_cr, 0, 0,
- qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
- weight_op, weight_avg,
- IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
- pixel_shift, chroma_idc);
- mc_part(h, 8, 0, 8, 8 << pixel_shift, dest_y, dest_cb, dest_cr, 0, 4,
- qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
- weight_op, weight_avg,
- IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1),
- pixel_shift, chroma_idc);
- } else if (IS_8X16(mb_type)) {
- mc_part(h, 0, 0, 16, 8 * h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
- qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
- &weight_op[1], &weight_avg[1],
- IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
- pixel_shift, chroma_idc);
- mc_part(h, 4, 0, 16, 8 * h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
- qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
- &weight_op[1], &weight_avg[1],
- IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1),
- pixel_shift, chroma_idc);
- } else {
- int i;
-
- assert(IS_8X8(mb_type));
-
- for (i = 0; i < 4; i++) {
- const int sub_mb_type = h->sub_mb_type[i];
- const int n = 4 * i;
- int x_offset = (i & 1) << 2;
- int y_offset = (i & 2) << 1;
-
- if (IS_SUB_8X8(sub_mb_type)) {
- mc_part(h, n, 1, 8, 0, dest_y, dest_cb, dest_cr,
- x_offset, y_offset,
- qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
- &weight_op[1], &weight_avg[1],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- } else if (IS_SUB_8X4(sub_mb_type)) {
- mc_part(h, n, 0, 4, 4 << pixel_shift, dest_y, dest_cb, dest_cr,
- x_offset, y_offset,
- qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
- &weight_op[1], &weight_avg[1],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- mc_part(h, n + 2, 0, 4, 4 << pixel_shift,
- dest_y, dest_cb, dest_cr, x_offset, y_offset + 2,
- qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
- &weight_op[1], &weight_avg[1],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- } else if (IS_SUB_4X8(sub_mb_type)) {
- mc_part(h, n, 0, 8, 4 * h->mb_linesize,
- dest_y, dest_cb, dest_cr, x_offset, y_offset,
- qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
- &weight_op[2], &weight_avg[2],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- mc_part(h, n + 1, 0, 8, 4 * h->mb_linesize,
- dest_y, dest_cb, dest_cr, x_offset + 2, y_offset,
- qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
- &weight_op[2], &weight_avg[2],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- } else {
- int j;
- assert(IS_SUB_4X4(sub_mb_type));
- for (j = 0; j < 4; j++) {
- int sub_x_offset = x_offset + 2 * (j & 1);
- int sub_y_offset = y_offset + (j & 2);
- mc_part(h, n + j, 1, 4, 0,
- dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
- qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
- &weight_op[2], &weight_avg[2],
- IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
- pixel_shift, chroma_idc);
- }
- }
- }
- }
-
- prefetch_motion(h, 1, pixel_shift, chroma_idc);
-}
-
-static av_always_inline void hl_motion_420(H264Context *h, uint8_t *dest_y,
- uint8_t *dest_cb, uint8_t *dest_cr,
- qpel_mc_func(*qpix_put)[16],
- h264_chroma_mc_func(*chroma_put),
- qpel_mc_func(*qpix_avg)[16],
- h264_chroma_mc_func(*chroma_avg),
- h264_weight_func *weight_op,
- h264_biweight_func *weight_avg,
- int pixel_shift)
-{
- hl_motion(h, dest_y, dest_cb, dest_cr, qpix_put, chroma_put,
- qpix_avg, chroma_avg, weight_op, weight_avg, pixel_shift, 1);
-}
-
-static av_always_inline void hl_motion_422(H264Context *h, uint8_t *dest_y,
- uint8_t *dest_cb, uint8_t *dest_cr,
- qpel_mc_func(*qpix_put)[16],
- h264_chroma_mc_func(*chroma_put),
- qpel_mc_func(*qpix_avg)[16],
- h264_chroma_mc_func(*chroma_avg),
- h264_weight_func *weight_op,
- h264_biweight_func *weight_avg,
- int pixel_shift)
-{
- hl_motion(h, dest_y, dest_cb, dest_cr, qpix_put, chroma_put,
- qpix_avg, chroma_avg, weight_op, weight_avg, pixel_shift, 2);
-}
-
static void free_tables(H264Context *h, int free_rbsp)
{
int i;
@@ -2070,372 +1903,17 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
}
}
-static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple,
- int pixel_shift)
-{
- MpegEncContext *const s = &h->s;
- const int mb_x = s->mb_x;
- const int mb_y = s->mb_y;
- const int mb_xy = h->mb_xy;
- const int mb_type = s->current_picture.f.mb_type[mb_xy];
- uint8_t *dest_y, *dest_cb, *dest_cr;
- int linesize, uvlinesize /*dct_offset*/;
- int i, j;
- int *block_offset = &h->block_offset[0];
- const int transform_bypass = !simple && (s->qscale == 0 && h->sps.transform_bypass);
- /* is_h264 should always be true if SVQ3 is disabled. */
- const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
- void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
- const int block_h = 16 >> s->chroma_y_shift;
- const int chroma422 = CHROMA422;
-
- dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
- dest_cb = s->current_picture.f.data[1] + (mb_x << pixel_shift) * 8 + mb_y * s->uvlinesize * block_h;
- dest_cr = s->current_picture.f.data[2] + (mb_x << pixel_shift) * 8 + mb_y * s->uvlinesize * block_h;
-
- s->dsp.prefetch(dest_y + (s->mb_x & 3) * 4 * s->linesize + (64 << pixel_shift), s->linesize, 4);
- s->dsp.prefetch(dest_cb + (s->mb_x & 7) * s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
-
- h->list_counts[mb_xy] = h->list_count;
-
- if (!simple && MB_FIELD) {
- linesize = h->mb_linesize = s->linesize * 2;
- uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
- block_offset = &h->block_offset[48];
- if (mb_y & 1) { // FIXME move out of this function?
- dest_y -= s->linesize * 15;
- dest_cb -= s->uvlinesize * (block_h - 1);
- dest_cr -= s->uvlinesize * (block_h - 1);
- }
- if (FRAME_MBAFF) {
- int list;
- for (list = 0; list < h->list_count; list++) {
- if (!USES_LIST(mb_type, list))
- continue;
- if (IS_16X16(mb_type)) {
- int8_t *ref = &h->ref_cache[list][scan8[0]];
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
- } else {
- for (i = 0; i < 16; i += 4) {
- int ref = h->ref_cache[list][scan8[i]];
- if (ref >= 0)
- fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
- 8, (16 + ref) ^ (s->mb_y & 1), 1);
- }
- }
- }
- }
- } else {
- linesize = h->mb_linesize = s->linesize;
- uvlinesize = h->mb_uvlinesize = s->uvlinesize;
- // dct_offset = s->linesize * 16;
- }
-
- if (!simple && IS_INTRA_PCM(mb_type)) {
- const int bit_depth = h->sps.bit_depth_luma;
- if (pixel_shift) {
- int j;
- GetBitContext gb;
- init_get_bits(&gb, (uint8_t *)h->mb,
- ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);
-
- for (i = 0; i < 16; i++) {
- uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
- for (j = 0; j < 16; j++)
- tmp_y[j] = get_bits(&gb, bit_depth);
- }
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
- if (!h->sps.chroma_format_idc) {
- for (i = 0; i < block_h; i++) {
- uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
- uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
- for (j = 0; j < 8; j++) {
- tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
- }
- }
- } else {
- for (i = 0; i < block_h; i++) {
- uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
- for (j = 0; j < 8; j++)
- tmp_cb[j] = get_bits(&gb, bit_depth);
- }
- for (i = 0; i < block_h; i++) {
- uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
- for (j = 0; j < 8; j++)
- tmp_cr[j] = get_bits(&gb, bit_depth);
- }
- }
- }
- } else {
- for (i = 0; i < 16; i++)
- memcpy(dest_y + i * linesize, (uint8_t *)h->mb + i * 16, 16);
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
- if (!h->sps.chroma_format_idc) {
- for (i = 0; i < 8; i++) {
- memset(dest_cb + i*uvlinesize, 1 << (bit_depth - 1), 8);
- memset(dest_cr + i*uvlinesize, 1 << (bit_depth - 1), 8);
- }
- } else {
- uint8_t *src_cb = (uint8_t *)h->mb + 256;
- uint8_t *src_cr = (uint8_t *)h->mb + 256 + block_h * 8;
- for (i = 0; i < block_h; i++) {
- memcpy(dest_cb + i * uvlinesize, src_cb + i * 8, 8);
- memcpy(dest_cr + i * uvlinesize, src_cr + i * 8, 8);
- }
- }
- }
- }
- } else {
- if (IS_INTRA(mb_type)) {
- if (h->deblocking_filter)
- xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
- uvlinesize, 1, 0, simple, pixel_shift);
-
- if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
- if (CHROMA) {
- h->hpc.pred8x8[h->chroma_pred_mode](dest_cb, uvlinesize);
- h->hpc.pred8x8[h->chroma_pred_mode](dest_cr, uvlinesize);
- }
- }
-
- hl_decode_mb_predict_luma(h, mb_type, is_h264, simple,
- transform_bypass, pixel_shift,
- block_offset, linesize, dest_y, 0);
-
- if (h->deblocking_filter)
- xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
- uvlinesize, 0, 0, simple, pixel_shift);
- } else if (is_h264) {
- if (chroma422) {
- hl_motion_422(h, dest_y, dest_cb, dest_cr,
- s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
- s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
- h->h264dsp.weight_h264_pixels_tab,
- h->h264dsp.biweight_h264_pixels_tab,
- pixel_shift);
- } else {
- hl_motion_420(h, dest_y, dest_cb, dest_cr,
- s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
- s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
- h->h264dsp.weight_h264_pixels_tab,
- h->h264dsp.biweight_h264_pixels_tab,
- pixel_shift);
- }
- }
-
- hl_decode_mb_idct_luma(h, mb_type, is_h264, simple, transform_bypass,
- pixel_shift, block_offset, linesize, dest_y, 0);
-
- if ((simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) &&
- (h->cbp & 0x30)) {
- uint8_t *dest[2] = { dest_cb, dest_cr };
- if (transform_bypass) {
- if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
- (h->chroma_pred_mode == VERT_PRED8x8 ||
- h->chroma_pred_mode == HOR_PRED8x8)) {
- h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0],
- block_offset + 16,
- h->mb + (16 * 16 * 1 << pixel_shift),
- uvlinesize);
- h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1],
- block_offset + 32,
- h->mb + (16 * 16 * 2 << pixel_shift),
- uvlinesize);
- } else {
- idct_add = s->dsp.add_pixels4;
- for (j = 1; j < 3; j++) {
- for (i = j * 16; i < j * 16 + 4; i++)
- if (h->non_zero_count_cache[scan8[i]] ||
- dctcoef_get(h->mb, pixel_shift, i * 16))
- idct_add(dest[j - 1] + block_offset[i],
- h->mb + (i * 16 << pixel_shift),
- uvlinesize);
- if (chroma422) {
- for (i = j * 16 + 4; i < j * 16 + 8; i++)
- if (h->non_zero_count_cache[scan8[i + 4]] ||
- dctcoef_get(h->mb, pixel_shift, i * 16))
- idct_add(dest[j - 1] + block_offset[i + 4],
- h->mb + (i * 16 << pixel_shift),
- uvlinesize);
- }
- }
- }
- } else {
- if (is_h264) {
- int qp[2];
- if (chroma422) {
- qp[0] = h->chroma_qp[0] + 3;
- qp[1] = h->chroma_qp[1] + 3;
- } else {
- qp[0] = h->chroma_qp[0];
- qp[1] = h->chroma_qp[1];
- }
- if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 1 << pixel_shift),
- h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
- if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 2 << pixel_shift),
- h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
- h->h264dsp.h264_idct_add8(dest, block_offset,
- h->mb, uvlinesize,
- h->non_zero_count_cache);
- } else if (CONFIG_SVQ3_DECODER) {
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 1,
- h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][h->chroma_qp[0]][0]);
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 2,
- h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][h->chroma_qp[1]][0]);
- for (j = 1; j < 3; j++) {
- for (i = j * 16; i < j * 16 + 4; i++)
- if (h->non_zero_count_cache[scan8[i]] || h->mb[i * 16]) {
- uint8_t *const ptr = dest[j - 1] + block_offset[i];
- ff_svq3_add_idct_c(ptr, h->mb + i * 16,
- uvlinesize,
- ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
- }
- }
- }
- }
- }
- }
- if (h->cbp || IS_INTRA(mb_type)) {
- s->dsp.clear_blocks(h->mb);
- s->dsp.clear_blocks(h->mb + (24 * 16 << pixel_shift));
- }
-}
-
-static av_always_inline void hl_decode_mb_444_internal(H264Context *h,
- int simple,
- int pixel_shift)
-{
- MpegEncContext *const s = &h->s;
- const int mb_x = s->mb_x;
- const int mb_y = s->mb_y;
- const int mb_xy = h->mb_xy;
- const int mb_type = s->current_picture.f.mb_type[mb_xy];
- uint8_t *dest[3];
- int linesize;
- int i, j, p;
- int *block_offset = &h->block_offset[0];
- const int transform_bypass = !simple && (s->qscale == 0 && h->sps.transform_bypass);
- const int plane_count = (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) ? 3 : 1;
-
- for (p = 0; p < plane_count; p++) {
- dest[p] = s->current_picture.f.data[p] +
- ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
- s->dsp.prefetch(dest[p] + (s->mb_x & 3) * 4 * s->linesize + (64 << pixel_shift),
- s->linesize, 4);
- }
-
- h->list_counts[mb_xy] = h->list_count;
-
- if (!simple && MB_FIELD) {
- linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize * 2;
- block_offset = &h->block_offset[48];
- if (mb_y & 1) // FIXME move out of this function?
- for (p = 0; p < 3; p++)
- dest[p] -= s->linesize * 15;
- if (FRAME_MBAFF) {
- int list;
- for (list = 0; list < h->list_count; list++) {
- if (!USES_LIST(mb_type, list))
- continue;
- if (IS_16X16(mb_type)) {
- int8_t *ref = &h->ref_cache[list][scan8[0]];
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
- } else {
- for (i = 0; i < 16; i += 4) {
- int ref = h->ref_cache[list][scan8[i]];
- if (ref >= 0)
- fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
- 8, (16 + ref) ^ (s->mb_y & 1), 1);
- }
- }
- }
- }
- } else {
- linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize;
- }
-
- if (!simple && IS_INTRA_PCM(mb_type)) {
- if (pixel_shift) {
- const int bit_depth = h->sps.bit_depth_luma;
- GetBitContext gb;
- init_get_bits(&gb, (uint8_t *)h->mb, 768 * bit_depth);
-
- for (p = 0; p < plane_count; p++)
- for (i = 0; i < 16; i++) {
- uint16_t *tmp = (uint16_t *)(dest[p] + i * linesize);
- for (j = 0; j < 16; j++)
- tmp[j] = get_bits(&gb, bit_depth);
- }
- } else {
- for (p = 0; p < plane_count; p++)
- for (i = 0; i < 16; i++)
- memcpy(dest[p] + i * linesize,
- (uint8_t *)h->mb + p * 256 + i * 16, 16);
- }
- } else {
- if (IS_INTRA(mb_type)) {
- if (h->deblocking_filter)
- xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
- linesize, 1, 1, simple, pixel_shift);
-
- for (p = 0; p < plane_count; p++)
- hl_decode_mb_predict_luma(h, mb_type, 1, simple,
- transform_bypass, pixel_shift,
- block_offset, linesize, dest[p], p);
-
- if (h->deblocking_filter)
- xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
- linesize, 0, 1, simple, pixel_shift);
- } else {
- hl_motion(h, dest[0], dest[1], dest[2],
- s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
- s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
- h->h264dsp.weight_h264_pixels_tab,
- h->h264dsp.biweight_h264_pixels_tab, pixel_shift, 3);
- }
-
- for (p = 0; p < plane_count; p++)
- hl_decode_mb_idct_luma(h, mb_type, 1, simple, transform_bypass,
- pixel_shift, block_offset, linesize,
- dest[p], p);
- }
- if (h->cbp || IS_INTRA(mb_type)) {
- s->dsp.clear_blocks(h->mb);
- s->dsp.clear_blocks(h->mb + (24 * 16 << pixel_shift));
- }
-}
-
-/**
- * Process a macroblock; this case avoids checks for expensive uncommon cases.
- */
-#define hl_decode_mb_simple(sh, bits) \
-static void hl_decode_mb_simple_ ## bits(H264Context *h) \
-{ \
- hl_decode_mb_internal(h, 1, sh); \
-}
+#define BITS 8
+#define SIMPLE 1
+#include "h264_mb_template.c"
-hl_decode_mb_simple(0, 8)
-hl_decode_mb_simple(1, 16)
+#undef BITS
+#define BITS 16
+#include "h264_mb_template.c"
-/**
- * Process a macroblock; this handles edge cases, such as interlacing.
- */
-static av_noinline void hl_decode_mb_complex(H264Context *h)
-{
- hl_decode_mb_internal(h, 0, h->pixel_shift);
-}
-
-static av_noinline void hl_decode_mb_444_complex(H264Context *h)
-{
- hl_decode_mb_444_internal(h, 0, h->pixel_shift);
-}
-
-static av_noinline void hl_decode_mb_444_simple(H264Context *h)
-{
- hl_decode_mb_444_internal(h, 1, 0);
-}
+#undef SIMPLE
+#define SIMPLE 0
+#include "h264_mb_template.c"
void ff_h264_hl_decode_mb(H264Context *h)
{
@@ -2448,7 +1926,7 @@ void ff_h264_hl_decode_mb(H264Context *h)
if (is_complex || h->pixel_shift)
hl_decode_mb_444_complex(h);
else
- hl_decode_mb_444_simple(h);
+ hl_decode_mb_444_simple_8(h);
} else if (is_complex) {
hl_decode_mb_complex(h);
} else if (h->pixel_shift) {
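
For reference, a minimal self-contained sketch of the name generation that the new BITS/SIMPLE includes rely on; AV_JOIN/AV_GLUE are assumed to be FFmpeg's usual two-level token-paste helpers, and everything else below is illustrative rather than part of the patch. With SIMPLE set, each BITS pass stamps out its own hl_decode_mb_simple_<bits>, and the SIMPLE=0 pass yields hl_decode_mb_complex:

    #include <stdio.h>

    #define AV_GLUE(a, b) a ## b
    #define AV_JOIN(a, b) AV_GLUE(a, b)   /* two levels so BITS expands to 8 first */

    #define BITS   8
    #define SIMPLE 1

    #if SIMPLE
    #   define FUNC(n) AV_JOIN(n ## _simple_, BITS)
    #else
    #   define FUNC(n) n ## _complex
    #endif

    static void FUNC(hl_decode_mb)(void)  /* expands to hl_decode_mb_simple_8() */
    {
        puts("hl_decode_mb_simple_8");
    }

    int main(void)
    {
        hl_decode_mb_simple_8();          /* the identifier produced by the expansion above */
        return 0;
    }
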
diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
new file mode 100644
index 0000000000..d82562bac3
--- /dev/null
+++ b/libavcodec/h264_mb_template.c
@@ -0,0 +1,379 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef FUNC
+#undef PIXEL_SHIFT
+
+#if SIMPLE
+# define FUNC(n) AV_JOIN(n ## _simple_, BITS)
+# define PIXEL_SHIFT (BITS >> 4)
+#else
+# define FUNC(n) n ## _complex
+# define PIXEL_SHIFT h->pixel_shift
+#endif
+
+#undef CHROMA_IDC
+#define CHROMA_IDC 1
+#include "h264_mc_template.c"
+
+#undef CHROMA_IDC
+#define CHROMA_IDC 2
+#include "h264_mc_template.c"
+
+static av_noinline void FUNC(hl_decode_mb)(H264Context *h)
+{
+ MpegEncContext *const s = &h->s;
+ const int mb_x = s->mb_x;
+ const int mb_y = s->mb_y;
+ const int mb_xy = h->mb_xy;
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ int linesize, uvlinesize /*dct_offset*/;
+ int i, j;
+ int *block_offset = &h->block_offset[0];
+ const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass);
+ /* is_h264 should always be true if SVQ3 is disabled. */
+ const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE || s->codec_id == CODEC_ID_H264;
+ void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
+ const int block_h = 16 >> s->chroma_y_shift;
+ const int chroma422 = CHROMA422;
+
+ dest_y = s->current_picture.f.data[0] + ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16;
+ dest_cb = s->current_picture.f.data[1] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h;
+ dest_cr = s->current_picture.f.data[2] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h;
+
+ s->dsp.prefetch(dest_y + (s->mb_x & 3) * 4 * s->linesize + (64 << PIXEL_SHIFT), s->linesize, 4);
+ s->dsp.prefetch(dest_cb + (s->mb_x & 7) * s->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2);
+
+ h->list_counts[mb_xy] = h->list_count;
+
+ if (!SIMPLE && MB_FIELD) {
+ linesize = h->mb_linesize = s->linesize * 2;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
+ block_offset = &h->block_offset[48];
+ if (mb_y & 1) { // FIXME move out of this function?
+ dest_y -= s->linesize * 15;
+ dest_cb -= s->uvlinesize * (block_h - 1);
+ dest_cr -= s->uvlinesize * (block_h - 1);
+ }
+ if (FRAME_MBAFF) {
+ int list;
+ for (list = 0; list < h->list_count; list++) {
+ if (!USES_LIST(mb_type, list))
+ continue;
+ if (IS_16X16(mb_type)) {
+ int8_t *ref = &h->ref_cache[list][scan8[0]];
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
+ } else {
+ for (i = 0; i < 16; i += 4) {
+ int ref = h->ref_cache[list][scan8[i]];
+ if (ref >= 0)
+ fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
+ 8, (16 + ref) ^ (s->mb_y & 1), 1);
+ }
+ }
+ }
+ }
+ } else {
+ linesize = h->mb_linesize = s->linesize;
+ uvlinesize = h->mb_uvlinesize = s->uvlinesize;
+ // dct_offset = s->linesize * 16;
+ }
+
+ if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
+ const int bit_depth = h->sps.bit_depth_luma;
+ if (PIXEL_SHIFT) {
+ int j;
+ GetBitContext gb;
+ init_get_bits(&gb, (uint8_t *)h->mb,
+ ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);
+
+ for (i = 0; i < 16; i++) {
+ uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
+ for (j = 0; j < 16; j++)
+ tmp_y[j] = get_bits(&gb, bit_depth);
+ }
+ if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+ if (!h->sps.chroma_format_idc) {
+ for (i = 0; i < block_h; i++) {
+ uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
+ uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
+ for (j = 0; j < 8; j++) {
+ tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
+ }
+ }
+ } else {
+ for (i = 0; i < block_h; i++) {
+ uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
+ for (j = 0; j < 8; j++)
+ tmp_cb[j] = get_bits(&gb, bit_depth);
+ }
+ for (i = 0; i < block_h; i++) {
+ uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
+ for (j = 0; j < 8; j++)
+ tmp_cr[j] = get_bits(&gb, bit_depth);
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ memcpy(dest_y + i * linesize, (uint8_t *)h->mb + i * 16, 16);
+ if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+ if (!h->sps.chroma_format_idc) {
+ for (i = 0; i < 8; i++) {
+ memset(dest_cb + i*uvlinesize, 1 << (bit_depth - 1), 8);
+ memset(dest_cr + i*uvlinesize, 1 << (bit_depth - 1), 8);
+ }
+ } else {
+ uint8_t *src_cb = (uint8_t *)h->mb + 256;
+ uint8_t *src_cr = (uint8_t *)h->mb + 256 + block_h * 8;
+ for (i = 0; i < block_h; i++) {
+ memcpy(dest_cb + i * uvlinesize, src_cb + i * 8, 8);
+ memcpy(dest_cr + i * uvlinesize, src_cr + i * 8, 8);
+ }
+ }
+ }
+ }
+ } else {
+ if (IS_INTRA(mb_type)) {
+ if (h->deblocking_filter)
+ xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
+ uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT);
+
+ if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+ if (CHROMA) {
+ h->hpc.pred8x8[h->chroma_pred_mode](dest_cb, uvlinesize);
+ h->hpc.pred8x8[h->chroma_pred_mode](dest_cr, uvlinesize);
+ }
+ }
+
+ hl_decode_mb_predict_luma(h, mb_type, is_h264, SIMPLE,
+ transform_bypass, PIXEL_SHIFT,
+ block_offset, linesize, dest_y, 0);
+
+ if (h->deblocking_filter)
+ xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
+ uvlinesize, 0, 0, SIMPLE, PIXEL_SHIFT);
+ } else if (is_h264) {
+ if (chroma422) {
+ FUNC(hl_motion_422)(h, dest_y, dest_cb, dest_cr,
+ s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+ s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
+ h->h264dsp.weight_h264_pixels_tab,
+ h->h264dsp.biweight_h264_pixels_tab);
+ } else {
+ FUNC(hl_motion_420)(h, dest_y, dest_cb, dest_cr,
+ s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+ s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
+ h->h264dsp.weight_h264_pixels_tab,
+ h->h264dsp.biweight_h264_pixels_tab);
+ }
+ }
+
+ hl_decode_mb_idct_luma(h, mb_type, is_h264, SIMPLE, transform_bypass,
+ PIXEL_SHIFT, block_offset, linesize, dest_y, 0);
+
+ if ((SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) &&
+ (h->cbp & 0x30)) {
+ uint8_t *dest[2] = { dest_cb, dest_cr };
+ if (transform_bypass) {
+ if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
+ (h->chroma_pred_mode == VERT_PRED8x8 ||
+ h->chroma_pred_mode == HOR_PRED8x8)) {
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0],
+ block_offset + 16,
+ h->mb + (16 * 16 * 1 << PIXEL_SHIFT),
+ uvlinesize);
+ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1],
+ block_offset + 32,
+ h->mb + (16 * 16 * 2 << PIXEL_SHIFT),
+ uvlinesize);
+ } else {
+ idct_add = s->dsp.add_pixels4;
+ for (j = 1; j < 3; j++) {
+ for (i = j * 16; i < j * 16 + 4; i++)
+ if (h->non_zero_count_cache[scan8[i]] ||
+ dctcoef_get(h->mb, PIXEL_SHIFT, i * 16))
+ idct_add(dest[j - 1] + block_offset[i],
+ h->mb + (i * 16 << PIXEL_SHIFT),
+ uvlinesize);
+ if (chroma422) {
+ for (i = j * 16 + 4; i < j * 16 + 8; i++)
+ if (h->non_zero_count_cache[scan8[i + 4]] ||
+ dctcoef_get(h->mb, PIXEL_SHIFT, i * 16))
+ idct_add(dest[j - 1] + block_offset[i + 4],
+ h->mb + (i * 16 << PIXEL_SHIFT),
+ uvlinesize);
+ }
+ }
+ }
+ } else {
+ if (is_h264) {
+ int qp[2];
+ if (chroma422) {
+ qp[0] = h->chroma_qp[0] + 3;
+ qp[1] = h->chroma_qp[1] + 3;
+ } else {
+ qp[0] = h->chroma_qp[0];
+ qp[1] = h->chroma_qp[1];
+ }
+ if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 1 << PIXEL_SHIFT),
+ h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
+ if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 2 << PIXEL_SHIFT),
+ h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
+ h->h264dsp.h264_idct_add8(dest, block_offset,
+ h->mb, uvlinesize,
+ h->non_zero_count_cache);
+ } else if (CONFIG_SVQ3_DECODER) {
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 1,
+ h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][h->chroma_qp[0]][0]);
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 2,
+ h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][h->chroma_qp[1]][0]);
+ for (j = 1; j < 3; j++) {
+ for (i = j * 16; i < j * 16 + 4; i++)
+ if (h->non_zero_count_cache[scan8[i]] || h->mb[i * 16]) {
+ uint8_t *const ptr = dest[j - 1] + block_offset[i];
+ ff_svq3_add_idct_c(ptr, h->mb + i * 16,
+ uvlinesize,
+ ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
+ }
+ }
+ }
+ }
+ }
+ }
+ if (h->cbp || IS_INTRA(mb_type)) {
+ s->dsp.clear_blocks(h->mb);
+ s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT));
+ }
+}
+
+#if !SIMPLE || BITS == 8
+
+#undef CHROMA_IDC
+#define CHROMA_IDC 3
+#include "h264_mc_template.c"
+
+static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h)
+{
+ MpegEncContext *const s = &h->s;
+ const int mb_x = s->mb_x;
+ const int mb_y = s->mb_y;
+ const int mb_xy = h->mb_xy;
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
+ uint8_t *dest[3];
+ int linesize;
+ int i, j, p;
+ int *block_offset = &h->block_offset[0];
+ const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass);
+ const int plane_count = (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) ? 3 : 1;
+
+ for (p = 0; p < plane_count; p++) {
+ dest[p] = s->current_picture.f.data[p] +
+ ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16;
+ s->dsp.prefetch(dest[p] + (s->mb_x & 3) * 4 * s->linesize + (64 << PIXEL_SHIFT),
+ s->linesize, 4);
+ }
+
+ h->list_counts[mb_xy] = h->list_count;
+
+ if (!SIMPLE && MB_FIELD) {
+ linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize * 2;
+ block_offset = &h->block_offset[48];
+ if (mb_y & 1) // FIXME move out of this function?
+ for (p = 0; p < 3; p++)
+ dest[p] -= s->linesize * 15;
+ if (FRAME_MBAFF) {
+ int list;
+ for (list = 0; list < h->list_count; list++) {
+ if (!USES_LIST(mb_type, list))
+ continue;
+ if (IS_16X16(mb_type)) {
+ int8_t *ref = &h->ref_cache[list][scan8[0]];
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
+ } else {
+ for (i = 0; i < 16; i += 4) {
+ int ref = h->ref_cache[list][scan8[i]];
+ if (ref >= 0)
+ fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
+ 8, (16 + ref) ^ (s->mb_y & 1), 1);
+ }
+ }
+ }
+ }
+ } else {
+ linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize;
+ }
+
+ if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
+ if (PIXEL_SHIFT) {
+ const int bit_depth = h->sps.bit_depth_luma;
+ GetBitContext gb;
+ init_get_bits(&gb, (uint8_t *)h->mb, 768 * bit_depth);
+
+ for (p = 0; p < plane_count; p++)
+ for (i = 0; i < 16; i++) {
+ uint16_t *tmp = (uint16_t *)(dest[p] + i * linesize);
+ for (j = 0; j < 16; j++)
+ tmp[j] = get_bits(&gb, bit_depth);
+ }
+ } else {
+ for (p = 0; p < plane_count; p++)
+ for (i = 0; i < 16; i++)
+ memcpy(dest[p] + i * linesize,
+ (uint8_t *)h->mb + p * 256 + i * 16, 16);
+ }
+ } else {
+ if (IS_INTRA(mb_type)) {
+ if (h->deblocking_filter)
+ xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
+ linesize, 1, 1, SIMPLE, PIXEL_SHIFT);
+
+ for (p = 0; p < plane_count; p++)
+ hl_decode_mb_predict_luma(h, mb_type, 1, SIMPLE,
+ transform_bypass, PIXEL_SHIFT,
+ block_offset, linesize, dest[p], p);
+
+ if (h->deblocking_filter)
+ xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
+ linesize, 0, 1, SIMPLE, PIXEL_SHIFT);
+ } else {
+ FUNC(hl_motion_444)(h, dest[0], dest[1], dest[2],
+ s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+ s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
+ h->h264dsp.weight_h264_pixels_tab,
+ h->h264dsp.biweight_h264_pixels_tab);
+ }
+
+ for (p = 0; p < plane_count; p++)
+ hl_decode_mb_idct_luma(h, mb_type, 1, SIMPLE, transform_bypass,
+ PIXEL_SHIFT, block_offset, linesize,
+ dest[p], p);
+ }
+ if (h->cbp || IS_INTRA(mb_type)) {
+ s->dsp.clear_blocks(h->mb);
+ s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT));
+ }
+}
+
+#endif
diff --git a/libavcodec/h264_mc_template.c b/libavcodec/h264_mc_template.c
new file mode 100644
index 0000000000..43a3ecfd72
--- /dev/null
+++ b/libavcodec/h264_mc_template.c
@@ -0,0 +1,160 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MCFUNC
+
+#if CHROMA_IDC == 1
+# define MCFUNC(n) FUNC(n ## _420)
+#elif CHROMA_IDC == 2
+# define MCFUNC(n) FUNC(n ## _422)
+#elif CHROMA_IDC == 3
+# define MCFUNC(n) FUNC(n ## _444)
+#endif
+
+#undef mc_part
+#define mc_part MCFUNC(mc_part)
+
+static void mc_part(H264Context *h, int n, int square,
+ int height, int delta,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr,
+ int x_offset, int y_offset,
+ qpel_mc_func *qpix_put,
+ h264_chroma_mc_func chroma_put,
+ qpel_mc_func *qpix_avg,
+ h264_chroma_mc_func chroma_avg,
+ h264_weight_func *weight_op,
+ h264_biweight_func *weight_avg,
+ int list0, int list1)
+{
+ if ((h->use_weight == 2 && list0 && list1 &&
+ (h->implicit_weight[h->ref_cache[0][scan8[n]]][h->ref_cache[1][scan8[n]]][h->s.mb_y & 1] != 32)) ||
+ h->use_weight == 1)
+ mc_part_weighted(h, n, square, height, delta, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset, qpix_put, chroma_put,
+ weight_op[0], weight_op[1], weight_avg[0],
+ weight_avg[1], list0, list1, PIXEL_SHIFT, CHROMA_IDC);
+ else
+ mc_part_std(h, n, square, height, delta, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset, qpix_put, chroma_put, qpix_avg,
+ chroma_avg, list0, list1, PIXEL_SHIFT, CHROMA_IDC);
+}
+
+static void MCFUNC(hl_motion)(H264Context *h, uint8_t *dest_y,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ qpel_mc_func(*qpix_put)[16],
+ h264_chroma_mc_func(*chroma_put),
+ qpel_mc_func(*qpix_avg)[16],
+ h264_chroma_mc_func(*chroma_avg),
+ h264_weight_func *weight_op,
+ h264_biweight_func *weight_avg)
+{
+ MpegEncContext *const s = &h->s;
+ const int mb_xy = h->mb_xy;
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
+
+ assert(IS_INTER(mb_type));
+
+ if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
+ await_references(h);
+ prefetch_motion(h, 0, PIXEL_SHIFT, CHROMA_IDC);
+
+ if (IS_16X16(mb_type)) {
+ mc_part(h, 0, 1, 16, 0, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
+ weight_op, weight_avg,
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ } else if (IS_16X8(mb_type)) {
+ mc_part(h, 0, 0, 8, 8 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
+ weight_op, weight_avg,
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ mc_part(h, 8, 0, 8, 8 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr, 0, 4,
+ qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
+ weight_op, weight_avg,
+ IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
+ } else if (IS_8X16(mb_type)) {
+ mc_part(h, 0, 0, 16, 8 * h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
+ mc_part(h, 4, 0, 16, 8 * h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
+ } else {
+ int i;
+
+ assert(IS_8X8(mb_type));
+
+ for (i = 0; i < 4; i++) {
+ const int sub_mb_type = h->sub_mb_type[i];
+ const int n = 4 * i;
+ int x_offset = (i & 1) << 2;
+ int y_offset = (i & 2) << 1;
+
+ if (IS_SUB_8X8(sub_mb_type)) {
+ mc_part(h, n, 1, 8, 0, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset,
+ qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ } else if (IS_SUB_8X4(sub_mb_type)) {
+ mc_part(h, n, 0, 4, 4 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr,
+ x_offset, y_offset,
+ qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ mc_part(h, n + 2, 0, 4, 4 << PIXEL_SHIFT,
+ dest_y, dest_cb, dest_cr, x_offset, y_offset + 2,
+ qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
+ &weight_op[1], &weight_avg[1],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ } else if (IS_SUB_4X8(sub_mb_type)) {
+ mc_part(h, n, 0, 8, 4 * h->mb_linesize,
+ dest_y, dest_cb, dest_cr, x_offset, y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[2], &weight_avg[2],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ mc_part(h, n + 1, 0, 8, 4 * h->mb_linesize,
+ dest_y, dest_cb, dest_cr, x_offset + 2, y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[2], &weight_avg[2],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ } else {
+ int j;
+ assert(IS_SUB_4X4(sub_mb_type));
+ for (j = 0; j < 4; j++) {
+ int sub_x_offset = x_offset + 2 * (j & 1);
+ int sub_y_offset = y_offset + (j & 2);
+ mc_part(h, n + j, 1, 4, 0,
+ dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
+ qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
+ &weight_op[2], &weight_avg[2],
+ IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
+ }
+ }
+ }
+ }
+
+ prefetch_motion(h, 1, PIXEL_SHIFT, CHROMA_IDC);
+}
+
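
MCFUNC composes with FUNC from h264_mb_template.c, so the single hl_motion body above is stamped out once per chroma format and per template pass (e.g. hl_motion_420_simple_8), and PIXEL_SHIFT/CHROMA_IDC reach mc_part_std()/mc_part_weighted() as compile-time constants rather than the run-time arguments the old h264.c code passed. A self-contained sketch of that composition; the helper macros are repeated here for illustration only, and the real values come from the including templates:

    #include <stdio.h>

    #define AV_GLUE(a, b) a ## b
    #define AV_JOIN(a, b) AV_GLUE(a, b)

    /* Normally set by h264_mb_template.c before including h264_mc_template.c. */
    #define BITS       8
    #define SIMPLE     1
    #define CHROMA_IDC 1                    /* 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4 */

    #if SIMPLE
    #   define FUNC(n) AV_JOIN(n ## _simple_, BITS)
    #else
    #   define FUNC(n) n ## _complex
    #endif

    #if CHROMA_IDC == 1
    #   define MCFUNC(n) FUNC(n ## _420)
    #elif CHROMA_IDC == 2
    #   define MCFUNC(n) FUNC(n ## _422)
    #elif CHROMA_IDC == 3
    #   define MCFUNC(n) FUNC(n ## _444)
    #endif

    static void MCFUNC(hl_motion)(void)     /* expands to hl_motion_420_simple_8() */
    {
        printf("chroma_idc=%d pixel_shift=%d\n", CHROMA_IDC, BITS >> 4);
    }

    int main(void)
    {
        hl_motion_420_simple_8();
        return 0;
    }
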
diff --git a/libavcodec/mpegvideo_common.h b/libavcodec/mpegvideo_common.h
index 131a805223..1802a9b0f5 100644
--- a/libavcodec/mpegvideo_common.h
+++ b/libavcodec/mpegvideo_common.h
@@ -720,7 +720,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
0, 0, 0,
ref_picture, pix_op, qpix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
- }else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel && s->codec_id == CODEC_ID_WMV2){
+ } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
+ s->mspel && s->codec_id == CODEC_ID_WMV2) {
ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c
index aa180fdbd1..c739a43c4c 100644
--- a/libavcodec/qdm2.c
+++ b/libavcodec/qdm2.c
@@ -1696,51 +1696,6 @@ static av_cold void qdm2_init(QDM2Context *q) {
}
-#if 0
-static void dump_context(QDM2Context *q)
-{
- int i;
-#define PRINT(a,b) av_log(NULL,AV_LOG_DEBUG," %s = %d\n", a, b);
- PRINT("compressed_data",q->compressed_data);
- PRINT("compressed_size",q->compressed_size);
- PRINT("frame_size",q->frame_size);
- PRINT("checksum_size",q->checksum_size);
- PRINT("channels",q->channels);
- PRINT("nb_channels",q->nb_channels);
- PRINT("fft_size",q->fft_size);
- PRINT("sub_sampling",q->sub_sampling);
- PRINT("fft_order",q->fft_order);
- PRINT("group_order",q->group_order);
- PRINT("group_size",q->group_size);
- PRINT("sub_packet",q->sub_packet);
- PRINT("frequency_range",q->frequency_range);
- PRINT("has_errors",q->has_errors);
- PRINT("fft_tone_end",q->fft_tone_end);
- PRINT("fft_tone_start",q->fft_tone_start);
- PRINT("fft_coefs_index",q->fft_coefs_index);
- PRINT("coeff_per_sb_select",q->coeff_per_sb_select);
- PRINT("cm_table_select",q->cm_table_select);
- PRINT("noise_idx",q->noise_idx);
-
- for (i = q->fft_tone_start; i < q->fft_tone_end; i++)
- {
- FFTTone *t = &q->fft_tones[i];
-
- av_log(NULL,AV_LOG_DEBUG,"Tone (%d) dump:\n", i);
- av_log(NULL,AV_LOG_DEBUG," level = %f\n", t->level);
-// PRINT(" level", t->level);
- PRINT(" phase", t->phase);
- PRINT(" phase_shift", t->phase_shift);
- PRINT(" duration", t->duration);
- PRINT(" samples_im", t->samples_im);
- PRINT(" samples_re", t->samples_re);
- PRINT(" table", t->table);
- }
-
-}
-#endif
-
-
/**
* Init parameters from codec extradata
*/
@@ -1922,7 +1877,6 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
-// dump_context(s);
return 0;
}
@@ -1949,8 +1903,6 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
q->compressed_data = in;
q->compressed_size = q->checksum_size;
-// dump_context(q);
-
/* copy old block, clear new block of output samples */
memmove(q->output_buffer, &q->output_buffer[frame_size], frame_size * sizeof(float));
memset(&q->output_buffer[frame_size], 0, frame_size * sizeof(float));
diff --git a/libavcodec/x86/h264_intrapred.asm b/libavcodec/x86/h264_intrapred.asm
index b5d45ddf48..3fd5ee87c7 100644
--- a/libavcodec/x86/h264_intrapred.asm
+++ b/libavcodec/x86/h264_intrapred.asm
@@ -87,23 +87,23 @@ cglobal pred16x16_vertical_sse, 2,3
; void pred16x16_horizontal(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_H 1
-cglobal pred16x16_horizontal_%1, 2,3
+%macro PRED16x16_H 0
+cglobal pred16x16_horizontal, 2,3
mov r2, 8
-%ifidn %1, ssse3
+%if cpuflag(ssse3)
mova m2, [pb_3]
%endif
.loop:
movd m0, [r0+r1*0-4]
movd m1, [r0+r1*1-4]
-%ifidn %1, ssse3
+%if cpuflag(ssse3)
pshufb m0, m2
pshufb m1, m2
%else
punpcklbw m0, m0
punpcklbw m1, m1
-%ifidn %1, mmxext
+%if cpuflag(mmx2)
pshufw m0, m0, 0xff
pshufw m1, m1, 0xff
%else
@@ -124,18 +124,20 @@ cglobal pred16x16_horizontal_%1, 2,3
REP_RET
%endmacro
-INIT_MMX
-PRED16x16_H mmx
-PRED16x16_H mmxext
+INIT_MMX mmx
+PRED16x16_H
+INIT_MMX mmx2
+PRED16x16_H
+INIT_XMM ssse3
+PRED16x16_H
INIT_XMM
-PRED16x16_H ssse3
;-----------------------------------------------------------------------------
; void pred16x16_dc(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_DC 1
-cglobal pred16x16_dc_%1, 2,7
+%macro PRED16x16_DC 0
+cglobal pred16x16_dc, 2,7
mov r4, r0
sub r0, r1
pxor mm0, mm0
@@ -158,19 +160,19 @@ cglobal pred16x16_dc_%1, 2,7
add r5d, r6d
lea r2d, [r2+r5+16]
shr r2d, 5
-%ifidn %1, mmxext
+%if cpuflag(ssse3)
+ pxor m1, m1
movd m0, r2d
- punpcklbw m0, m0
- pshufw m0, m0, 0
-%elifidn %1, sse2
+ pshufb m0, m1
+%elif cpuflag(sse2)
movd m0, r2d
punpcklbw m0, m0
pshuflw m0, m0, 0
punpcklqdq m0, m0
-%elifidn %1, ssse3
- pxor m1, m1
+%elif cpuflag(mmx2)
movd m0, r2d
- pshufb m0, m1
+ punpcklbw m0, m0
+ pshufw m0, m0, 0
%endif
%if mmsize==8
@@ -195,18 +197,20 @@ cglobal pred16x16_dc_%1, 2,7
REP_RET
%endmacro
-INIT_MMX
-PRED16x16_DC mmxext
+INIT_MMX mmx2
+PRED16x16_DC
+INIT_XMM sse2
+PRED16x16_DC
+INIT_XMM ssse3
+PRED16x16_DC
INIT_XMM
-PRED16x16_DC sse2
-PRED16x16_DC ssse3
;-----------------------------------------------------------------------------
; void pred16x16_tm_vp8(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_TM_MMX 1
-cglobal pred16x16_tm_vp8_%1, 2,5
+%macro PRED16x16_TM_MMX 0
+cglobal pred16x16_tm_vp8, 2,5
sub r0, r1
pxor mm7, mm7
movq mm0, [r0+0]
@@ -223,11 +227,11 @@ cglobal pred16x16_tm_vp8_%1, 2,5
movzx r2d, byte [r0+r1-1]
sub r2d, r3d
movd mm4, r2d
-%ifidn %1, mmx
+%if cpuflag(mmx2)
+ pshufw mm4, mm4, 0
+%else
punpcklwd mm4, mm4
punpckldq mm4, mm4
-%else
- pshufw mm4, mm4, 0
%endif
movq mm5, mm4
movq mm6, mm4
@@ -246,8 +250,11 @@ cglobal pred16x16_tm_vp8_%1, 2,5
REP_RET
%endmacro
-PRED16x16_TM_MMX mmx
-PRED16x16_TM_MMX mmxext
+INIT_MMX mmx
+PRED16x16_TM_MMX
+INIT_MMX mmx2
+PRED16x16_TM_MMX
+INIT_MMX
cglobal pred16x16_tm_vp8_sse2, 2,6,6
sub r0, r1
@@ -288,8 +295,8 @@ cglobal pred16x16_tm_vp8_sse2, 2,6,6
; void pred16x16_plane(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro H264_PRED16x16_PLANE 3
-cglobal pred16x16_plane_%3_%1, 2, 9, %2
+%macro H264_PRED16x16_PLANE 1
+cglobal pred16x16_plane_%1, 2,9,7
mov r2, r1 ; +stride
neg r1 ; -stride
@@ -310,7 +317,10 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
paddw m0, m2
paddw m1, m3
%else ; mmsize == 16
-%ifidn %1, sse2
+%if cpuflag(ssse3)
+ movhps m0, [r0+r1 +8]
+ pmaddubsw m0, [plane_shuf] ; H coefficients
+%else ; sse2
pxor m2, m2
movh m1, [r0+r1 +8]
punpcklbw m0, m2
@@ -318,29 +328,26 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
pmullw m0, [pw_m8tom1]
pmullw m1, [pw_1to8]
paddw m0, m1
-%else ; ssse3
- movhps m0, [r0+r1 +8]
- pmaddubsw m0, [plane_shuf] ; H coefficients
%endif
movhlps m1, m0
%endif
paddw m0, m1
-%ifidn %1, mmx
+%if cpuflag(sse2)
+ pshuflw m1, m0, 0xE
+%elif cpuflag(mmx2)
+ pshufw m1, m0, 0xE
+%elif cpuflag(mmx)
mova m1, m0
psrlq m1, 32
-%elifidn %1, mmx2
- pshufw m1, m0, 0xE
-%else ; mmsize == 16
- pshuflw m1, m0, 0xE
%endif
paddw m0, m1
-%ifidn %1, mmx
+%if cpuflag(sse2)
+ pshuflw m1, m0, 0x1
+%elif cpuflag(mmx2)
+ pshufw m1, m0, 0x1
+%elif cpuflag(mmx)
mova m1, m0
psrlq m1, 16
-%elifidn %1, mmx2
- pshufw m1, m0, 0x1
-%else
- pshuflw m1, m0, 0x1
%endif
paddw m0, m1 ; sum of H coefficients
@@ -424,13 +431,13 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
mov r0, r0m
%endif
-%ifidn %3, h264
+%ifidn %1, h264
lea r5, [r5*5+32]
sar r5, 6
-%elifidn %3, rv40
+%elifidn %1, rv40
lea r5, [r5*5]
sar r5, 6
-%elifidn %3, svq3
+%elifidn %1, svq3
test r5, r5
lea r6, [r5+3]
cmovs r5, r6
@@ -449,8 +456,8 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
movd r1d, m0
movsx r1d, r1w
-%ifnidn %3, svq3
-%ifidn %3, h264
+%ifnidn %1, svq3
+%ifidn %1, h264
lea r1d, [r1d*5+32]
%else ; rv40
lea r1d, [r1d*5]
@@ -476,26 +483,26 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
movd m1, r5d
movd m3, r3d
-%ifidn %1, mmx
- punpcklwd m0, m0
- punpcklwd m1, m1
- punpcklwd m3, m3
- punpckldq m0, m0
- punpckldq m1, m1
- punpckldq m3, m3
-%elifidn %1, mmx2
- pshufw m0, m0, 0x0
- pshufw m1, m1, 0x0
- pshufw m3, m3, 0x0
-%else
+%if cpuflag(sse2)
pshuflw m0, m0, 0x0
pshuflw m1, m1, 0x0
pshuflw m3, m3, 0x0
punpcklqdq m0, m0 ; splat H (words)
punpcklqdq m1, m1 ; splat V (words)
punpcklqdq m3, m3 ; splat a (words)
+%elif cpuflag(mmx2)
+ pshufw m0, m0, 0x0
+ pshufw m1, m1, 0x0
+ pshufw m3, m3, 0x0
+%elif cpuflag(mmx)
+ punpcklwd m0, m0
+ punpcklwd m1, m1
+ punpcklwd m3, m3
+ punpckldq m0, m0
+ punpckldq m1, m1
+ punpckldq m3, m3
%endif
-%ifidn %3, svq3
+%ifidn %1, svq3
SWAP 0, 1
%endif
mova m2, m0
@@ -568,27 +575,30 @@ cglobal pred16x16_plane_%3_%1, 2, 9, %2
REP_RET
%endmacro
-INIT_MMX
-H264_PRED16x16_PLANE mmx, 0, h264
-H264_PRED16x16_PLANE mmx, 0, rv40
-H264_PRED16x16_PLANE mmx, 0, svq3
-H264_PRED16x16_PLANE mmx2, 0, h264
-H264_PRED16x16_PLANE mmx2, 0, rv40
-H264_PRED16x16_PLANE mmx2, 0, svq3
+INIT_MMX mmx
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
+INIT_MMX mmx2
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
+INIT_XMM sse2
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
+INIT_XMM ssse3
+H264_PRED16x16_PLANE h264
+H264_PRED16x16_PLANE rv40
+H264_PRED16x16_PLANE svq3
INIT_XMM
-H264_PRED16x16_PLANE sse2, 8, h264
-H264_PRED16x16_PLANE sse2, 8, rv40
-H264_PRED16x16_PLANE sse2, 8, svq3
-H264_PRED16x16_PLANE ssse3, 8, h264
-H264_PRED16x16_PLANE ssse3, 8, rv40
-H264_PRED16x16_PLANE ssse3, 8, svq3
;-----------------------------------------------------------------------------
; void pred8x8_plane(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro H264_PRED8x8_PLANE 2
-cglobal pred8x8_plane_%1, 2, 9, %2
+%macro H264_PRED8x8_PLANE 0
+cglobal pred8x8_plane, 2,9,7
mov r2, r1 ; +stride
neg r1 ; -stride
@@ -601,39 +611,39 @@ cglobal pred8x8_plane_%1, 2, 9, %2
pmullw m0, [pw_m4to4]
pmullw m1, [pw_m4to4+8]
%else ; mmsize == 16
-%ifidn %1, sse2
+%if cpuflag(ssse3)
+ movhps m0, [r0+r1 +4] ; this reads 4 bytes more than necessary
+ pmaddubsw m0, [plane8_shuf] ; H coefficients
+%else ; sse2
pxor m2, m2
movd m1, [r0+r1 +4]
punpckldq m0, m1
punpcklbw m0, m2
pmullw m0, [pw_m4to4]
-%else ; ssse3
- movhps m0, [r0+r1 +4] ; this reads 4 bytes more than necessary
- pmaddubsw m0, [plane8_shuf] ; H coefficients
%endif
movhlps m1, m0
%endif
paddw m0, m1
-%ifnidn %1, ssse3
-%ifidn %1, mmx
+%if notcpuflag(ssse3)
+%if cpuflag(sse2) ; mmsize == 16
+ pshuflw m1, m0, 0xE
+%elif cpuflag(mmx2)
+ pshufw m1, m0, 0xE
+%elif cpuflag(mmx)
mova m1, m0
psrlq m1, 32
-%elifidn %1, mmx2
- pshufw m1, m0, 0xE
-%else ; mmsize == 16
- pshuflw m1, m0, 0xE
%endif
paddw m0, m1
%endif ; !ssse3
-%ifidn %1, mmx
+%if cpuflag(sse2)
+ pshuflw m1, m0, 0x1
+%elif cpuflag(mmx2)
+ pshufw m1, m0, 0x1
+%elif cpuflag(mmx)
mova m1, m0
psrlq m1, 16
-%elifidn %1, mmx2
- pshufw m1, m0, 0x1
-%else
- pshuflw m1, m0, 0x1
%endif
paddw m0, m1 ; sum of H coefficients
@@ -701,24 +711,24 @@ cglobal pred8x8_plane_%1, 2, 9, %2
movd m1, r5d
movd m3, r3d
-%ifidn %1, mmx
- punpcklwd m0, m0
- punpcklwd m1, m1
- punpcklwd m3, m3
- punpckldq m0, m0
- punpckldq m1, m1
- punpckldq m3, m3
-%elifidn %1, mmx2
- pshufw m0, m0, 0x0
- pshufw m1, m1, 0x0
- pshufw m3, m3, 0x0
-%else
+%if cpuflag(sse2)
pshuflw m0, m0, 0x0
pshuflw m1, m1, 0x0
pshuflw m3, m3, 0x0
punpcklqdq m0, m0 ; splat H (words)
punpcklqdq m1, m1 ; splat V (words)
punpcklqdq m3, m3 ; splat a (words)
+%elif cpuflag(mmx2)
+ pshufw m0, m0, 0x0
+ pshufw m1, m1, 0x0
+ pshufw m3, m3, 0x0
+%elif cpuflag(mmx)
+ punpcklwd m0, m0
+ punpcklwd m1, m1
+ punpcklwd m3, m3
+ punpckldq m0, m0
+ punpckldq m1, m1
+ punpckldq m3, m3
%endif
%if mmsize == 8
mova m2, m0
@@ -768,12 +778,15 @@ ALIGN 16
REP_RET
%endmacro
-INIT_MMX
-H264_PRED8x8_PLANE mmx, 0
-H264_PRED8x8_PLANE mmx2, 0
+INIT_MMX mmx
+H264_PRED8x8_PLANE
+INIT_MMX mmx2
+H264_PRED8x8_PLANE
+INIT_XMM sse2
+H264_PRED8x8_PLANE
+INIT_XMM ssse3
+H264_PRED8x8_PLANE
INIT_XMM
-H264_PRED8x8_PLANE sse2, 8
-H264_PRED8x8_PLANE ssse3, 8
;-----------------------------------------------------------------------------
; void pred8x8_vertical(uint8_t *src, int stride)
@@ -795,22 +808,22 @@ cglobal pred8x8_vertical_mmx, 2,2
; void pred8x8_horizontal(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro PRED8x8_H 1
-cglobal pred8x8_horizontal_%1, 2,3
+%macro PRED8x8_H 0
+cglobal pred8x8_horizontal, 2,3
mov r2, 4
-%ifidn %1, ssse3
+%if cpuflag(ssse3)
mova m2, [pb_3]
%endif
.loop:
movd m0, [r0+r1*0-4]
movd m1, [r0+r1*1-4]
-%ifidn %1, ssse3
+%if cpuflag(ssse3)
pshufb m0, m2
pshufb m1, m2
%else
punpcklbw m0, m0
punpcklbw m1, m1
-%ifidn %1, mmxext
+%if cpuflag(mmx2)
pshufw m0, m0, 0xff
pshufw m1, m1, 0xff
%else
@@ -828,10 +841,13 @@ cglobal pred8x8_horizontal_%1, 2,3
REP_RET
%endmacro
+INIT_MMX mmx
+PRED8x8_H
+INIT_MMX mmx2
+PRED8x8_H
+INIT_MMX ssse3
+PRED8x8_H
INIT_MMX
-PRED8x8_H mmx
-PRED8x8_H mmxext
-PRED8x8_H ssse3
;-----------------------------------------------------------------------------
; void pred8x8_top_dc_mmxext(uint8_t *src, int stride)
@@ -967,8 +983,8 @@ cglobal pred8x8_dc_rv40_mmxext, 2,7
; void pred8x8_tm_vp8(uint8_t *src, int stride)
;-----------------------------------------------------------------------------
-%macro PRED8x8_TM_MMX 1
-cglobal pred8x8_tm_vp8_%1, 2,6
+%macro PRED8x8_TM_MMX 0
+cglobal pred8x8_tm_vp8, 2,6
sub r0, r1
pxor mm7, mm7
movq mm0, [r0]
@@ -984,14 +1000,14 @@ cglobal pred8x8_tm_vp8_%1, 2,6
sub r3d, r4d
movd mm2, r2d
movd mm4, r3d
-%ifidn %1, mmx
+%if cpuflag(mmx2)
+ pshufw mm2, mm2, 0
+ pshufw mm4, mm4, 0
+%else
punpcklwd mm2, mm2
punpcklwd mm4, mm4
punpckldq mm2, mm2
punpckldq mm4, mm4
-%else
- pshufw mm2, mm2, 0
- pshufw mm4, mm4, 0
%endif
movq mm3, mm2
movq mm5, mm4
@@ -1009,8 +1025,11 @@ cglobal pred8x8_tm_vp8_%1, 2,6
REP_RET
%endmacro
-PRED8x8_TM_MMX mmx
-PRED8x8_TM_MMX mmxext
+INIT_MMX mmx
+PRED8x8_TM_MMX
+INIT_MMX mmx2
+PRED8x8_TM_MMX
+INIT_MMX
cglobal pred8x8_tm_vp8_sse2, 2,6,4
sub r0, r1
@@ -2510,8 +2529,8 @@ cglobal pred4x4_dc_mmxext, 3,5
; void pred4x4_tm_vp8_mmxext(uint8_t *src, const uint8_t *topright, int stride)
;-----------------------------------------------------------------------------
-%macro PRED4x4_TM_MMX 1
-cglobal pred4x4_tm_vp8_%1, 3,6
+%macro PRED4x4_TM_MMX 0
+cglobal pred4x4_tm_vp8, 3,6
sub r0, r2
pxor mm7, mm7
movd mm0, [r0]
@@ -2525,14 +2544,14 @@ cglobal pred4x4_tm_vp8_%1, 3,6
sub r3d, r4d
movd mm2, r1d
movd mm4, r3d
-%ifidn %1, mmx
+%if cpuflag(mmx2)
+ pshufw mm2, mm2, 0
+ pshufw mm4, mm4, 0
+%else
punpcklwd mm2, mm2
punpcklwd mm4, mm4
punpckldq mm2, mm2
punpckldq mm4, mm4
-%else
- pshufw mm2, mm2, 0
- pshufw mm4, mm4, 0
%endif
paddw mm2, mm0
paddw mm4, mm0
@@ -2546,8 +2565,11 @@ cglobal pred4x4_tm_vp8_%1, 3,6
REP_RET
%endmacro
-PRED4x4_TM_MMX mmx
-PRED4x4_TM_MMX mmxext
+INIT_MMX mmx
+PRED4x4_TM_MMX
+INIT_MMX mmx2
+PRED4x4_TM_MMX
+INIT_MMX
cglobal pred4x4_tm_vp8_ssse3, 3,3
sub r0, r2
diff --git a/libavcodec/x86/h264_intrapred_init.c b/libavcodec/x86/h264_intrapred_init.c
index 58740e2ed1..5975d07706 100644
--- a/libavcodec/x86/h264_intrapred_init.c
+++ b/libavcodec/x86/h264_intrapred_init.c
@@ -96,9 +96,9 @@ PRED16x16(horizontal, 10, sse2)
void ff_pred16x16_vertical_mmx (uint8_t *src, int stride);
void ff_pred16x16_vertical_sse (uint8_t *src, int stride);
void ff_pred16x16_horizontal_mmx (uint8_t *src, int stride);
-void ff_pred16x16_horizontal_mmxext(uint8_t *src, int stride);
+void ff_pred16x16_horizontal_mmx2 (uint8_t *src, int stride);
void ff_pred16x16_horizontal_ssse3 (uint8_t *src, int stride);
-void ff_pred16x16_dc_mmxext (uint8_t *src, int stride);
+void ff_pred16x16_dc_mmx2 (uint8_t *src, int stride);
void ff_pred16x16_dc_sse2 (uint8_t *src, int stride);
void ff_pred16x16_dc_ssse3 (uint8_t *src, int stride);
void ff_pred16x16_plane_h264_mmx (uint8_t *src, int stride);
@@ -114,21 +114,21 @@ void ff_pred16x16_plane_svq3_mmx2 (uint8_t *src, int stride);
void ff_pred16x16_plane_svq3_sse2 (uint8_t *src, int stride);
void ff_pred16x16_plane_svq3_ssse3 (uint8_t *src, int stride);
void ff_pred16x16_tm_vp8_mmx (uint8_t *src, int stride);
-void ff_pred16x16_tm_vp8_mmxext (uint8_t *src, int stride);
+void ff_pred16x16_tm_vp8_mmx2 (uint8_t *src, int stride);
void ff_pred16x16_tm_vp8_sse2 (uint8_t *src, int stride);
void ff_pred8x8_top_dc_mmxext (uint8_t *src, int stride);
void ff_pred8x8_dc_rv40_mmxext (uint8_t *src, int stride);
void ff_pred8x8_dc_mmxext (uint8_t *src, int stride);
void ff_pred8x8_vertical_mmx (uint8_t *src, int stride);
void ff_pred8x8_horizontal_mmx (uint8_t *src, int stride);
-void ff_pred8x8_horizontal_mmxext (uint8_t *src, int stride);
+void ff_pred8x8_horizontal_mmx2 (uint8_t *src, int stride);
void ff_pred8x8_horizontal_ssse3 (uint8_t *src, int stride);
void ff_pred8x8_plane_mmx (uint8_t *src, int stride);
void ff_pred8x8_plane_mmx2 (uint8_t *src, int stride);
void ff_pred8x8_plane_sse2 (uint8_t *src, int stride);
void ff_pred8x8_plane_ssse3 (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_mmx (uint8_t *src, int stride);
-void ff_pred8x8_tm_vp8_mmxext (uint8_t *src, int stride);
+void ff_pred8x8_tm_vp8_mmx2 (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_sse2 (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_ssse3 (uint8_t *src, int stride);
void ff_pred8x8l_top_dc_mmxext (uint8_t *src, int has_topleft, int has_topright, int stride);
@@ -163,7 +163,7 @@ void ff_pred4x4_vertical_right_mmxext(uint8_t *src, const uint8_t *topright, int
void ff_pred4x4_horizontal_up_mmxext(uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_horizontal_down_mmxext(uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_tm_vp8_mmx (uint8_t *src, const uint8_t *topright, int stride);
-void ff_pred4x4_tm_vp8_mmxext (uint8_t *src, const uint8_t *topright, int stride);
+void ff_pred4x4_tm_vp8_mmx2 (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_tm_vp8_ssse3 (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_vertical_vp8_mmxext(uint8_t *src, const uint8_t *topright, int stride);
@@ -199,10 +199,10 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
}
if (mm_flags & AV_CPU_FLAG_MMX2) {
- h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
- h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext;
+ h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx2;
+ h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmx2;
if (chroma_format_idc == 1)
- h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
+ h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx2;
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_mmxext;
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_mmxext;
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_mmxext;
@@ -232,10 +232,10 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
}
}
if (codec_id == CODEC_ID_VP8) {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_mmxext;
+ h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_mmx2;
h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_mmxext;
- h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_mmxext;
- h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmxext;
+ h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_mmx2;
+ h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmx2;
h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_mmxext;
} else {
if (chroma_format_idc == 1)
diff --git a/libavcodec/x86/vp8dsp-init.c b/libavcodec/x86/vp8dsp-init.c
index 3e550b1543..33a908ed76 100644
--- a/libavcodec/x86/vp8dsp-init.c
+++ b/libavcodec/x86/vp8dsp-init.c
@@ -389,11 +389,13 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
+#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
+#endif
}
if (mm_flags & AV_CPU_FLAG_SSE2) {
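
The new guard presumably exists because these SSE2 loop filters spill to 16-byte-aligned stack slots, which only x86_64 ABIs (or builds where HAVE_ALIGNED_STACK is set) guarantee; on other 32-bit targets the previously selected versions remain in place. A hedged sketch of the same gated pointer-assignment pattern, using illustrative names (lf_c_version, lf_sse2_version, init_loop_filter are not FFmpeg API):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the build-system macros; in FFmpeg they come from config.h. */
    #ifndef ARCH_X86_64
    #define ARCH_X86_64        0
    #endif
    #ifndef HAVE_ALIGNED_STACK
    #define HAVE_ALIGNED_STACK 0
    #endif

    typedef void (*loop_filter_fn)(uint8_t *dst, int stride);

    static void lf_c_version(uint8_t *dst, int stride)    { (void)dst; (void)stride; puts("C"); }
    static void lf_sse2_version(uint8_t *dst, int stride) { (void)dst; (void)stride; puts("SSE2"); }

    static void init_loop_filter(loop_filter_fn *fn)
    {
        *fn = lf_c_version;
    #if ARCH_X86_64 || HAVE_ALIGNED_STACK
        /* Only pick the SSE2 version when the ABI guarantees a 16-byte
         * aligned stack; otherwise its aligned spills could fault. */
        *fn = lf_sse2_version;
    #endif
    }

    int main(void)
    {
        loop_filter_fn fn;
        init_loop_filter(&fn);
        fn(NULL, 0);
        return 0;
    }
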
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index 42bb479b9b..75c0b56f94 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -1465,27 +1465,6 @@ VP8_DC_WHT
%endif
%endmacro
-%macro SPLATB_REG 2-3
-%if cpuflag(ssse3)
- movd %1, %2d
- pshufb %1, %3
-%elif cpuflag(sse2)
- movd %1, %2d
- punpcklbw %1, %1
- pshuflw %1, %1, 0x0
- punpcklqdq %1, %1
-%elif cpuflag(mmx2)
- movd %1, %2d
- punpcklbw %1, %1
- pshufw %1, %1, 0x0
-%else
- movd %1, %2d
- punpcklbw %1, %1
- punpcklwd %1, %1
- punpckldq %1, %1
-%endif
-%endmacro
-
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%if mmsize == 8 ; mmx/mmxext