--- decoder.c	2004/07/10 17:49:31	1.61
+++ decoder.c	2004/08/10 21:58:55	1.65
@@ -20,7 +20,7 @@
  * along with this program ; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * $Id: decoder.c,v 1.61 2004/07/10 17:49:31 edgomez Exp $
+ * $Id: decoder.c,v 1.65 2004/08/10 21:58:55 edgomez Exp $
  *
  ****************************************************************************/
 
@@ -48,6 +48,7 @@
 #include "image/interpolate8x8.h"
 #include "image/reduced.h"
 #include "image/font.h"
+#include "image/qpel.h"
 
 #include "bitstream/mbcoding.h"
 #include "prediction/mbprediction.h"
@@ -390,14 +391,13 @@
 				  uint8_t * pY_Cur,
 				  uint8_t * pU_Cur,
 				  uint8_t * pV_Cur,
-				  const int reduced_resolution,
+				  int reduced_resolution,
 				  const MACROBLOCK * pMB)
 {
-	DECLARE_ALIGNED_MATRIX(data, 6, 64, int16_t, CACHE_LINE);
+	DECLARE_ALIGNED_MATRIX(data, 1, 64, int16_t, CACHE_LINE);
 
 	int stride = dec->edged_width;
 	int next_block = stride * (reduced_resolution ? 16 : 8);
-	const int stride2 = stride/2;
 	int i;
 	const uint32_t iQuant = pMB->quant;
 	const int direction = dec->alternate_vertical_scan ? 2 : 0;
@@ -407,63 +407,62 @@
 					  int direction,
 					  const int quant,
 					  const uint16_t *matrix);
+	typedef void (*add_residual_function_t)(
+					  uint8_t *predicted_block,
+					  const int16_t *residual,
+					  int stride);
 
 	const get_inter_block_function_t get_inter_block = (dec->quant_type == 0)
-		? get_inter_block_h263
-		: get_inter_block_mpeg;
+		? (get_inter_block_function_t)get_inter_block_h263
+		: (get_inter_block_function_t)get_inter_block_mpeg;
+
+	const add_residual_function_t add_residual = (reduced_resolution)
+		? (add_residual_function_t)add_upsampled_8x8_16to8
+		: (add_residual_function_t)transfer_16to8add;
+
+	uint8_t *dst[6];
+	int strides[6];
 
-	memset(&data[0], 0, 6*64*sizeof(int16_t));	/* clear */
 
-	for (i = 0; i < 6; i++) {
+	if (dec->interlacing && pMB->field_dct) {
+		next_block = stride;
+		stride *= 2;
+	}
 
-		if (cbp & (1 << (5 - i))) {	/* coded */
+	reduced_resolution = !!reduced_resolution;
+	dst[0] = pY_Cur;
+	dst[2] = pY_Cur + next_block;
+	dst[1] = dst[0] + (8<<reduced_resolution);
+	dst[3] = dst[2] + (8<<reduced_resolution);
+	dst[4] = pU_Cur;
+	dst[5] = pV_Cur;
+	strides[0] = strides[1] = strides[2] = strides[3] = stride;
+	strides[4] = stride/2;
+	strides[5] = stride/2;
 
+	for (i = 0; i < 6; i++) {
+		/* Process only coded blocks */
+		if (cbp & (1 << (5 - i))) {
+
+			/* Clear the block */
+			memset(&data[0], 0, 64*sizeof(int16_t));
+
+			/* Decode coefficients and dequantize on the fly */
 			start_timer();
-			get_inter_block(bs, &data[i * 64], direction, iQuant, get_inter_matrix(dec->mpeg_quant_matrices));
+			get_inter_block(bs, &data[0], direction, iQuant, get_inter_matrix(dec->mpeg_quant_matrices));
 			stop_coding_timer();
 
+			/* iDCT */
 			start_timer();
-			idct(&data[i * 64]);
+			idct(&data[0]);
 			stop_idct_timer();
-		}
-	}
 
-	if (dec->interlacing && pMB->field_dct) {
-		next_block = stride;
-		stride *= 2;
-	}
-
-	start_timer();
-	if (reduced_resolution) {
-		if (cbp & 32)
-			add_upsampled_8x8_16to8(pY_Cur, &data[0 * 64], stride);
-		if (cbp & 16)
-			add_upsampled_8x8_16to8(pY_Cur + 16, &data[1 * 64], stride);
-		if (cbp & 8)
-			add_upsampled_8x8_16to8(pY_Cur + next_block, &data[2 * 64], stride);
-		if (cbp & 4)
-			add_upsampled_8x8_16to8(pY_Cur + 16 + next_block, &data[3 * 64], stride);
-		if (cbp & 2)
-			add_upsampled_8x8_16to8(pU_Cur, &data[4 * 64], stride2);
-		if (cbp & 1)
-			add_upsampled_8x8_16to8(pV_Cur, &data[5 * 64], stride2);
-	} else {
-		if (cbp & 32)
-			transfer_16to8add(pY_Cur, &data[0 * 64], stride);
-		if (cbp & 16)
-			transfer_16to8add(pY_Cur + 8, &data[1 * 64], stride);
-		if (cbp & 8)
-			transfer_16to8add(pY_Cur + next_block, &data[2 * 64], stride);
-		if (cbp & 4)
-			transfer_16to8add(pY_Cur + 8 + next_block, &data[3 * 64], stride);
-		if (cbp & 2)
-			transfer_16to8add(pU_Cur, &data[4 * 64], stride2);
-		if (cbp & 1)
-			transfer_16to8add(pV_Cur, &data[5 * 64], stride2);
+			/* Add this residual to the predicted block */
+			start_timer();
+			add_residual(dst[i], &data[0], strides[i]);
+			stop_transfer_timer();
+		}
 	}
-
-	stop_transfer_timer();
 }
 
 /* decode an inter macroblock */
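The hunk above restructures decoder_mb_decode(): instead of decoding all six 8x8 blocks of a macroblock into a 6*64 scratch buffer and then dispatching twice over the cbp bits (once per reduced-resolution mode), one 64-coefficient block at a time is decoded, inverse-transformed, and immediately added to its prediction, with the destination pointer, stride, and residual-add routine taken from small per-block tables. A minimal standalone sketch of that table-driven pattern follows; the entropy decode and iDCT are elided, and add_full() and mb_add_residuals() are hypothetical stand-ins for illustration, not XviD functions:

    #include <stdint.h>
    #include <string.h>

    typedef void (*add_residual_function_t)(uint8_t *dst,
                                            const int16_t *residual,
                                            int stride);

    /* Plain-C stand-in for transfer_16to8add: add an 8x8 residual to the
     * prediction with saturation. */
    static void add_full(uint8_t *dst, const int16_t *res, int stride)
    {
        int x, y;
        for (y = 0; y < 8; y++)
            for (x = 0; x < 8; x++) {
                int v = dst[y * stride + x] + res[y * 8 + x];
                dst[y * stride + x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
    }

    static void mb_add_residuals(uint32_t cbp, uint8_t *pY, uint8_t *pU,
                                 uint8_t *pV, int stride)
    {
        int16_t data[64];              /* one block, reused six times */
        const add_residual_function_t add_residual = add_full;
        uint8_t *dst[6];
        int strides[6], i;

        dst[0] = pY;                   /* Y: 2x2 arrangement of 8x8 blocks */
        dst[1] = pY + 8;
        dst[2] = pY + 8 * stride;
        dst[3] = pY + 8 * stride + 8;
        dst[4] = pU;                   /* U, V planes at half resolution */
        dst[5] = pV;
        strides[0] = strides[1] = strides[2] = strides[3] = stride;
        strides[4] = strides[5] = stride / 2;

        for (i = 0; i < 6; i++) {
            if (cbp & (1 << (5 - i))) {        /* block i is coded */
                memset(data, 0, sizeof(data)); /* clear only when needed */
                /* ... get_inter_block() + idct() would fill data here ... */
                add_residual(dst[i], data, strides[i]);
            }
        }
    }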
@@ -1038,7 +1037,6 @@
 
 	if (!direct) {
 		uv_dx = pMB->mvs[0].x;
 		uv_dy = pMB->mvs[0].y;
-
 		b_uv_dx = pMB->b_mvs[0].x;
 		b_uv_dy = pMB->b_mvs[0].y;
@@ -1051,21 +1049,20 @@
 
 		uv_dx = (uv_dx >> 1) + roundtab_79[uv_dx & 0x3];
 		uv_dy = (uv_dy >> 1) + roundtab_79[uv_dy & 0x3];
-
 		b_uv_dx = (b_uv_dx >> 1) + roundtab_79[b_uv_dx & 0x3];
 		b_uv_dy = (b_uv_dy >> 1) + roundtab_79[b_uv_dy & 0x3];
 
 	} else {
-		if(dec->quarterpel) {
-			uv_dx = (pMB->mvs[0].x / 2) + (pMB->mvs[1].x / 2) + (pMB->mvs[2].x / 2) + (pMB->mvs[3].x / 2);
-			uv_dy = (pMB->mvs[0].y / 2) + (pMB->mvs[1].y / 2) + (pMB->mvs[2].y / 2) + (pMB->mvs[3].y / 2);
-			b_uv_dx = (pMB->b_mvs[0].x / 2) + (pMB->b_mvs[1].x / 2) + (pMB->b_mvs[2].x / 2) + (pMB->b_mvs[3].x / 2);
-			b_uv_dy = (pMB->b_mvs[0].y / 2) + (pMB->b_mvs[1].y / 2) + (pMB->b_mvs[2].y / 2) + (pMB->b_mvs[3].y / 2);
-		} else {
-			uv_dx = pMB->mvs[0].x + pMB->mvs[1].x + pMB->mvs[2].x + pMB->mvs[3].x;
-			uv_dy = pMB->mvs[0].y + pMB->mvs[1].y + pMB->mvs[2].y + pMB->mvs[3].y;
-			b_uv_dx = pMB->b_mvs[0].x + pMB->b_mvs[1].x + pMB->b_mvs[2].x + pMB->b_mvs[3].x;
-			b_uv_dy = pMB->b_mvs[0].y + pMB->b_mvs[1].y + pMB->b_mvs[2].y + pMB->b_mvs[3].y;
+		uv_dx = pMB->mvs[0].x + pMB->mvs[1].x + pMB->mvs[2].x + pMB->mvs[3].x;
+		uv_dy = pMB->mvs[0].y + pMB->mvs[1].y + pMB->mvs[2].y + pMB->mvs[3].y;
+		b_uv_dx = pMB->b_mvs[0].x + pMB->b_mvs[1].x + pMB->b_mvs[2].x + pMB->b_mvs[3].x;
+		b_uv_dy = pMB->b_mvs[0].y + pMB->b_mvs[1].y + pMB->b_mvs[2].y + pMB->b_mvs[3].y;
+
+		if (dec->quarterpel) {
+			uv_dx /= 2;
+			uv_dy /= 2;
+			b_uv_dx /= 2;
+			b_uv_dy /= 2;
 		}
 
 		uv_dx = (uv_dx >> 3) + roundtab_76[uv_dx & 0xf];
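The direct-mode change above is not purely cosmetic. The 1.61 code halved each quarter-pel component before summing; 1.65 sums the four luma vectors first and halves once. Since C integer division truncates toward zero, the two forms can differ for odd negative components, as the following self-contained check shows (the sample vector values are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical quarter-pel x components of the four block vectors */
        const int mvs_x[4] = { 1, 1, 1, -3 };

        /* 1.61 form: halve each component, then sum (truncates four times) */
        int old_form = mvs_x[0] / 2 + mvs_x[1] / 2 + mvs_x[2] / 2 + mvs_x[3] / 2;

        /* 1.65 form: sum first, halve once (truncates once) */
        int sum = mvs_x[0] + mvs_x[1] + mvs_x[2] + mvs_x[3];
        int new_form = sum / 2;

        /* C division truncates toward zero, so -3/2 == -1 in the old form
         * while the summed form sees 0/2 == 0 */
        printf("old=%d new=%d\n", old_form, new_form);   /* old=-1 new=0 */
        return 0;
    }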
@@ -1113,68 +1110,38 @@
 
 	if(dec->quarterpel) {
 		if(!direct) {
-			interpolate16x16_quarterpel(dec->tmp.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
-										dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
-										pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
+			interpolate16x16_add_quarterpel(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
+											dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
+											pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
 		} else {
-			interpolate8x8_quarterpel(dec->tmp.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
-									  dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
-									  pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
-			interpolate8x8_quarterpel(dec->tmp.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
-									  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
-									  pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
-			interpolate8x8_quarterpel(dec->tmp.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
-									  dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
-									  pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
-			interpolate8x8_quarterpel(dec->tmp.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
-									  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
-									  pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
+			interpolate8x8_add_quarterpel(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
+										  dec->qtmp.y + 128, 16*x_pos, 16*y_pos,
+										  pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
+			interpolate8x8_add_quarterpel(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
+										  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos,
+										  pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
+			interpolate8x8_add_quarterpel(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
+										  dec->qtmp.y + 128, 16*x_pos, 16*y_pos + 8,
+										  pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
+			interpolate8x8_add_quarterpel(dec->cur.y, backward.y, dec->qtmp.y, dec->qtmp.y + 64,
+										  dec->qtmp.y + 128, 16*x_pos + 8, 16*y_pos + 8,
+										  pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
 		}
 	} else {
-		interpolate8x8_switch(dec->tmp.y, backward.y, 16 * x_pos, 16 * y_pos,
-							  pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
-		interpolate8x8_switch(dec->tmp.y, backward.y, 16 * x_pos + 8,
-							  16 * y_pos, pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
-		interpolate8x8_switch(dec->tmp.y, backward.y, 16 * x_pos,
-							  16 * y_pos + 8, pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
-		interpolate8x8_switch(dec->tmp.y, backward.y, 16 * x_pos + 8,
-							  16 * y_pos + 8, pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
-	}
-
-	interpolate8x8_switch(dec->tmp.u, backward.u, 8 * x_pos, 8 * y_pos,
-						  b_uv_dx, b_uv_dy, stride2, 0);
-	interpolate8x8_switch(dec->tmp.v, backward.v, 8 * x_pos, 8 * y_pos,
-						  b_uv_dx, b_uv_dy, stride2, 0);
-
-	interpolate8x8_avg2(dec->cur.y + (16 * y_pos * stride) + 16 * x_pos,
-						dec->cur.y + (16 * y_pos * stride) + 16 * x_pos,
-						dec->tmp.y + (16 * y_pos * stride) + 16 * x_pos,
-						stride, 1, 8);
-
-	interpolate8x8_avg2(dec->cur.y + (16 * y_pos * stride) + 16 * x_pos + 8,
-						dec->cur.y + (16 * y_pos * stride) + 16 * x_pos + 8,
-						dec->tmp.y + (16 * y_pos * stride) + 16 * x_pos + 8,
-						stride, 1, 8);
-
-	interpolate8x8_avg2(dec->cur.y + ((16 * y_pos + 8) * stride) + 16 * x_pos,
-						dec->cur.y + ((16 * y_pos + 8) * stride) + 16 * x_pos,
-						dec->tmp.y + ((16 * y_pos + 8) * stride) + 16 * x_pos,
-						stride, 1, 8);
-
-	interpolate8x8_avg2(dec->cur.y + ((16 * y_pos + 8) * stride) + 16 * x_pos + 8,
-						dec->cur.y + ((16 * y_pos + 8) * stride) + 16 * x_pos + 8,
-						dec->tmp.y + ((16 * y_pos + 8) * stride) + 16 * x_pos + 8,
-						stride, 1, 8);
-
-	interpolate8x8_avg2(dec->cur.u + (8 * y_pos * stride2) + 8 * x_pos,
-						dec->cur.u + (8 * y_pos * stride2) + 8 * x_pos,
-						dec->tmp.u + (8 * y_pos * stride2) + 8 * x_pos,
-						stride2, 1, 8);
-
-	interpolate8x8_avg2(dec->cur.v + (8 * y_pos * stride2) + 8 * x_pos,
-						dec->cur.v + (8 * y_pos * stride2) + 8 * x_pos,
-						dec->tmp.v + (8 * y_pos * stride2) + 8 * x_pos,
-						stride2, 1, 8);
+		interpolate8x8_add_switch(dec->cur.y, backward.y, 16 * x_pos, 16 * y_pos,
+								  pMB->b_mvs[0].x, pMB->b_mvs[0].y, stride, 0);
+		interpolate8x8_add_switch(dec->cur.y, backward.y, 16 * x_pos + 8,
+								  16 * y_pos, pMB->b_mvs[1].x, pMB->b_mvs[1].y, stride, 0);
+		interpolate8x8_add_switch(dec->cur.y, backward.y, 16 * x_pos,
+								  16 * y_pos + 8, pMB->b_mvs[2].x, pMB->b_mvs[2].y, stride, 0);
+		interpolate8x8_add_switch(dec->cur.y, backward.y, 16 * x_pos + 8,
+								  16 * y_pos + 8, pMB->b_mvs[3].x, pMB->b_mvs[3].y, stride, 0);
+	}
+
+	interpolate8x8_add_switch(dec->cur.u, backward.u, 8 * x_pos, 8 * y_pos,
+							  b_uv_dx, b_uv_dy, stride2, 0);
+	interpolate8x8_add_switch(dec->cur.v, backward.v, 8 * x_pos, 8 * y_pos,
+							  b_uv_dx, b_uv_dy, stride2, 0);
 
 	stop_comp_timer();
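The interpolate*_add_* calls above (hence the new image/qpel.h include) fuse what 1.61 did in two passes: interpolate the backward prediction into the scratch plane dec->tmp, then average dec->tmp into dec->cur with interpolate8x8_avg2. Fusing the two removes a full write and re-read of the temporary plane per macroblock. Below is a scalar model of such a fused half-pel primitive, assuming MPEG-4 half-pel rounding and that the "_add" variants average into the destination; XviD's real versions are optimized and declared in image/interpolate8x8.h and image/qpel.h, so this sketch is only illustrative:

    #include <stdint.h>

    /* Fused "interpolate backward prediction and average it into cur",
     * where cur already holds the forward prediction. */
    static void
    interpolate8x8_add_model(uint8_t *cur, const uint8_t *ref,
                             int x, int y, int dx, int dy,
                             int stride, int rounding)
    {
        const int hx = dx & 1, hy = dy & 1;   /* half-pel fraction flags */
        const uint8_t *src = ref + (y + (dy >> 1)) * stride + x + (dx >> 1);
        uint8_t *dst = cur + y * stride + x;
        int i, j;

        for (j = 0; j < 8; j++) {
            for (i = 0; i < 8; i++) {
                int a = src[i];
                int b = src[i + hx];
                int c = src[i + stride * hy];
                int d = src[i + stride * hy + hx];
                int p;

                if (hx && hy)              /* 2-D half-pel: 4-tap average */
                    p = (a + b + c + d + 2 - rounding) >> 2;
                else if (hx || hy)         /* 1-D half-pel: 2-tap average */
                    p = (a + (hx ? b : c) + 1 - rounding) >> 1;
                else                       /* integer-pel position */
                    p = a;

                /* the averaging step 1.61 performed in a second pass with
                 * interpolate8x8_avg2 over dec->tmp */
                dst[i] = (uint8_t)((dst[i] + p + 1) >> 1);
            }
            src += stride;
            dst += stride;
        }
    }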