--- motion_comp.c	2002/12/09 10:47:05	1.11.2.11
+++ motion_comp.c	2002/12/28 15:34:29	1.11.2.17
@@ -14,25 +14,6 @@
 #define SIGN(X) (((X)>0)?1:-1)
 
-// decode an inter macroblock
-
-static void
-rrv_mv_scaleup(VECTOR * mv)
-{
-    if (mv->x > 0) {
-        mv->x = 2*mv->x - 1;
-    } else if (mv->x < 0) {
-        mv->x = 2*mv->x + 1;
-    }
-
-    if (mv->y > 0) {
-        mv->y = 2*mv->y - 1;
-    } else if (mv->y < 0) {
-        mv->y = 2*mv->y + 1;
-    }
-}
-
-
 static __inline void
 compensate16x16_interpolate(int16_t * const dct_codes,
                             uint8_t * const cur,
@@ -50,135 +31,47 @@
                             const uint32_t rounding)
 {
-    if (reduced_resolution)
-    {
-
-        /* XXX: todo */
-
-        VECTOR mv;
-        int i,j;
-        uint8_t tmp[18*18];
-
-        x*=2;
-        y*=2;
-
-        mv.x = dx;
-        mv.y = dy;
-        rrv_mv_scaleup(&mv);
-
-        interpolate32x32_switch(
-            refv, ref, x, y, mv.x, mv.y, stride, rounding);
-
-        for (j = 0; j < 32; j++)
-            for (i = 0; i < 32; i++)
-                cur[(y+j)*stride + x + i] -= refv[(y+j)*stride + x + i];
-
-        filter_18x18_to_8x8(dct_codes, cur + y*stride + x, stride);
-        filter_18x18_to_8x8(dct_codes+64, cur + y*stride + x + 16, stride);
-        filter_18x18_to_8x8(dct_codes+128, cur + (y+16)*stride + x, stride);
-        filter_18x18_to_8x8(dct_codes+192, cur + (y+16)*stride + x + 16, stride);
-
-        /*
-        for (j = 0; j < 16; j++)
-            for (i = 0; i < 16; i++)
-                tmp[(j+1)*18 + i+1] = refv[ (y+j)*stride + x+i];
-
-        for (i = 1; i < 17; i++)
-        {
-            tmp[ 0*18 + i] = tmp[ 1*18 + i];
-            tmp[17*18 + i] = tmp[16*18 + i];
-        }
-
-        for (i = 0; i < 18; i++)
-        {
-            tmp[ i*18 + 0] = tmp[i*18 + 1];
-            tmp[ i*18 + 17] = tmp[i*18 + 16];
-        }
-        filter_18x18_to_8x8(dct_codes, tmp, 18);
-
-
-        for (j = 0; j < 16; j++)
-            for (i = 0; i < 16; i++)
-                tmp[(j+1)*18 + i+1] = refv[ (y+j)*stride + x+i + 16];
+    if (reduced_resolution) {
+        const uint8_t * reference;
+        x*=2; y*=2;
 
-        for (i = 1; i < 17; i++)
-        {
-            tmp[ 0*18 + i] = tmp[ 1*18 + i];
-            tmp[17*18 + i] = tmp[16*18 + i];
-        }
-
-        for (i = 0; i < 18; i++)
-        {
-            tmp[ i*18 + 0] = tmp[i*18 + 1];
-            tmp[ i*18 + 17] = tmp[i*18 + 16];
-        }
-        filter_18x18_to_8x8(dct_codes+64, tmp, 18);
-
-        for (j = 0; j < 16; j++)
-            for (i = 0; i < 16; i++)
-                tmp[(j+1)*18 + i+1] = refv[ (y+16+j)*stride + x+i];
-
-        for (i = 1; i < 17; i++)
-        {
-            tmp[ 0*18 + i] = tmp[ 1*18 + i];
-            tmp[17*18 + i] = tmp[16*18 + i];
-        }
+        reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
+
+        filter_18x18_to_8x8(dct_codes, cur+y*stride + x, stride);
+        filter_diff_18x18_to_8x8(dct_codes, reference, stride);
 
-        for (i = 0; i < 18; i++)
-        {
-            tmp[ i*18 + 0] = tmp[i*18 + 1];
-            tmp[ i*18 + 17] = tmp[i*18 + 16];
-        }
-        filter_18x18_to_8x8(dct_codes+128, tmp, 18);
+        filter_18x18_to_8x8(dct_codes+64, cur+y*stride + x + 16, stride);
+        filter_diff_18x18_to_8x8(dct_codes+64, reference + 16, stride);
 
-        for (j = 0; j < 16; j++)
-            for (i = 0; i < 16; i++)
-                tmp[(j+1)*18 + i+1] = refv[ (y+16+j)*stride + x+i + 16];
+        filter_18x18_to_8x8(dct_codes+128, cur+(y+16)*stride + x, stride);
+        filter_diff_18x18_to_8x8(dct_codes+128, reference + 16*stride, stride);
 
-        for (i = 1; i < 17; i++)
-        {
-            tmp[ 0*18 + i] = tmp[ 1*18 + i];
-            tmp[17*18 + i] = tmp[16*18 + i];
-        }
+        filter_18x18_to_8x8(dct_codes+192, cur+(y+16)*stride + x + 16, stride);
+        filter_diff_18x18_to_8x8(dct_codes+192, reference + 16*stride + 16, stride);
 
-        for (i = 0; i < 18; i++)
-        {
-            tmp[ i*18 + 0] = tmp[i*18 + 1];
-            tmp[ i*18 + 17] = tmp[i*18 + 16];
-        }
-        filter_18x18_to_8x8(dct_codes+192, tmp, 18);
-        */
-
-        //memset(dct_codes, 0, sizeof(uint16_t) * 64 * 4);
+        transfer32x32_copy(cur + y*stride + x, reference, stride);
 
-    }else{
+    } else {
        if(quarterpel) {
-
-            interpolate16x16_quarterpel((uint8_t *) refv, (uint8_t *) ref, (uint8_t *) refh,
-                (uint8_t *) refh + 64, (uint8_t *) refhv, x, y, dx, dy, stride, rounding);
+            const uint8_t * ptr;
+            if (dx&3 | dy&3) {
+                interpolate16x16_quarterpel((uint8_t *) refv, (uint8_t *) ref, (uint8_t *) refv + 32,
+                    (uint8_t *) refv + 64, (uint8_t *) refv + 96, x, y, dx, dy, stride, rounding);
+                ptr = refv + y*stride + x;
+            } else ptr = ref + (y + dy/4)*stride + x + dx/4; // fullpixel position
 
             transfer_8to16sub(dct_codes, cur + y*stride + x,
-                              refv + y*stride + x, stride);
+                              ptr, stride);
             transfer_8to16sub(dct_codes+64, cur + y*stride + x + 8,
-                              refv + y*stride + x + 8, stride);
+                              ptr + 8, stride);
             transfer_8to16sub(dct_codes+128, cur + y*stride + x + 8*stride,
-                              refv + y*stride + x + 8*stride, stride);
+                              ptr + 8*stride, stride);
             transfer_8to16sub(dct_codes+192, cur + y*stride + x + 8*stride + 8,
-                              refv + y*stride + x + 8*stride+8, stride);
+                              ptr + 8*stride+8, stride);
 
-        }
-        else
-        {
-            const uint8_t * reference;
+        } else {
+            const uint8_t * reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
 
-            switch (((dx & 1) << 1) + (dy & 1))    // ((dx%2)?2:0)+((dy%2)?1:0)
-            {
-            case 0: reference = ref + ((y + dy / 2) * stride + x + dx / 2); break;
-            case 1: reference = refv + ((y + (dy-1) / 2) * stride + x + dx / 2); break;
-            case 2: reference = refh + ((y + dy / 2) * stride + x + (dx-1) / 2); break;
-            default: // case 3:
-                reference = refhv + ((y + (dy-1) / 2) * stride + x + (dx-1) / 2); break;
-            }
             transfer_8to16sub(dct_codes, cur + y * stride + x,
                               reference, stride);
             transfer_8to16sub(dct_codes+64, cur + y * stride + x + 8,
@@ -198,8 +91,8 @@
                             const uint8_t * const refh,
                             const uint8_t * const refv,
                             const uint8_t * const refhv,
-                            const uint32_t x,
-                            const uint32_t y,
+                            uint32_t x,
+                            uint32_t y,
                             const int32_t dx,
                             const int32_t dy,
                             const uint32_t stride,
@@ -207,36 +100,66 @@
                             const int reduced_resolution,
                             const uint32_t rounding)
 {
-    if (reduced_resolution)
-    {
-        // XXX: todo
+    if (reduced_resolution) {
+        const uint8_t * reference;
+        x*=2; y*=2;
+
+        reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
+
+        filter_18x18_to_8x8(dct_codes, cur+y*stride + x, stride);
+        filter_diff_18x18_to_8x8(dct_codes, reference, stride);
+
+        transfer16x16_copy(cur + y*stride + x, reference, stride);
+
     } else {
         if(quarterpel) {
-            interpolate8x8_quarterpel((uint8_t *) refv, (uint8_t *) ref, (uint8_t *) refh,
-                (uint8_t *) refh + 64, (uint8_t *) refhv, x, y, dx, dy, stride, rounding);
+            const uint8_t * ptr;
+            if (dx&3 | dy&3) {
+                interpolate8x8_quarterpel((uint8_t *) refv, (uint8_t *) ref, (uint8_t *) refv+32,
+                    (uint8_t *) refv + 64, (uint8_t *) refv+96, x, y, dx, dy, stride, rounding);
+                ptr = refv + y*stride + x;
+            } else ptr = ref + (y + dy/4)*stride + x + dx/4; // fullpixel position
+
             transfer_8to16sub(dct_codes, cur + y*stride + x,
-                              refv + y*stride + x, stride);
-        }
-        else
-        {
-            const uint8_t * reference;
+                              ptr, stride);
+        } else {
+            const uint8_t * reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
 
-            switch (((dx & 1) << 1) + (dy & 1))    // ((dx%2)?2:0)+((dy%2)?1:0)
-            {
-            case 0: reference = ref + ((y + dy / 2) * stride + x + dx / 2); break;
-            case 1: reference = refv + ((y + (dy-1) / 2) * stride + x + dx / 2); break;
-            case 2: reference = refh + ((y + dy / 2) * stride + x + (dx-1) / 2); break;
-            default: // case 3:
-                reference = refhv + ((y + (dy-1) / 2) * stride + x + (dx-1) / 2); break;
-            }
             transfer_8to16sub(dct_codes, cur + y * stride + x,
                               reference, stride);
         }
     }
 }
+
+
+/* XXX: slow, inelegant... */
+static void
+interpolate18x18_switch(uint8_t * const cur,
+                        const uint8_t * const refn,
+                        const uint32_t x,
+                        const uint32_t y,
+                        const int32_t dx,
+                        const int dy,
+                        const uint32_t stride,
+                        const uint32_t rounding)
+{
+    interpolate8x8_switch(cur, refn, x-1, y-1, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+7, y-1, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+9, y-1, dx, dy, stride, rounding);
+
+    interpolate8x8_switch(cur, refn, x-1, y+7, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+7, y+7, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+9, y+7, dx, dy, stride, rounding);
+
+    interpolate8x8_switch(cur, refn, x-1, y+9, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+7, y+9, dx, dy, stride, rounding);
+    interpolate8x8_switch(cur, refn, x+9, y+9, dx, dy, stride, rounding);
+}
+
+
 void
 MBMotionCompensation(MACROBLOCK * const mb,
                      const uint32_t i,
@@ -254,33 +177,37 @@
                      const int reduced_resolution,
                      const uint32_t rounding)
 {
+    int32_t dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
+    int32_t dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);
 
-    if (mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER || mb->mode == MODE_INTER_Q) {
+    if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) && (dx==0) && (dy==0) ) { /* quick copy */
+        transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
+                           ref->y + 16 * (i + j * edged_width),
+                           edged_width);
+
+        transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
+                         ref->u + 8 * (i + j * edged_width/2),
+                         edged_width / 2);
+        transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
+                         ref->v + 8 * (i + j * edged_width/2),
+                         edged_width / 2);
+        return;
+    }
 
-        int32_t dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
-        int32_t dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);
+    if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER || mb->mode == MODE_INTER_Q) /*&& !quarterpel*/) {
 
-        if ( (mb->mode == MODE_NOT_CODED) && (dx==0) && (dy==0) ) { /* quick copy */
-            transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
-                               ref->y + 16 * (i + j * edged_width),
-                               edged_width);
-
-            transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
-                             ref->u + 8 * (i + j * edged_width/2),
-                             edged_width / 2);
-            transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
-                             ref->v + 8 * (i + j * edged_width/2),
-                             edged_width / 2);
-            return;
-        }
         /* quick MODE_NOT_CODED for GMC with MV!=(0,0) is still needed */
 
+        if (reduced_resolution) {
+            dx = RRV_MV_SCALEUP(dx);
+            dy = RRV_MV_SCALEUP(dy);
+        }
+
         compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
                                     refv->y, refhv->y, 16 * i, 16 * j, dx,
                                     dy, edged_width, quarterpel, reduced_resolution, rounding);
 
-        if (quarterpel)
-        {
+        if (quarterpel) {
             dx /= 2;
             dy /= 2;
         }
@@ -289,81 +216,108 @@
         dy = (dy >> 1) + roundtab_79[dy & 0x3];
 
         /* uv-block-based compensation */
-        if (reduced_resolution)
-        {
-            // XXX: todo
-        }else{
+        if (reduced_resolution) {
+            const int stride = edged_width/2;
+            uint8_t * current, * reference;
+
+            current = cur->u + 16*j*stride + 16*i;
+            reference = refv->u + 16*j*stride + 16*i;
+            interpolate18x18_switch(refv->u, ref->u, 16*i, 16*j, dx, dy, stride, rounding);
+            filter_18x18_to_8x8(dct_codes + 4*64, current, stride);
+            filter_diff_18x18_to_8x8(dct_codes + 4*64, reference, stride);
+            transfer16x16_copy(current, reference, stride);
+
+            current = cur->v + 16*j*stride + 16*i;
+            reference = refv->v + 16*j*stride + 16*i;
+            interpolate18x18_switch(refv->v, ref->v, 16*i, 16*j, dx, dy, stride, rounding);
+            filter_18x18_to_8x8(dct_codes + 5*64, current, stride);
+            filter_diff_18x18_to_8x8(dct_codes + 5*64, reference, stride);
+            transfer16x16_copy(current, reference, stride);
+        } else {
             transfer_8to16sub(&dct_codes[4 * 64],
                               cur->u + 8 * j * edged_width / 2 + 8 * i,
                               interpolate8x8_switch2(refv->u, ref->u, 8 * i, 8 * j, dx,
                                                      dy, edged_width / 2, rounding),
-                              edged_width / 2);
+                              edged_width / 2);
 
             transfer_8to16sub(&dct_codes[5 * 64],
                               cur->v + 8 * j * edged_width / 2 + 8 * i,
                               interpolate8x8_switch2(refv->u, ref->v, 8 * i, 8 * j, dx,
                                                      dy, edged_width / 2, rounding),
-                              edged_width / 2);
+                              edged_width / 2);
         }
 
     } else {    // mode == MODE_INTER4V
-        int32_t sum, dx, dy;
-        VECTOR *mvs;
+        int k;
+        VECTOR mvs[4];
 
         if(quarterpel)
-            mvs = mb->qmvs;
+            for (k = 0; k < 4; k++) mvs[k] = mb->qmvs[k];
         else
-            mvs = mb->mvs;
+            for (k = 0; k < 4; k++) mvs[k] = mb->mvs[k];
 
         if (reduced_resolution)
-        {
-            ///XXX: todo
-        }else{
-
-            compensate8x8_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
-                                      refv->y, refhv->y, 16 * i, 16 * j, mvs[0].x,
-                                      mvs[0].y, edged_width, quarterpel, reduced_resolution, rounding);
-            compensate8x8_interpolate(&dct_codes[1 * 64], cur->y, ref->y, refh->y,
-                                      refv->y, refhv->y, 16 * i + 8, 16 * j,
-                                      mvs[1].x, mvs[1].y, edged_width, quarterpel, reduced_resolution, rounding);
-            compensate8x8_interpolate(&dct_codes[2 * 64], cur->y, ref->y, refh->y,
-                                      refv->y, refhv->y, 16 * i, 16 * j + 8,
-                                      mvs[2].x, mvs[2].y, edged_width, quarterpel, reduced_resolution, rounding);
-            compensate8x8_interpolate(&dct_codes[3 * 64], cur->y, ref->y, refh->y,
-                                      refv->y, refhv->y, 16 * i + 8, 16 * j + 8,
-                                      mvs[3].x, mvs[3].y, edged_width, quarterpel, reduced_resolution, rounding);
-        }
+            for (k = 0; k < 4; k++) {
+                mvs[k].x = RRV_MV_SCALEUP(mvs[k].x);
+                mvs[k].y = RRV_MV_SCALEUP(mvs[k].y);
+            }
 
-        if(quarterpel)
-            sum = (mvs[0].x / 2) + (mvs[1].x / 2) + (mvs[2].x / 2) + (mvs[3].x / 2);
-        else
-            sum = mvs[0].x + mvs[1].x + mvs[2].x + mvs[3].x;
+        compensate8x8_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
+                                  refv->y, refhv->y, 16 * i, 16 * j, mvs[0].x,
+                                  mvs[0].y, edged_width, quarterpel, reduced_resolution, rounding);
+        compensate8x8_interpolate(&dct_codes[1 * 64], cur->y, ref->y, refh->y,
+                                  refv->y, refhv->y, 16 * i + 8, 16 * j,
+                                  mvs[1].x, mvs[1].y, edged_width, quarterpel, reduced_resolution, rounding);
+        compensate8x8_interpolate(&dct_codes[2 * 64], cur->y, ref->y, refh->y,
+                                  refv->y, refhv->y, 16 * i, 16 * j + 8,
+                                  mvs[2].x, mvs[2].y, edged_width, quarterpel, reduced_resolution, rounding);
+        compensate8x8_interpolate(&dct_codes[3 * 64], cur->y, ref->y, refh->y,
+                                  refv->y, refhv->y, 16 * i + 8, 16 * j + 8,
+                                  mvs[3].x, mvs[3].y, edged_width, quarterpel, reduced_resolution, rounding);
 
-        dx = (sum >> 3) + roundtab_76[sum & 0xf];
+        if (quarterpel) {
+            dx = (mvs[0].x / 2) + (mvs[1].x / 2) + (mvs[2].x / 2) + (mvs[3].x / 2);
+            dy = (mvs[0].y / 2) + (mvs[1].y / 2) + (mvs[2].y / 2) + (mvs[3].y / 2);
+        } else {
+            dx = mvs[0].x + mvs[1].x + mvs[2].x + mvs[3].x;
+            dy = mvs[0].y + mvs[1].y + mvs[2].y + mvs[3].y;
+        }
 
-        if(quarterpel)
-            sum = (mvs[0].y / 2) + (mvs[1].y / 2) + (mvs[2].y / 2) + (mvs[3].y / 2);
-        else
-            sum = mvs[0].y + mvs[1].y + mvs[2].y + mvs[3].y;
+        dx = (dx >> 3) + roundtab_76[dx & 0xf];
+        dy = (dy >> 3) + roundtab_76[dy & 0xf];
 
-        dy = (sum >> 3) + roundtab_76[sum & 0xf];
 
         /* uv-block-based compensation */
-        if (reduced_resolution)
-        {
-            //XXX: todo
-        }else{
+        if (reduced_resolution) {
+            const int stride = edged_width/2;
+            uint8_t * current, * reference;
+
+            current = cur->u + 16*j*stride + 16*i;
+            reference = refv->u + 16*j*stride + 16*i;
+            interpolate18x18_switch(refv->u, ref->u, 16*i, 16*j, dx, dy, stride, rounding);
+            filter_18x18_to_8x8(dct_codes + 4*64, current, stride);
+            filter_diff_18x18_to_8x8(dct_codes + 4*64, reference, stride);
+            transfer16x16_copy(current, reference, stride);
+
+            current = cur->v + 16*j*stride + 16*i;
+            reference = refv->v + 16*j*stride + 16*i;
+            interpolate18x18_switch(refv->v, ref->v, 16*i, 16*j, dx, dy, stride, rounding);
+            filter_18x18_to_8x8(dct_codes + 5*64, current, stride);
+            filter_diff_18x18_to_8x8(dct_codes + 5*64, reference, stride);
+            transfer16x16_copy(current, reference, stride);
+
+        } else {
            transfer_8to16sub(&dct_codes[4 * 64],
                              cur->u + 8 * j * edged_width / 2 + 8 * i,
                              interpolate8x8_switch2(refv->u, ref->u, 8 * i, 8 * j, dx,
                                                     dy, edged_width / 2, rounding),
-                              edged_width / 2);
+                             edged_width / 2);
 
            transfer_8to16sub(&dct_codes[5 * 64],
                              cur->v + 8 * j * edged_width / 2 + 8 * i,
                              interpolate8x8_switch2(refv->u, ref->v, 8 * i, 8 * j, dx,
                                                     dy, edged_width / 2, rounding),
-                              edged_width / 2);
+                             edged_width / 2);
         }
     }
 }
@@ -391,10 +345,11 @@
     const int32_t edged_width = pParam->edged_width;
     int32_t dx, dy;
     int32_t b_dx, b_dy;
-    int k,sum;
+    int k, sum;
     int x = i;
     int y = j;
-    uint32_t quarterpel = pParam->m_quarterpel;
+    const uint32_t quarterpel = pParam->m_quarterpel;
+    const uint8_t * ptr1, * ptr2;
 
     switch (mb->mode) {
     case MODE_FORWARD:
@@ -409,7 +364,7 @@
 
         compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, f_ref->y, f_refh->y,
                                     f_refv->y, f_refhv->y, 16 * i, 16 * j, dx,
-                                    dy, edged_width, quarterpel, 0 /*reduced_resolution*/, 0);
+                                    dy, edged_width, quarterpel, 0, 0);
 
         if (quarterpel) {
             dx /= 2;
@@ -424,14 +379,12 @@
                           cur->u + 8 * j * edged_width / 2 + 8 * i,
                           interpolate8x8_switch2(f_refv->u, f_ref->u, 8 * i, 8 * j, dx,
                                                  dy, edged_width / 2, 0),
-
                           edged_width / 2);
         transfer_8to16sub(&dct_codes[5 * 64],
                           cur->v + 8 * j * edged_width / 2 + 8 * i,
                           interpolate8x8_switch2(f_refv->u, f_ref->v, 8 * i, 8 * j, dx,
                                                  dy, edged_width / 2, 0),
-
                           edged_width / 2);
 
         break;
@@ -447,7 +400,7 @@
 
         compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, b_ref->y, b_refh->y,
                                     b_refv->y, b_refhv->y, 16 * i, 16 * j, b_dx,
-                                    b_dy, edged_width, quarterpel, 0 /*reduced_resolution*/, 0);
+                                    b_dy, edged_width, quarterpel, 0, 0);
 
         if (quarterpel) {
             b_dx /= 2;
@@ -475,7 +428,7 @@
 
         break;
 
-    case MODE_INTERPOLATE:        /* _could_ use DIRECT, but would be overkill (no 4MV there) */
+    case MODE_INTERPOLATE: /* _could_ use DIRECT, but would be overkill (no 4MV there) */
     case MODE_DIRECT_NO4V:
 
         if (quarterpel) {
@@ -483,18 +436,24 @@
             dy = mb->qmvs[0].y;
             b_dx = mb->b_qmvs[0].x;
             b_dy = mb->b_qmvs[0].y;
-
-            interpolate16x16_quarterpel((uint8_t *) f_refv->y, (uint8_t *) f_ref->y, (uint8_t *) f_refh->y,
-                (uint8_t *) f_refh->y + 64, (uint8_t *) f_refhv->y, 16*i, 16*j, dx, dy, edged_width, 0);
-            interpolate16x16_quarterpel((uint8_t *) b_refv->y, (uint8_t *) b_ref->y, (uint8_t *) b_refh->y,
-                (uint8_t *) b_refh->y + 64, (uint8_t *) b_refhv->y, 16*i, 16*j, b_dx, b_dy, edged_width, 0);
+
+            if (dx&3 | dy&3) {
+                interpolate16x16_quarterpel((uint8_t *) f_refv->y, (uint8_t *) f_ref->y, (uint8_t *) f_refv->y + 32,
+                    (uint8_t *) f_refv->y + 64, (uint8_t *) f_refv->y + 96, 16*i, 16*j, dx, dy, edged_width, 0);
+                ptr1 = f_refv->y + i * 16 + j * 16 * edged_width;
+            } else ptr1 = f_ref->y + (16*j + dy/4)*edged_width + 16*i + dx/4; // fullpixel position
+
+            if (b_dx&3 | b_dy&3) {
+                interpolate16x16_quarterpel((uint8_t *) b_refv->y, (uint8_t *) b_ref->y, (uint8_t *) f_refv->y + 32,
+                    (uint8_t *) f_refv->y + 64, (uint8_t *) f_refv->y + 96, 16*i, 16*j, b_dx, b_dy, edged_width, 0);
                ptr2 = b_refv->y + i * 16 + j * 16 * edged_width;
+            } else ptr2 = b_ref->y + (16*j + b_dy/4)*edged_width + 16*i + b_dx/4; // fullpixel position
 
             for (k = 0; k < 4; k++) {
                 transfer_8to16sub2(&dct_codes[k * 64],
                         cur->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        f_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        b_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        edged_width);
+                        ptr1 + (k&1)*8 + (k>>1)*8*edged_width,
+                        ptr2 + (k&1)*8 + (k>>1)*8*edged_width, edged_width);
             }
             b_dx /= 2;
             b_dy /= 2;
@@ -547,33 +506,38 @@
         break;
 
     case MODE_DIRECT:
-        if (quarterpel) {
-            for (k=0;k<4;k++) {
+        if (quarterpel) {
+            for (k = 0; k < 4; k++) {
                 dx = mb->qmvs[k].x;
                 dy = mb->qmvs[k].y;
                 b_dx = mb->b_qmvs[k].x;
                 b_dy = mb->b_qmvs[k].y;
-
-                interpolate8x8_quarterpel((uint8_t *) f_refv->y,
-                    (uint8_t *) f_ref->y,
-                    (uint8_t *) f_refh->y,
-                    (uint8_t *) f_refh->y + 64,
-                    (uint8_t *) f_refhv->y,
-                    16*i + (k&1)*8, 16*j + (k>>1)*8, dx, dy, edged_width, 0);
-                interpolate8x8_quarterpel((uint8_t *) b_refv->y,
-                    (uint8_t *) b_ref->y,
-                    (uint8_t *) b_refh->y,
-                    (uint8_t *) b_refh->y + 64,
-                    (uint8_t *) b_refhv->y,
-                    16*i + (k&1)*8, 16*j + (k>>1)*8, b_dx, b_dy, edged_width, 0);
+
+                if (dx&3 | dy&3) {
+                    interpolate8x8_quarterpel((uint8_t *) f_refv->y,
+                        (uint8_t *) f_ref->y,
+                        (uint8_t *) f_refv->y + 32,
+                        (uint8_t *) f_refv->y + 64,
+                        (uint8_t *) f_refv->y + 96,
+                        16*i + (k&1)*8, 16*j + (k>>1)*8, dx, dy, edged_width, 0);
+                    ptr1 = f_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width;
+                } else ptr1 = f_ref->y + (16*j + (k>>1)*8 + dy/4)*edged_width + 16*i + (k&1)*8 + dx/4;
+
+                if (b_dx&3 | b_dy&3) {
+                    interpolate8x8_quarterpel((uint8_t *) b_refv->y,
+                        (uint8_t *) b_ref->y,
+                        (uint8_t *) f_refv->y + 32,
+                        (uint8_t *) f_refv->y + 64,
+                        (uint8_t *) f_refv->y + 96,
+                        16*i + (k&1)*8, 16*j + (k>>1)*8, b_dx, b_dy, edged_width, 0);
                    ptr2 = b_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width;
+                } else ptr2 = b_ref->y + (16*j + (k>>1)*8 + b_dy/4)*edged_width + 16*i + (k&1)*8 + b_dx/4;
 
                 transfer_8to16sub2(&dct_codes[k * 64],
                         cur->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        f_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        b_refv->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
-                        edged_width);
+                        ptr1, ptr2, edged_width);
             }
 
             sum = mb->qmvs[0].y/2 + mb->qmvs[1].y/2 + mb->qmvs[2].y/2 + mb->qmvs[3].y/2;
             dy = (sum >> 3) + roundtab_76[sum & 0xf];
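
Two helpers that the new code relies on are not defined in this file: RRV_MV_SCALEUP(), which takes over from the deleted rrv_mv_scaleup() above, and get_ref(), which replaces the open-coded half-pel switch() removed from both compensate functions. The following is a minimal sketch of what they are assumed to look like, reconstructed only from the code removed in this patch; the real definitions live in the encoder's shared headers and may differ in detail, in particular the exact get_ref() signature and the meaning of its block argument, which is passed as 1 in the calls above because x and y are already pixel coordinates.

    #include <stdint.h>

    /* Assumed equivalent of the removed rrv_mv_scaleup(): a reduced-resolution
     * motion-vector component v is mapped to 2*v-1 (v>0), 2*v+1 (v<0), 0 stays 0. */
    #define RRV_MV_SCALEUP(a) ( (a) > 0 ? 2*(a) - 1 : ((a) < 0 ? 2*(a) + 1 : (a)) )

    /* Assumed equivalent of the removed switch over (dx&1, dy&1): pick the
     * plain / vertically / horizontally / diagonally interpolated plane and
     * return a pointer to the half-pel position inside it. */
    static const uint8_t *
    get_ref(const uint8_t * const ref,
            const uint8_t * const refh,
            const uint8_t * const refv,
            const uint8_t * const refhv,
            const uint32_t x, const uint32_t y,
            const uint32_t block,   /* 1 in the calls above: x,y are pixel coords */
            const int32_t dx, const int32_t dy,
            const uint32_t stride)
    {
        switch (((dx & 1) << 1) + (dy & 1)) {
        case 0:
            return ref   + (int)((y*block + dy/2)     * stride + x*block + dx/2);
        case 1:
            return refv  + (int)((y*block + (dy-1)/2) * stride + x*block + dx/2);
        case 2:
            return refh  + (int)((y*block + dy/2)     * stride + x*block + (dx-1)/2);
        default:
            return refhv + (int)((y*block + (dy-1)/2) * stride + x*block + (dx-1)/2);
        }
    }

With block == 1 and half-pel dx, dy this reproduces exactly the four cases deleted above; the (int) cast keeps the pointer offset signed when dy or dx is negative.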