/*
 * 30.10.2002 corrected qpel chroma rounding
 * 04.10.2002 added qpel support to MBMotionCompensation
 * 01.05.2002 updated MBMotionCompensationBVOP
 * 14.04.2002 bframe compensation
 */
7 |
|
|
8 |
#include <stdio.h> |
#include <stdio.h> |
9 |
|
|
14 |
#include "../utils/timer.h" |
#include "../utils/timer.h" |
15 |
#include "motion.h" |
#include "motion.h" |
16 |
|
|
|
/* Absolute value.  NOTE: evaluates X twice -- do not pass expressions
 * with side effects. */
#ifndef ABS
#define ABS(X) (((X)>0)?(X):-(X))
#endif

/* Sign of X.  NOTE: SIGN(0) evaluates to -1, not 0; X is evaluated once. */
#ifndef SIGN
#define SIGN(X) (((X)>0)?1:-1)
#endif
|
|
|
|
17 |
/* Arithmetic right shift by b with rounding: positive and non-positive
 * operands use different rounding offsets (bias toward zero for a <= 0).
 * b must be >= 1 (the macro forms 1<<(b-1)). */
#ifndef RSHIFT
#define RSHIFT(a,b) ((a) > 0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b))
#endif
20 |
|
|
21 |
/* Rounded integer division, rounding half away from zero.
 * Assumes b > 0. */
#ifndef RDIV
#define RDIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
#endif

/* Backward-compatible alias: the OLD_GRUEL_GMC code paths in this file
 * still reference the old ROUNDED_DIV name; without this alias they fail
 * to compile. */
#ifndef ROUNDED_DIV
#define ROUNDED_DIV(a,b) RDIV(a,b)
#endif
25 |
|
|
26 |
|
|
29 |
{ |
{ |
30 |
int length = 1 << (fcode+4); |
int length = 1 << (fcode+4); |
31 |
|
|
32 |
// if (quarterpel) value *= 2; |
#if 0 |
33 |
|
if (quarterpel) value *= 2; |
34 |
|
#endif |
35 |
|
|
36 |
if (value < -length) |
if (value < -length) |
37 |
return -length; |
return -length; |
90 |
(uint8_t *) ref, tmp + 32, |
(uint8_t *) ref, tmp + 32, |
91 |
tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding); |
tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding); |
92 |
ptr = tmp; |
ptr = tmp; |
93 |
} else ptr = ref + (y + dy/4)*stride + x + dx/4; // fullpixel position |
} else ptr = ref + (y + dy/4)*stride + x + dx/4; /* fullpixel position */ |
94 |
|
|
95 |
} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride); |
} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride); |
96 |
|
|
103 |
transfer_8to16sub(dct_codes+192, cur + y * stride + x + 8*stride+8, |
transfer_8to16sub(dct_codes+192, cur + y * stride + x + 8*stride+8, |
104 |
ptr + 8*stride + 8, stride); |
ptr + 8*stride + 8, stride); |
105 |
|
|
106 |
} else { //reduced_resolution |
} else { /* reduced_resolution */ |
107 |
|
|
108 |
x *= 2; y *= 2; |
x *= 2; y *= 2; |
109 |
|
|
152 |
(uint8_t *) ref, tmp + 32, |
(uint8_t *) ref, tmp + 32, |
153 |
tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding); |
tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding); |
154 |
ptr = tmp; |
ptr = tmp; |
155 |
} else ptr = ref + (y + dy/4)*stride + x + dx/4; // fullpixel position |
} else ptr = ref + (y + dy/4)*stride + x + dx/4; /* fullpixel position */ |
156 |
} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride); |
} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride); |
157 |
|
|
158 |
transfer_8to16sub(dct_codes, cur + y * stride + x, ptr, stride); |
transfer_8to16sub(dct_codes, cur + y * stride + x, ptr, stride); |
159 |
|
|
160 |
} else { //reduced_resolution |
} else { /* reduced_resolution */ |
161 |
|
|
162 |
x *= 2; y *= 2; |
x *= 2; y *= 2; |
163 |
|
|
254 |
int32_t dx; |
int32_t dx; |
255 |
int32_t dy; |
int32_t dy; |
256 |
|
|
|
|
|
257 |
uint8_t * const tmp = refv->u; |
uint8_t * const tmp = refv->u; |
258 |
|
|
259 |
if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) { /* quick copy for early SKIP */ |
if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) { /* quick copy for early SKIP */ |
260 |
/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */ |
/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */ |
261 |
|
|
|
/* if (mb->mcsel) { |
|
|
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
|
|
refGMC->y + 16 * (i + j * edged_width), |
|
|
edged_width); |
|
|
transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2), |
|
|
refGMC->u + 8 * (i + j * edged_width/2), |
|
|
edged_width / 2); |
|
|
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
|
|
refGMC->v + 8 * (i + j * edged_width/2), |
|
|
edged_width / 2); |
|
|
} else |
|
|
*/ |
|
|
{ |
|
262 |
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
263 |
ref->y + 16 * (i + j * edged_width), |
ref->y + 16 * (i + j * edged_width), |
264 |
edged_width); |
edged_width); |
269 |
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
270 |
ref->v + 8 * (i + j * edged_width/2), |
ref->v + 8 * (i + j * edged_width/2), |
271 |
edged_width / 2); |
edged_width / 2); |
|
} |
|
272 |
return; |
return; |
273 |
} |
} |
274 |
|
|
275 |
if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER |
if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER |
276 |
|| mb->mode == MODE_INTER_Q) /*&& !quarterpel*/) { |
|| mb->mode == MODE_INTER_Q)) { |
277 |
|
|
278 |
/* reduced resolution + GMC: not possible */ |
/* reduced resolution + GMC: not possible */ |
279 |
|
|
315 |
refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy, |
refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy, |
316 |
edged_width, quarterpel, reduced_resolution, rounding); |
edged_width, quarterpel, reduced_resolution, rounding); |
317 |
|
|
318 |
dx /= (int)(1 + quarterpel); |
if (quarterpel) { dx /= 2; dy /= 2; } |
|
dy /= (int)(1 + quarterpel); |
|
319 |
|
|
320 |
dx = (dx >> 1) + roundtab_79[dx & 0x3]; |
dx = (dx >> 1) + roundtab_79[dx & 0x3]; |
321 |
dy = (dy >> 1) + roundtab_79[dy & 0x3]; |
dy = (dy >> 1) + roundtab_79[dy & 0x3]; |
322 |
|
|
323 |
} else { // mode == MODE_INTER4V |
} else { /* mode == MODE_INTER4V */ |
324 |
int k, sumx = 0, sumy = 0; |
int k, sumx = 0, sumy = 0; |
325 |
const VECTOR * const mvs = (quarterpel ? mb->qmvs : mb->mvs); |
const VECTOR * const mvs = (quarterpel ? mb->qmvs : mb->mvs); |
326 |
|
|
327 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
328 |
dx = mvs[k].x; |
dx = mvs[k].x; |
329 |
dy = mvs[k].y; |
dy = mvs[k].y; |
330 |
sumx += dx / (1 + quarterpel); |
sumx += quarterpel ? dx/2 : dx; |
331 |
sumy += dy / (1 + quarterpel); |
sumy += quarterpel ? dy/2 : dy; |
332 |
|
|
333 |
if (reduced_resolution){ |
if (reduced_resolution){ |
334 |
dx = RRV_MV_SCALEUP(dx); |
dx = RRV_MV_SCALEUP(dx); |
367 |
const uint32_t edged_width = pParam->edged_width; |
const uint32_t edged_width = pParam->edged_width; |
368 |
int32_t dx, dy, b_dx, b_dy, sumx, sumy, b_sumx, b_sumy; |
int32_t dx, dy, b_dx, b_dy, sumx, sumy, b_sumx, b_sumy; |
369 |
int k; |
int k; |
370 |
const int quarterpel = pParam->m_quarterpel; |
const int quarterpel = pParam->vol_flags & XVID_VOL_QUARTERPEL; |
371 |
const uint8_t * ptr1, * ptr2; |
const uint8_t * ptr1, * ptr2; |
372 |
uint8_t * const tmp = f_refv->u; |
uint8_t * const tmp = f_refv->u; |
373 |
const VECTOR * const fmvs = (quarterpel ? mb->qmvs : mb->mvs); |
const VECTOR * const fmvs = (quarterpel ? mb->qmvs : mb->mvs); |
381 |
f_refv->y, f_refhv->y, tmp, 16 * i, 16 * j, dx, |
f_refv->y, f_refhv->y, tmp, 16 * i, 16 * j, dx, |
382 |
dy, edged_width, quarterpel, 0, 0); |
dy, edged_width, quarterpel, 0, 0); |
383 |
|
|
384 |
dx /= 1 + quarterpel; |
if (quarterpel) { dx /= 2; dy /= 2; } |
385 |
dy /= 1 + quarterpel; |
|
386 |
CompensateChroma( (dx >> 1) + roundtab_79[dx & 0x3], |
CompensateChroma( (dx >> 1) + roundtab_79[dx & 0x3], |
387 |
(dy >> 1) + roundtab_79[dy & 0x3], |
(dy >> 1) + roundtab_79[dy & 0x3], |
388 |
i, j, cur, f_ref, tmp, |
i, j, cur, f_ref, tmp, |
397 |
b_refv->y, b_refhv->y, tmp, 16 * i, 16 * j, b_dx, |
b_refv->y, b_refhv->y, tmp, 16 * i, 16 * j, b_dx, |
398 |
b_dy, edged_width, quarterpel, 0, 0); |
b_dy, edged_width, quarterpel, 0, 0); |
399 |
|
|
400 |
b_dx /= 1 + quarterpel; |
if (quarterpel) { b_dx /= 2; b_dy /= 2; } |
401 |
b_dy /= 1 + quarterpel; |
|
402 |
CompensateChroma( (b_dx >> 1) + roundtab_79[b_dx & 0x3], |
CompensateChroma( (b_dx >> 1) + roundtab_79[b_dx & 0x3], |
403 |
(b_dy >> 1) + roundtab_79[b_dy & 0x3], |
(b_dy >> 1) + roundtab_79[b_dy & 0x3], |
404 |
i, j, cur, b_ref, tmp, |
i, j, cur, b_ref, tmp, |
418 |
(uint8_t *) f_ref->y, tmp + 32, |
(uint8_t *) f_ref->y, tmp + 32, |
419 |
tmp + 64, tmp + 96, 16*i, 16*j, dx, dy, edged_width, 0); |
tmp + 64, tmp + 96, 16*i, 16*j, dx, dy, edged_width, 0); |
420 |
ptr1 = tmp; |
ptr1 = tmp; |
421 |
} else ptr1 = f_ref->y + (16*j + dy/4)*edged_width + 16*i + dx/4; // fullpixel position |
} else ptr1 = f_ref->y + (16*j + dy/4)*edged_width + 16*i + dx/4; /* fullpixel position */ |
422 |
|
|
423 |
if ((b_dx&3) | (b_dy&3)) { |
if ((b_dx&3) | (b_dy&3)) { |
424 |
interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width + 16, |
interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width + 16, |
425 |
(uint8_t *) b_ref->y, tmp + 32, |
(uint8_t *) b_ref->y, tmp + 32, |
426 |
tmp + 64, tmp + 96, 16*i, 16*j, b_dx, b_dy, edged_width, 0); |
tmp + 64, tmp + 96, 16*i, 16*j, b_dx, b_dy, edged_width, 0); |
427 |
ptr2 = tmp + 16; |
ptr2 = tmp + 16; |
428 |
} else ptr2 = b_ref->y + (16*j + b_dy/4)*edged_width + 16*i + b_dx/4; // fullpixel position |
} else ptr2 = b_ref->y + (16*j + b_dy/4)*edged_width + 16*i + b_dx/4; /* fullpixel position */ |
429 |
|
|
430 |
b_dx /= 2; |
b_dx /= 2; |
431 |
b_dy /= 2; |
b_dy /= 2; |
454 |
|
|
455 |
break; |
break; |
456 |
|
|
457 |
default: // MODE_DIRECT |
default: /* MODE_DIRECT (or MODE_DIRECT_NONE_MV in case of bframes decoding) */ |
458 |
sumx = sumy = b_sumx = b_sumy = 0; |
sumx = sumy = b_sumx = b_sumy = 0; |
459 |
|
|
460 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
504 |
break; |
break; |
505 |
} |
} |
506 |
|
|
507 |
// uv block-based chroma interpolation for direct and interpolate modes |
/* v block-based chroma interpolation for direct and interpolate modes */ |
508 |
transfer_8to16sub2(&dct_codes[4 * 64], |
transfer_8to16sub2(&dct_codes[4 * 64], |
509 |
cur->u + (j * 8) * edged_width / 2 + (i * 8), |
cur->u + (j * 8) * edged_width / 2 + (i * 8), |
510 |
interpolate8x8_switch2(tmp, b_ref->u, 8 * i, 8 * j, |
interpolate8x8_switch2(tmp, b_ref->u, 8 * i, 8 * j, |
524 |
|
|
525 |
|
|
526 |
|
|
527 |
|
/*
 * Precompute the GMC (global motion compensation) warp parameters from the
 * decoded warp-point displacements.  Fills *gmc with the fixed-point start
 * offsets (Fo/Go for luma, cFo/cGo for chroma) and per-pel increments
 * (dxF/dyF, dxG/dyG) consumed by generate_GMCimageMB().
 *
 * num_wp : number of warp points (only 2 and 3 are handled by branches here)
 * res    : warp accuracy resolution; 2,4,8,16 map to rho = 3,2,1,0
 * warp   : [input] warp-point displacement vectors
 * width, height : VOP dimensions
 * gmc    : [output] precalculated parameters
 */
void generate_GMCparameters( const int num_wp, const int res,
		const WARPPOINTS *const warp,
		const int width, const int height,
		GMC_DATA *const gmc)
{
	const int du0 = warp->duv[0].x;
	const int dv0 = warp->duv[0].y;
	const int du1 = warp->duv[1].x;
	const int dv1 = warp->duv[1].y;
	const int du2 = warp->duv[2].x;
	const int dv2 = warp->duv[2].y;

	gmc->W = width;
	gmc->H = height;

	gmc->rho = 4 - log2bin(res-1);	/* = {3,2,1,0} for res={2,4,8,16} */

	/* alpha/Ws: width rounded up to a power of two, for shift-based division */
	gmc->alpha = log2bin(gmc->W-1);
	gmc->Ws = (1 << gmc->alpha);

	/* horizontal per-pel increments of the warp functions F (x) and G (y) */
	gmc->dxF = 16*gmc->Ws + RDIV( 8*gmc->Ws*du1, gmc->W );
	gmc->dxG = RDIV( 8*gmc->Ws*dv1, gmc->W );
	gmc->Fo = (res*du0 + 1) << (gmc->alpha+gmc->rho-1);
	gmc->Go = (res*dv0 + 1) << (gmc->alpha+gmc->rho-1);

	if (num_wp==2) {
		/* 2 warp points: similarity transform, vertical increments follow
		   from the horizontal ones (rotation/scale) */
		gmc->dyF = -gmc->dxG;
		gmc->dyG = gmc->dxF;
	} else if (num_wp==3) {
		gmc->beta = log2bin(gmc->H-1);
		gmc->Hs = (1 << gmc->beta);
		gmc->dyF = RDIV( 8*gmc->Hs*du2, gmc->H );
		gmc->dyG = 16*gmc->Hs + RDIV( 8*gmc->Hs*dv2, gmc->H );
		/* normalize both axes to a common shift amount (the larger of
		   alpha/beta) so one set of shifts works in the inner loops */
		if (gmc->beta > gmc->alpha) {
			gmc->dxF <<= (gmc->beta - gmc->alpha);
			gmc->dxG <<= (gmc->beta - gmc->alpha);
			gmc->alpha = gmc->beta;
			gmc->Ws = 1<< gmc->beta;
		} else {
			gmc->dyF <<= gmc->alpha - gmc->beta;
			gmc->dyG <<= gmc->alpha - gmc->beta;
		}
	}
	/* NOTE(review): for num_wp==1 neither branch runs, so dyF/dyG (and
	   beta/Hs) are left unset here -- verify callers handle that case. */

	/* chroma start offsets (chroma is at half resolution, hence +1 shift) */
	gmc->cFo = gmc->dxF + gmc->dyF + (1 << (gmc->alpha+gmc->rho+1));
	gmc->cFo += 16*gmc->Ws*(du0-1);

	gmc->cGo = gmc->dxG + gmc->dyG + (1 << (gmc->alpha+gmc->rho+1));
	gmc->cGo += 16*gmc->Ws*(dv0-1);
}
577 |
|
|
578 |
void |
void |
579 |
generate_GMCparameters( const int num_wp, // [input]: number of warppoints |
generate_GMCimage( const GMC_DATA *const gmc_data, /* [input] precalculated data */ |
580 |
const int res, // [input]: resolution |
const IMAGE *const pRef, /* [input] */ |
581 |
const WARPPOINTS *const warp, // [input]: warp points |
const int mb_width, |
582 |
|
const int mb_height, |
583 |
|
const int stride, |
584 |
|
const int stride2, |
585 |
|
const int fcode, /* [input] some parameters... */ |
586 |
|
const int32_t quarterpel, /* [input] for rounding avgMV */ |
587 |
|
const int reduced_resolution, /* [input] ignored */ |
588 |
|
const int32_t rounding, /* [input] for rounding image data */ |
589 |
|
MACROBLOCK *const pMBs, /* [output] average motion vectors */ |
590 |
|
IMAGE *const pGMC) /* [output] full warped image */ |
591 |
|
{ |
592 |
|
|
593 |
|
unsigned int mj,mi; |
594 |
|
VECTOR avgMV; |
595 |
|
|
596 |
|
for (mj = 0; mj < (unsigned int)mb_height; mj++) |
597 |
|
for (mi = 0; mi < (unsigned int)mb_width; mi++) { |
598 |
|
|
599 |
|
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
600 |
|
stride, stride2, quarterpel, rounding, pGMC); |
601 |
|
|
602 |
|
pMBs[mj*mb_width+mi].amv.x = gmc_sanitize(avgMV.x, quarterpel, fcode); |
603 |
|
pMBs[mj*mb_width+mi].amv.y = gmc_sanitize(avgMV.y, quarterpel, fcode); |
604 |
|
pMBs[mj*mb_width+mi].mcsel = 0; /* until mode decision */ |
605 |
|
} |
606 |
|
} |
607 |
|
|
608 |
|
|
609 |
|
|
610 |
|
/* Bilinear weight table: MTab[i] packs the complementary weight (16-i)
 * in the high halfword and i in the low halfword, so one 32-bit multiply
 * blends two packed pixels at once. */
#define MLT(i) (((16-(i))<<16) + (i))
static const uint32_t MTab[16] = {
	MLT( 0), MLT( 1), MLT( 2), MLT( 3), MLT( 4), MLT( 5), MLT( 6), MLT(7),
	MLT( 8), MLT( 9), MLT(10), MLT(11), MLT(12), MLT(13), MLT(14), MLT(15)
};
#undef MLT
616 |
|
|
617 |
|
/*
 * Warp one macroblock (16x16 luma block plus the two corresponding 8x8
 * chroma blocks) from pRef into pGMC using the precalculated fixed-point
 * warp parameters, and return the accumulated average motion vector of
 * the block (clipping to the fcode range is done by the caller).
 *
 * mi, mj  : macroblock column/row
 * stride  : luma stride; stride2 : chroma stride
 * rounding: folded into 'rounder' below to bias the bilinear blend
 */
VECTOR generate_GMCimageMB( const GMC_DATA *const gmc_data,
		const IMAGE *const pRef,
		const int mi, const int mj,
		const int stride,
		const int stride2,
		const int quarterpel,
		const int rounding,
		IMAGE *const pGMC)
{
	const int W = gmc_data->W;
	const int H = gmc_data->H;

	const int rho = gmc_data->rho;
	const int alpha = gmc_data->alpha;

	/* blend rounding constant, pre-shifted into the packed-pixel domain */
	const int rounder = ( 128 - (rounding<<(rho+rho)) ) << 16;

	const int dxF = gmc_data->dxF;
	const int dyF = gmc_data->dyF;
	const int dxG = gmc_data->dxG;
	const int dyG = gmc_data->dyG;

	uint8_t *dstY, *dstU, *dstV;

	int I,J;
	VECTOR avgMV = {0,0};

	int32_t Fj, Gj;

	/* dst pointers are biased by the block width so the inner loops can
	   index with negative I */
	dstY = &pGMC->y[(mj*16)*stride+mi*16] + 16;

	/* warp-function accumulators at the top-left pel of this MB */
	Fj = gmc_data->Fo + dyF*mj*16 + dxF*mi*16;
	Gj = gmc_data->Go + dyG*mj*16 + dxG*mi*16;

	for (J = 16; J > 0; --J) {
		int32_t Fi, Gi;

		Fi = Fj; Fj += dyF;
		Gi = Gj; Gj += dyG;
		for (I = -16; I < 0; ++I) {
			int32_t F, G;
			uint32_t ri, rj;

			/* source position in 1/16-pel units */
			F = ( Fi >> (alpha+rho) ) << rho; Fi += dxF;
			G = ( Gi >> (alpha+rho) ) << rho; Gi += dxG;

			avgMV.x += F;
			avgMV.y += G;

			/* packed bilinear weights from the fractional parts */
			ri = MTab[F&15];
			rj = MTab[G&15];

			/* integer pel coordinates */
			F >>= 4;
			G >>= 4;

			/* clamp to [-1, W] x [-1, H]; reads at -1 / max rely on the
			   padded reference image edges */
			if (F < -1) F = -1;
			else if (F > W) F = W;
			if (G< -1) G=-1;
			else if (G>H) G=H;

			{	/* MMX-like bilinear: two pixels packed per 32-bit word */
				const int offset = G*stride + F;
				uint32_t f0, f1;
				f0  = pRef->y[ offset +0 ];
				f0 |= pRef->y[ offset +1 ] << 16;
				f1  = pRef->y[ offset+stride +0 ];
				f1 |= pRef->y[ offset+stride +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstY[I] = (uint8_t)f0;
			}
		}

		dstY += stride;
	}

	/* chroma: half resolution, 4x the luma increments per chroma pel */
	dstU = &pGMC->u[(mj*8)*stride2+mi*8] + 8;
	dstV = &pGMC->v[(mj*8)*stride2+mi*8] + 8;

	Fj = gmc_data->cFo + dyF*4 *mj*8 + dxF*4 *mi*8;
	Gj = gmc_data->cGo + dyG*4 *mj*8 + dxG*4 *mi*8;

	for (J = 8; J > 0; --J) {
		int32_t Fi, Gi;
		Fi = Fj; Fj += 4*dyF;
		Gi = Gj; Gj += 4*dyG;

		for (I = -8; I < 0; ++I) {
			int32_t F, G;
			uint32_t ri, rj;

			/* +2 in the shift: chroma plane is at half resolution */
			F = ( Fi >> (alpha+rho+2) ) << rho; Fi += 4*dxF;
			G = ( Gi >> (alpha+rho+2) ) << rho; Gi += 4*dxG;

			ri = MTab[F&15];
			rj = MTab[G&15];

			F >>= 4;
			G >>= 4;

			if (F < -1) F=-1;
			else if (F >= W/2) F = W/2;
			if (G < -1) G = -1;
			else if (G >= H/2) G = H/2;

			{
				const int offset = G*stride2 + F;
				uint32_t f0, f1;

				/* U plane, same packed bilinear blend as luma */
				f0  = pRef->u[ offset +0 ];
				f0 |= pRef->u[ offset +1 ] << 16;
				f1  = pRef->u[ offset+stride2 +0 ];
				f1 |= pRef->u[ offset+stride2 +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstU[I] = (uint8_t)f0;

				/* V plane, same offset and weights */
				f0  = pRef->v[ offset +0 ];
				f0 |= pRef->v[ offset +1 ] << 16;
				f1  = pRef->v[ offset+stride2 +0 ];
				f1 |= pRef->v[ offset+stride2 +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstV[I] = (uint8_t)f0;
			}
		}
		dstU += stride2;
		dstV += stride2;
	}

	/* convert the accumulated sum into an average MV relative to the
	   block centre */
	avgMV.x -= 16*((256*mi+120)<<4);	/* 120 = 15*16/2 */
	avgMV.y -= 16*((256*mj+120)<<4);

	avgMV.x = RSHIFT( avgMV.x, (4+7-quarterpel) );
	avgMV.y = RSHIFT( avgMV.y, (4+7-quarterpel) );

	return avgMV;
}
766 |
|
|
767 |
|
|
768 |
|
|
769 |
|
#ifdef OLD_GRUEL_GMC |
770 |
|
void |
771 |
|
generate_GMCparameters( const int num_wp, /* [input]: number of warppoints */ |
772 |
|
const int res, /* [input]: resolution */ |
773 |
|
const WARPPOINTS *const warp, /* [input]: warp points */ |
774 |
const int width, const int height, |
const int width, const int height, |
775 |
GMC_DATA *const gmc) // [output] precalculated parameters |
GMC_DATA *const gmc) /* [output] precalculated parameters */ |
776 |
{ |
{ |
777 |
|
|
778 |
/* We follow mainly two sources: The original standard, which is ugly, and the |
/* We follow mainly two sources: The original standard, which is ugly, and the |
832 |
int dv0 = warp->duv[0].y; |
int dv0 = warp->duv[0].y; |
833 |
int du1 = warp->duv[1].x; |
int du1 = warp->duv[1].x; |
834 |
int dv1 = warp->duv[1].y; |
int dv1 = warp->duv[1].y; |
835 |
// int du2 = warp->duv[2].x; |
#if 0 |
836 |
// int dv2 = warp->duv[2].y; |
int du2 = warp->duv[2].x; |
837 |
|
int dv2 = warp->duv[2].y; |
838 |
|
#endif |
839 |
|
|
840 |
gmc->num_wp = num_wp; |
gmc->num_wp = num_wp; |
841 |
|
|
850 |
gmc->alpha = log2bin(gmc->W-1); |
gmc->alpha = log2bin(gmc->W-1); |
851 |
gmc->Ws= 1<<gmc->alpha; |
gmc->Ws= 1<<gmc->alpha; |
852 |
|
|
853 |
// gmc->beta = log2bin(gmc->H-1); |
#if 0 |
854 |
// gmc->Hs= 1<<gmc->beta; |
gmc->beta = log2bin(gmc->H-1); |
855 |
|
gmc->Hs= 1<<gmc->beta; |
856 |
|
#endif |
857 |
|
|
858 |
// printf("du0=%d dv0=%d du1=%d dv1=%d s=%d sigma=%d W=%d alpha=%d, Ws=%d, rho=%d\n",du0,dv0,du1,dv1,gmc->s,gmc->sigma,gmc->W,gmc->alpha,gmc->Ws,gmc->rho); |
#if 0 |
859 |
|
printf("du0=%d dv0=%d du1=%d dv1=%d s=%d sigma=%d W=%d alpha=%d, Ws=%d, rho=%d\n",du0,dv0,du1,dv1,gmc->s,gmc->sigma,gmc->W,gmc->alpha,gmc->Ws,gmc->rho); |
860 |
|
#endif |
861 |
|
|
862 |
/* i2s is only needed for num_wp >= 3, etc. */ |
/* |
863 |
/* the 's' values are in 1/s pel resolution */ |
* i2s is only needed for num_wp >= 3, etc. |
864 |
|
* the 's' values are in 1/s pel resolution |
865 |
|
*/ |
866 |
gmc->i0s = res/2 * ( du0 ); |
gmc->i0s = res/2 * ( du0 ); |
867 |
gmc->j0s = res/2 * ( dv0 ); |
gmc->j0s = res/2 * ( dv0 ); |
868 |
gmc->i1s = res/2 * (2*width + du1 + du0 ); |
gmc->i1s = res/2 * (2*width + du1 + du0 ); |
869 |
gmc->j1s = res/2 * ( dv1 + dv0 ); |
gmc->j1s = res/2 * ( dv1 + dv0 ); |
870 |
// gmc->i2s = res/2 * ( du2 + du0 ); |
#if 0 |
871 |
// gmc->j2s = res/2 * (2*height + dv2 + dv0 ); |
gmc->i2s = res/2 * ( du2 + du0 ); |
872 |
|
gmc->j2s = res/2 * (2*height + dv2 + dv0 ); |
873 |
|
#endif |
874 |
|
|
875 |
/* i2s and i2ss are only needed for num_wp == 3, etc. */ |
/* i2s and i2ss are only needed for num_wp == 3, etc. */ |
876 |
|
|
878 |
gmc->i1ss = 16*gmc->Ws + ROUNDED_DIV(((gmc->W-gmc->Ws)*(gmc->r*gmc->i0s) + gmc->Ws*(gmc->r*gmc->i1s - 16*gmc->W)),gmc->W); |
gmc->i1ss = 16*gmc->Ws + ROUNDED_DIV(((gmc->W-gmc->Ws)*(gmc->r*gmc->i0s) + gmc->Ws*(gmc->r*gmc->i1s - 16*gmc->W)),gmc->W); |
879 |
gmc->j1ss = ROUNDED_DIV( ((gmc->W - gmc->Ws)*(gmc->r*gmc->j0s) + gmc->Ws*gmc->r*gmc->j1s) ,gmc->W ); |
gmc->j1ss = ROUNDED_DIV( ((gmc->W - gmc->Ws)*(gmc->r*gmc->j0s) + gmc->Ws*gmc->r*gmc->j1s) ,gmc->W ); |
880 |
|
|
881 |
// gmc->i2ss = ROUNDED_DIV( ((gmc->H - gmc->Hs)*(gmc->r*gmc->i0s) + gmc->Hs*(gmc->r*gmc->i2s)), gmc->H); |
#if 0 |
882 |
// gmc->j2ss = 16*gmc->Hs + ROUNDED_DIV( ((gmc->H-gmc->Hs)*(gmc->r*gmc->j0s) + gmc->Ws*(gmc->r*gmc->j2s - 16*gmc->H)), gmc->H); |
gmc->i2ss = ROUNDED_DIV( ((gmc->H - gmc->Hs)*(gmc->r*gmc->i0s) + gmc->Hs*(gmc->r*gmc->i2s)), gmc->H); |
883 |
|
gmc->j2ss = 16*gmc->Hs + ROUNDED_DIV( ((gmc->H-gmc->Hs)*(gmc->r*gmc->j0s) + gmc->Ws*(gmc->r*gmc->j2s - 16*gmc->H)), gmc->H); |
884 |
|
#endif |
885 |
|
|
886 |
return; |
return; |
887 |
} |
} |
888 |
|
|
|
|
|
|
|
|
889 |
void |
void |
890 |
generate_GMCimage( const GMC_DATA *const gmc_data, // [input] precalculated data |
generate_GMCimage( const GMC_DATA *const gmc_data, /* [input] precalculated data */ |
891 |
const IMAGE *const pRef, // [input] |
const IMAGE *const pRef, /* [input] */ |
892 |
const int mb_width, |
const int mb_width, |
893 |
const int mb_height, |
const int mb_height, |
894 |
const int stride, |
const int stride, |
895 |
const int stride2, |
const int stride2, |
896 |
const int fcode, // [input] some parameters... |
const int fcode, /* [input] some parameters... */ |
897 |
const int32_t quarterpel, // [input] for rounding avgMV |
const int32_t quarterpel, /* [input] for rounding avgMV */ |
898 |
const int reduced_resolution, // [input] ignored |
const int reduced_resolution, /* [input] ignored */ |
899 |
const int32_t rounding, // [input] for rounding image data |
const int32_t rounding, /* [input] for rounding image data */ |
900 |
MACROBLOCK *const pMBs, // [output] average motion vectors |
MACROBLOCK *const pMBs, /* [output] average motion vectors */ |
901 |
IMAGE *const pGMC) // [output] full warped image |
IMAGE *const pGMC) /* [output] full warped image */ |
902 |
{ |
{ |
903 |
|
|
904 |
unsigned int mj,mi; |
unsigned int mj,mi; |
905 |
VECTOR avgMV; |
VECTOR avgMV; |
906 |
|
|
907 |
for (mj=0;mj<mb_height;mj++) |
for (mj=0;mj<mb_height;mj++) |
908 |
for (mi=0;mi<mb_width; mi++) |
for (mi = 0;mi < mb_width; mi++) { |
909 |
{ |
|
910 |
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
911 |
stride, stride2, quarterpel, rounding, pGMC); |
stride, stride2, quarterpel, rounding, pGMC); |
912 |
|
|
960 |
|
|
961 |
const int i1ss = gmc_data->i1ss; |
const int i1ss = gmc_data->i1ss; |
962 |
const int j1ss = gmc_data->j1ss; |
const int j1ss = gmc_data->j1ss; |
963 |
// const int i2ss = gmc_data->i2ss; |
#if 0 |
964 |
// const int j2ss = gmc_data->j2ss; |
const int i2ss = gmc_data->i2ss; |
965 |
|
const int j2ss = gmc_data->j2ss; |
966 |
|
#endif |
967 |
|
|
968 |
const int alpha = gmc_data->alpha; |
const int alpha = gmc_data->alpha; |
969 |
const int Ws = gmc_data->Ws; |
const int Ws = gmc_data->Ws; |
970 |
|
|
971 |
// const int beta = gmc_data->beta; |
#if 0 |
972 |
// const int Hs = gmc_data->Hs; |
const int beta = gmc_data->beta; |
973 |
|
const int Hs = gmc_data->Hs; |
974 |
|
#endif |
975 |
|
|
976 |
int I,J; |
int I,J; |
977 |
VECTOR avgMV = {0,0}; |
VECTOR avgMV = {0,0}; |
985 |
/* this naive implementation (with lots of multiplications) isn't slower (rather faster) than |
/* this naive implementation (with lots of multiplications) isn't slower (rather faster) than |
986 |
working incremental. Don't ask me why... maybe the whole this is memory bound? */ |
working incremental. Don't ask me why... maybe the whole this is memory bound? */ |
987 |
|
|
988 |
const int ri= F & (s-1); // fractional part of pelwise MV X |
const int ri= F & (s-1); /* fractional part of pelwise MV X */ |
989 |
const int rj= G & (s-1); // fractional part of pelwise MV Y |
const int rj= G & (s-1); /* fractional part of pelwise MV Y */ |
990 |
|
|
991 |
int Y00,Y01,Y10,Y11; |
int Y00,Y01,Y10,Y11; |
992 |
|
|
1009 |
else if (G>H) |
else if (G>H) |
1010 |
G=H; /* dito */ |
G=H; /* dito */ |
1011 |
|
|
1012 |
Y00 = pRef->y[ G*stride + F ]; // Lumi values |
Y00 = pRef->y[ G*stride + F ]; /* Lumi values */ |
1013 |
Y01 = pRef->y[ G*stride + F+1 ]; |
Y01 = pRef->y[ G*stride + F+1 ]; |
1014 |
Y10 = pRef->y[ G*stride + F+stride ]; |
Y10 = pRef->y[ G*stride + F+stride ]; |
1015 |
Y11 = pRef->y[ G*stride + F+stride+1 ]; |
Y11 = pRef->y[ G*stride + F+stride+1 ]; |
1036 |
int Gc=((-r*j0s+j1ss)*(4*I+1) +(-r*i0s+i1ss)*(4*J+1) +2*Ws*r*j0s |
int Gc=((-r*j0s+j1ss)*(4*I+1) +(-r*i0s+i1ss)*(4*J+1) +2*Ws*r*j0s |
1037 |
-16*Ws +(1<<(alpha+rho+1))) >>(alpha+rho+2); |
-16*Ws +(1<<(alpha+rho+1))) >>(alpha+rho+2); |
1038 |
|
|
1039 |
const int ri= Fc & (s-1); // fractional part of pelwise MV X |
const int ri= Fc & (s-1); /* fractional part of pelwise MV X */ |
1040 |
const int rj= Gc & (s-1); // fractional part of pelwise MV Y |
const int rj= Gc & (s-1); /* fractional part of pelwise MV Y */ |
1041 |
|
|
1042 |
int C00,C01,C10,C11; |
int C00,C01,C10,C11; |
1043 |
|
|
1054 |
Gc=H/2; /* dito */ |
Gc=H/2; /* dito */ |
1055 |
|
|
1056 |
/* now calculate U data */ |
/* now calculate U data */ |
1057 |
C00 = pRef->u[ Gc*stride2 + Fc ]; // chroma-value Cb |
C00 = pRef->u[ Gc*stride2 + Fc ]; /* chroma-value Cb */ |
1058 |
C01 = pRef->u[ Gc*stride2 + Fc+1 ]; |
C01 = pRef->u[ Gc*stride2 + Fc+1 ]; |
1059 |
C10 = pRef->u[ (Gc+1)*stride2 + Fc ]; |
C10 = pRef->u[ (Gc+1)*stride2 + Fc ]; |
1060 |
C11 = pRef->u[ (Gc+1)*stride2 + Fc+1 ]; |
C11 = pRef->u[ (Gc+1)*stride2 + Fc+1 ]; |
1067 |
pGMC->u[J*stride2+I] = (uint8_t)C00; /* output 1 U-pixel */ |
pGMC->u[J*stride2+I] = (uint8_t)C00; /* output 1 U-pixel */ |
1068 |
|
|
1069 |
/* now calculate V data */ |
/* now calculate V data */ |
1070 |
C00 = pRef->v[ Gc*stride2 + Fc ]; // chroma-value Cr |
C00 = pRef->v[ Gc*stride2 + Fc ]; /* chroma-value Cr */ |
1071 |
C01 = pRef->v[ Gc*stride2 + Fc+1 ]; |
C01 = pRef->v[ Gc*stride2 + Fc+1 ]; |
1072 |
C10 = pRef->v[ (Gc+1)*stride2 + Fc ]; |
C10 = pRef->v[ (Gc+1)*stride2 + Fc ]; |
1073 |
C11 = pRef->v[ (Gc+1)*stride2 + Fc+1 ]; |
C11 = pRef->v[ (Gc+1)*stride2 + Fc+1 ]; |
1090 |
return avgMV; /* clipping to fcode area is done outside! */ |
return avgMV; /* clipping to fcode area is done outside! */ |
1091 |
} |
} |
1092 |
|
|
1093 |
|
#endif |