12 |
#include "../utils/timer.h" |
#include "../utils/timer.h" |
13 |
#include "motion.h" |
#include "motion.h" |
14 |
|
|
|
#ifndef ABS |
|
|
#define ABS(X) (((X)>0)?(X):-(X)) |
|
|
#endif |
|
|
#ifndef SIGN |
|
|
#define SIGN(X) (((X)>0)?1:-1) |
|
|
#endif |
|
|
|
|
15 |
#ifndef RSHIFT |
#ifndef RSHIFT |
16 |
#define RSHIFT(a,b) ((a) > 0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b)) |
#define RSHIFT(a,b) ((a) > 0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b)) |
17 |
#endif |
#endif |
18 |
|
|
19 |
/* assume b>0 */ |
/* assume b>0 */ |
20 |
#ifndef ROUNDED_DIV |
#ifndef RDIV |
21 |
#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) |
#define RDIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) |
22 |
#endif |
#endif |
23 |
|
|
24 |
|
|
27 |
{ |
{ |
28 |
int length = 1 << (fcode+4); |
int length = 1 << (fcode+4); |
29 |
|
|
30 |
if (quarterpel) value *= 2; |
// if (quarterpel) value *= 2; |
31 |
|
|
32 |
if (value < -length) |
if (value < -length) |
33 |
return -length; |
return -length; |
250 |
int32_t dx; |
int32_t dx; |
251 |
int32_t dy; |
int32_t dy; |
252 |
|
|
|
|
|
253 |
uint8_t * const tmp = refv->u; |
uint8_t * const tmp = refv->u; |
254 |
|
|
255 |
if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) { /* quick copy for early SKIP */ |
if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) { /* quick copy for early SKIP */ |
256 |
/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */ |
/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */ |
257 |
|
|
|
/* if (mb->mcsel) { |
|
|
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
|
|
refGMC->y + 16 * (i + j * edged_width), |
|
|
edged_width); |
|
|
transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2), |
|
|
refGMC->u + 8 * (i + j * edged_width/2), |
|
|
edged_width / 2); |
|
|
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
|
|
refGMC->v + 8 * (i + j * edged_width/2), |
|
|
edged_width / 2); |
|
|
} else |
|
|
*/ |
|
|
{ |
|
258 |
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
transfer16x16_copy(cur->y + 16 * (i + j * edged_width), |
259 |
ref->y + 16 * (i + j * edged_width), |
ref->y + 16 * (i + j * edged_width), |
260 |
edged_width); |
edged_width); |
265 |
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2), |
266 |
ref->v + 8 * (i + j * edged_width/2), |
ref->v + 8 * (i + j * edged_width/2), |
267 |
edged_width / 2); |
edged_width / 2); |
|
} |
|
268 |
return; |
return; |
269 |
} |
} |
270 |
|
|
271 |
if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER |
if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER |
272 |
|| mb->mode == MODE_INTER_Q) /*&& !quarterpel*/) { |
|| mb->mode == MODE_INTER_Q)) { |
273 |
|
|
274 |
/* reduced resolution + GMC: not possible */ |
/* reduced resolution + GMC: not possible */ |
275 |
|
|
311 |
refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy, |
refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy, |
312 |
edged_width, quarterpel, reduced_resolution, rounding); |
edged_width, quarterpel, reduced_resolution, rounding); |
313 |
|
|
314 |
dx /= (int)(1 + quarterpel); |
if (quarterpel) { dx /= 2; dy /= 2; } |
|
dy /= (int)(1 + quarterpel); |
|
315 |
|
|
316 |
dx = (dx >> 1) + roundtab_79[dx & 0x3]; |
dx = (dx >> 1) + roundtab_79[dx & 0x3]; |
317 |
dy = (dy >> 1) + roundtab_79[dy & 0x3]; |
dy = (dy >> 1) + roundtab_79[dy & 0x3]; |
323 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
324 |
dx = mvs[k].x; |
dx = mvs[k].x; |
325 |
dy = mvs[k].y; |
dy = mvs[k].y; |
326 |
sumx += dx / (1 + quarterpel); |
sumx += quarterpel ? dx/2 : dx; |
327 |
sumy += dy / (1 + quarterpel); |
sumy += quarterpel ? dy/2 : dy; |
328 |
|
|
329 |
if (reduced_resolution){ |
if (reduced_resolution){ |
330 |
dx = RRV_MV_SCALEUP(dx); |
dx = RRV_MV_SCALEUP(dx); |
363 |
const uint32_t edged_width = pParam->edged_width; |
const uint32_t edged_width = pParam->edged_width; |
364 |
int32_t dx, dy, b_dx, b_dy, sumx, sumy, b_sumx, b_sumy; |
int32_t dx, dy, b_dx, b_dy, sumx, sumy, b_sumx, b_sumy; |
365 |
int k; |
int k; |
366 |
const int quarterpel = pParam->m_quarterpel; |
const int quarterpel = pParam->vol_flags & XVID_VOL_QUARTERPEL; |
367 |
const uint8_t * ptr1, * ptr2; |
const uint8_t * ptr1, * ptr2; |
368 |
uint8_t * const tmp = f_refv->u; |
uint8_t * const tmp = f_refv->u; |
369 |
const VECTOR * const fmvs = (quarterpel ? mb->qmvs : mb->mvs); |
const VECTOR * const fmvs = (quarterpel ? mb->qmvs : mb->mvs); |
377 |
f_refv->y, f_refhv->y, tmp, 16 * i, 16 * j, dx, |
f_refv->y, f_refhv->y, tmp, 16 * i, 16 * j, dx, |
378 |
dy, edged_width, quarterpel, 0, 0); |
dy, edged_width, quarterpel, 0, 0); |
379 |
|
|
380 |
dx /= 1 + quarterpel; |
if (quarterpel) { dx /= 2; dy /= 2; } |
381 |
dy /= 1 + quarterpel; |
|
382 |
CompensateChroma( (dx >> 1) + roundtab_79[dx & 0x3], |
CompensateChroma( (dx >> 1) + roundtab_79[dx & 0x3], |
383 |
(dy >> 1) + roundtab_79[dy & 0x3], |
(dy >> 1) + roundtab_79[dy & 0x3], |
384 |
i, j, cur, f_ref, tmp, |
i, j, cur, f_ref, tmp, |
393 |
b_refv->y, b_refhv->y, tmp, 16 * i, 16 * j, b_dx, |
b_refv->y, b_refhv->y, tmp, 16 * i, 16 * j, b_dx, |
394 |
b_dy, edged_width, quarterpel, 0, 0); |
b_dy, edged_width, quarterpel, 0, 0); |
395 |
|
|
396 |
b_dx /= 1 + quarterpel; |
if (quarterpel) { b_dx /= 2; b_dy /= 2; } |
397 |
b_dy /= 1 + quarterpel; |
|
398 |
CompensateChroma( (b_dx >> 1) + roundtab_79[b_dx & 0x3], |
CompensateChroma( (b_dx >> 1) + roundtab_79[b_dx & 0x3], |
399 |
(b_dy >> 1) + roundtab_79[b_dy & 0x3], |
(b_dy >> 1) + roundtab_79[b_dy & 0x3], |
400 |
i, j, cur, b_ref, tmp, |
i, j, cur, b_ref, tmp, |
450 |
|
|
451 |
break; |
break; |
452 |
|
|
453 |
default: // MODE_DIRECT |
default: // MODE_DIRECT (or MODE_DIRECT_NONE_MV in case of bframes decoding) |
454 |
sumx = sumy = b_sumx = b_sumy = 0; |
sumx = sumy = b_sumx = b_sumy = 0; |
455 |
|
|
456 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
520 |
|
|
521 |
|
|
522 |
|
|
523 |
|
void generate_GMCparameters( const int num_wp, const int res, |
524 |
|
const WARPPOINTS *const warp, |
525 |
|
const int width, const int height, |
526 |
|
GMC_DATA *const gmc) |
527 |
|
{ |
528 |
|
const int du0 = warp->duv[0].x; |
529 |
|
const int dv0 = warp->duv[0].y; |
530 |
|
const int du1 = warp->duv[1].x; |
531 |
|
const int dv1 = warp->duv[1].y; |
532 |
|
const int du2 = warp->duv[2].x; |
533 |
|
const int dv2 = warp->duv[2].y; |
534 |
|
|
535 |
|
gmc->W = width; |
536 |
|
gmc->H = height; |
537 |
|
|
538 |
|
gmc->rho = 4 - log2bin(res-1); // = {3,2,1,0} for res={2,4,8,16} |
539 |
|
|
540 |
|
gmc->alpha = log2bin(gmc->W-1); |
541 |
|
gmc->Ws = (1 << gmc->alpha); |
542 |
|
|
543 |
|
gmc->dxF = 16*gmc->Ws + RDIV( 8*gmc->Ws*du1, gmc->W ); |
544 |
|
gmc->dxG = RDIV( 8*gmc->Ws*dv1, gmc->W ); |
545 |
|
gmc->Fo = (res*du0 + 1) << (gmc->alpha+gmc->rho-1); |
546 |
|
gmc->Go = (res*dv0 + 1) << (gmc->alpha+gmc->rho-1); |
547 |
|
|
548 |
|
if (num_wp==2) { |
549 |
|
gmc->dyF = -gmc->dxG; |
550 |
|
gmc->dyG = gmc->dxF; |
551 |
|
} else if (num_wp==3) { |
552 |
|
gmc->beta = log2bin(gmc->H-1); |
553 |
|
gmc->Hs = (1 << gmc->beta); |
554 |
|
gmc->dyF = RDIV( 8*gmc->Hs*du2, gmc->H ); |
555 |
|
gmc->dyG = 16*gmc->Hs + RDIV( 8*gmc->Hs*dv2, gmc->H ); |
556 |
|
if (gmc->beta > gmc->alpha) { |
557 |
|
gmc->dxF <<= (gmc->beta - gmc->alpha); |
558 |
|
gmc->dxG <<= (gmc->beta - gmc->alpha); |
559 |
|
gmc->alpha = gmc->beta; |
560 |
|
gmc->Ws = 1<< gmc->beta; |
561 |
|
} else { |
562 |
|
gmc->dyF <<= gmc->alpha - gmc->beta; |
563 |
|
gmc->dyG <<= gmc->alpha - gmc->beta; |
564 |
|
} |
565 |
|
} |
566 |
|
|
567 |
|
gmc->cFo = gmc->dxF + gmc->dyF + (1 << (gmc->alpha+gmc->rho+1)); |
568 |
|
gmc->cFo += 16*gmc->Ws*(du0-1); |
569 |
|
|
570 |
|
gmc->cGo = gmc->dxG + gmc->dyG + (1 << (gmc->alpha+gmc->rho+1)); |
571 |
|
gmc->cGo += 16*gmc->Ws*(dv0-1); |
572 |
|
} |
573 |
|
|
574 |
|
void |
575 |
|
generate_GMCimage( const GMC_DATA *const gmc_data, // [input] precalculated data |
576 |
|
const IMAGE *const pRef, // [input] |
577 |
|
const int mb_width, |
578 |
|
const int mb_height, |
579 |
|
const int stride, |
580 |
|
const int stride2, |
581 |
|
const int fcode, // [input] some parameters... |
582 |
|
const int32_t quarterpel, // [input] for rounding avgMV |
583 |
|
const int reduced_resolution, // [input] ignored |
584 |
|
const int32_t rounding, // [input] for rounding image data |
585 |
|
MACROBLOCK *const pMBs, // [output] average motion vectors |
586 |
|
IMAGE *const pGMC) // [output] full warped image |
587 |
|
{ |
588 |
|
|
589 |
|
unsigned int mj,mi; |
590 |
|
VECTOR avgMV; |
591 |
|
|
592 |
|
for (mj = 0; mj < (unsigned int)mb_height; mj++) |
593 |
|
for (mi = 0; mi < (unsigned int)mb_width; mi++) { |
594 |
|
|
595 |
|
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
596 |
|
stride, stride2, quarterpel, rounding, pGMC); |
597 |
|
|
598 |
|
pMBs[mj*mb_width+mi].amv.x = gmc_sanitize(avgMV.x, quarterpel, fcode); |
599 |
|
pMBs[mj*mb_width+mi].amv.y = gmc_sanitize(avgMV.y, quarterpel, fcode); |
600 |
|
pMBs[mj*mb_width+mi].mcsel = 0; /* until mode decision */ |
601 |
|
} |
602 |
|
} |
603 |
|
|
604 |
|
|
605 |
|
|
606 |
|
/* Bilinear interpolation weight table: MLT(i) packs the weight pair
 * (16-i, i) as two 16-bit halves of one 32-bit word — (16-i) in the high
 * half, i in the low half — so one multiply blends two pixels at once
 * (the "MMX-like" trick used in generate_GMCimageMB). */
#define MLT(i) (((16-(i))<<16) + (i))
static const uint32_t MTab[16] = {
	MLT( 0), MLT( 1), MLT( 2), MLT( 3), MLT( 4), MLT( 5), MLT( 6), MLT(7),
	MLT( 8), MLT( 9), MLT(10), MLT(11), MLT(12), MLT(13), MLT(14), MLT(15)
};
#undef MLT
612 |
|
|
613 |
|
VECTOR generate_GMCimageMB( const GMC_DATA *const gmc_data, |
614 |
|
const IMAGE *const pRef, |
615 |
|
const int mi, const int mj, |
616 |
|
const int stride, |
617 |
|
const int stride2, |
618 |
|
const int quarterpel, |
619 |
|
const int rounding, |
620 |
|
IMAGE *const pGMC) |
621 |
|
{ |
622 |
|
const int W = gmc_data->W; |
623 |
|
const int H = gmc_data->H; |
624 |
|
|
625 |
|
const int rho = gmc_data->rho; |
626 |
|
const int alpha = gmc_data->alpha; |
627 |
|
|
628 |
|
const int rounder = ( 128 - (rounding<<(rho+rho)) ) << 16; |
629 |
|
|
630 |
|
const int dxF = gmc_data->dxF; |
631 |
|
const int dyF = gmc_data->dyF; |
632 |
|
const int dxG = gmc_data->dxG; |
633 |
|
const int dyG = gmc_data->dyG; |
634 |
|
|
635 |
|
uint8_t *dstY, *dstU, *dstV; |
636 |
|
|
637 |
|
int I,J; |
638 |
|
VECTOR avgMV = {0,0}; |
639 |
|
|
640 |
|
int32_t Fj, Gj; |
641 |
|
|
642 |
|
dstY = &pGMC->y[(mj*16)*stride+mi*16] + 16; |
643 |
|
|
644 |
|
Fj = gmc_data->Fo + dyF*mj*16 + dxF*mi*16; |
645 |
|
Gj = gmc_data->Go + dyG*mj*16 + dxG*mi*16; |
646 |
|
|
647 |
|
for (J = 16; J > 0; --J) { |
648 |
|
int32_t Fi, Gi; |
649 |
|
|
650 |
|
Fi = Fj; Fj += dyF; |
651 |
|
Gi = Gj; Gj += dyG; |
652 |
|
for (I = -16; I < 0; ++I) { |
653 |
|
int32_t F, G; |
654 |
|
uint32_t ri, rj; |
655 |
|
|
656 |
|
F = ( Fi >> (alpha+rho) ) << rho; Fi += dxF; |
657 |
|
G = ( Gi >> (alpha+rho) ) << rho; Gi += dxG; |
658 |
|
|
659 |
|
avgMV.x += F; |
660 |
|
avgMV.y += G; |
661 |
|
|
662 |
|
ri = MTab[F&15]; |
663 |
|
rj = MTab[G&15]; |
664 |
|
|
665 |
|
F >>= 4; |
666 |
|
G >>= 4; |
667 |
|
|
668 |
|
if (F < -1) F = -1; |
669 |
|
else if (F > W) F = W; |
670 |
|
if (G< -1) G=-1; |
671 |
|
else if (G>H) G=H; |
672 |
|
|
673 |
|
{ // MMX-like bilinear... |
674 |
|
const int offset = G*stride + F; |
675 |
|
uint32_t f0, f1; |
676 |
|
f0 = pRef->y[ offset +0 ]; |
677 |
|
f0 |= pRef->y[ offset +1 ] << 16; |
678 |
|
f1 = pRef->y[ offset+stride +0 ]; |
679 |
|
f1 |= pRef->y[ offset+stride +1 ] << 16; |
680 |
|
f0 = (ri*f0)>>16; |
681 |
|
f1 = (ri*f1) & 0x0fff0000; |
682 |
|
f0 |= f1; |
683 |
|
f0 = ( rj*f0 + rounder ) >> 24; |
684 |
|
|
685 |
|
dstY[I] = (uint8_t)f0; |
686 |
|
} |
687 |
|
} |
688 |
|
|
689 |
|
dstY += stride; |
690 |
|
} |
691 |
|
|
692 |
|
dstU = &pGMC->u[(mj*8)*stride2+mi*8] + 8; |
693 |
|
dstV = &pGMC->v[(mj*8)*stride2+mi*8] + 8; |
694 |
|
|
695 |
|
Fj = gmc_data->cFo + dyF*4 *mj*8 + dxF*4 *mi*8; |
696 |
|
Gj = gmc_data->cGo + dyG*4 *mj*8 + dxG*4 *mi*8; |
697 |
|
|
698 |
|
for (J = 8; J > 0; --J) { |
699 |
|
int32_t Fi, Gi; |
700 |
|
Fi = Fj; Fj += 4*dyF; |
701 |
|
Gi = Gj; Gj += 4*dyG; |
702 |
|
|
703 |
|
for (I = -8; I < 0; ++I) { |
704 |
|
int32_t F, G; |
705 |
|
uint32_t ri, rj; |
706 |
|
|
707 |
|
F = ( Fi >> (alpha+rho+2) ) << rho; Fi += 4*dxF; |
708 |
|
G = ( Gi >> (alpha+rho+2) ) << rho; Gi += 4*dxG; |
709 |
|
|
710 |
|
ri = MTab[F&15]; |
711 |
|
rj = MTab[G&15]; |
712 |
|
|
713 |
|
F >>= 4; |
714 |
|
G >>= 4; |
715 |
|
|
716 |
|
if (F < -1) F=-1; |
717 |
|
else if (F >= W/2) F = W/2; |
718 |
|
if (G < -1) G = -1; |
719 |
|
else if (G >= H/2) G = H/2; |
720 |
|
|
721 |
|
{ |
722 |
|
const int offset = G*stride2 + F; |
723 |
|
uint32_t f0, f1; |
724 |
|
|
725 |
|
f0 = pRef->u[ offset +0 ]; |
726 |
|
f0 |= pRef->u[ offset +1 ] << 16; |
727 |
|
f1 = pRef->u[ offset+stride2 +0 ]; |
728 |
|
f1 |= pRef->u[ offset+stride2 +1 ] << 16; |
729 |
|
f0 = (ri*f0)>>16; |
730 |
|
f1 = (ri*f1) & 0x0fff0000; |
731 |
|
f0 |= f1; |
732 |
|
f0 = ( rj*f0 + rounder ) >> 24; |
733 |
|
|
734 |
|
dstU[I] = (uint8_t)f0; |
735 |
|
|
736 |
|
|
737 |
|
f0 = pRef->v[ offset +0 ]; |
738 |
|
f0 |= pRef->v[ offset +1 ] << 16; |
739 |
|
f1 = pRef->v[ offset+stride2 +0 ]; |
740 |
|
f1 |= pRef->v[ offset+stride2 +1 ] << 16; |
741 |
|
f0 = (ri*f0)>>16; |
742 |
|
f1 = (ri*f1) & 0x0fff0000; |
743 |
|
f0 |= f1; |
744 |
|
f0 = ( rj*f0 + rounder ) >> 24; |
745 |
|
|
746 |
|
dstV[I] = (uint8_t)f0; |
747 |
|
} |
748 |
|
} |
749 |
|
dstU += stride2; |
750 |
|
dstV += stride2; |
751 |
|
} |
752 |
|
|
753 |
|
|
754 |
|
avgMV.x -= 16*((256*mi+120)<<4); // 120 = 15*16/2 |
755 |
|
avgMV.y -= 16*((256*mj+120)<<4); |
756 |
|
|
757 |
|
avgMV.x = RSHIFT( avgMV.x, (4+7-quarterpel) ); |
758 |
|
avgMV.y = RSHIFT( avgMV.y, (4+7-quarterpel) ); |
759 |
|
|
760 |
|
return avgMV; |
761 |
|
} |
762 |
|
|
763 |
|
|
764 |
|
|
765 |
|
#ifdef OLD_GRUEL_GMC |
766 |
void |
void |
767 |
generate_GMCparameters( const int num_wp, // [input]: number of warppoints |
generate_GMCparameters( const int num_wp, // [input]: number of warppoints |
768 |
const int res, // [input]: resolution |
const int res, // [input]: resolution |
870 |
return; |
return; |
871 |
} |
} |
872 |
|
|
|
|
|
|
|
|
873 |
void |
void |
874 |
generate_GMCimage( const GMC_DATA *const gmc_data, // [input] precalculated data |
generate_GMCimage( const GMC_DATA *const gmc_data, // [input] precalculated data |
875 |
const IMAGE *const pRef, // [input] |
const IMAGE *const pRef, // [input] |
889 |
VECTOR avgMV; |
VECTOR avgMV; |
890 |
|
|
891 |
for (mj=0;mj<mb_height;mj++) |
for (mj=0;mj<mb_height;mj++) |
892 |
for (mi=0;mi<mb_width; mi++) |
for (mi = 0;mi < mb_width; mi++) { |
893 |
{ |
|
894 |
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj, |
895 |
stride, stride2, quarterpel, rounding, pGMC); |
stride, stride2, quarterpel, rounding, pGMC); |
896 |
|
|
1070 |
return avgMV; /* clipping to fcode area is done outside! */ |
return avgMV; /* clipping to fcode area is done outside! */ |
1071 |
} |
} |
1072 |
|
|
1073 |
|
#endif |