#include "motion.h"
#include "sad.h"
#include "../utils/emms.h"
#include "../dct/fdct.h"

/*****************************************************************************
 * Modified rounding tables -- declared in motion.h
 * Original tables see ISO spec tables 7-6 -> 7-9
 ****************************************************************************/

const uint32_t roundtab[16] =
{0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2 };

/* K = 4 */
const uint32_t roundtab_76[16] =
{ 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1 };

/* K = 2 */
const uint32_t roundtab_78[8] =
{ 0, 0, 1, 1, 0, 0, 0, 1 };

/* K = 1 */
const uint32_t roundtab_79[4] =
{ 0, 1, 0, 0 };

/* SAD thresholds (scaled by quant) that allow a macroblock to be skipped */
#define INITIAL_SKIP_THRESH	(10)
#define FINAL_SKIP_THRESH	(50)
#define MAX_CHROMA_SAD_FOR_SKIP	(22)

/* forward a candidate vector to the currently selected CheckCandidate* routine */
#define CHECK_CANDIDATE(X,Y,D) { \
	CheckCandidate((X),(Y), (D), &iDirection, data ); }
|
|
81 |
static __inline uint32_t |
static __inline uint32_t |
82 |
d_mv_bits(int x, int y, const VECTOR pred, const uint32_t iFcode, const int qpel, const int rrv) |
d_mv_bits(int x, int y, const VECTOR pred, const uint32_t iFcode, const int qpel, const int rrv) |
83 |
{ |
{ |
84 |
int xb, yb; |
int bits; |
85 |
x = qpel ? x<<1 : x; |
const int q = (1 << (iFcode - 1)) - 1; |
86 |
y = qpel ? y<<1 : y; |
|
87 |
|
x <<= qpel; |
88 |
|
y <<= qpel; |
89 |
if (rrv) { x = RRV_MV_SCALEDOWN(x); y = RRV_MV_SCALEDOWN(y); } |
if (rrv) { x = RRV_MV_SCALEDOWN(x); y = RRV_MV_SCALEDOWN(y); } |
90 |
|
|
91 |
x -= pred.x; |
x -= pred.x; |
92 |
y -= pred.y; |
bits = (x != 0 ? iFcode:0); |
93 |
|
x = abs(x); |
94 |
if (x) { |
x += q; |
|
x = ABS(x); |
|
|
x += (1 << (iFcode - 1)) - 1; |
|
95 |
x >>= (iFcode - 1); |
x >>= (iFcode - 1); |
96 |
if (x > 32) x = 32; |
bits += mvtab[x]; |
97 |
xb = mvtab[x] + iFcode; |
|
98 |
} else xb = 1; |
y -= pred.y; |
99 |
|
bits += (y != 0 ? iFcode:0); |
100 |
if (y) { |
y = abs(y); |
101 |
y = ABS(y); |
y += q; |
|
y += (1 << (iFcode - 1)) - 1; |
|
102 |
y >>= (iFcode - 1); |
y >>= (iFcode - 1); |
103 |
if (y > 32) y = 32; |
bits += mvtab[y]; |
104 |
yb = mvtab[y] + iFcode; |
|
105 |
} else yb = 1; |
return bits; |
|
return xb + yb; |
|
106 |
} |
} |
107 |
|
|
108 |
static int32_t ChromaSAD2(int fx, int fy, int bx, int by, const SearchData * const data) |
static int32_t ChromaSAD2(const int fx, const int fy, const int bx, const int by, |
109 |
|
const SearchData * const data) |
110 |
{ |
{ |
111 |
int sad; |
int sad; |
112 |
const uint32_t stride = data->iEdgedWidth/2; |
const uint32_t stride = data->iEdgedWidth/2; |
114 |
* f_refv = data->RefQ + 8, |
* f_refv = data->RefQ + 8, |
115 |
* b_refu = data->RefQ + 16, |
* b_refu = data->RefQ + 16, |
116 |
* b_refv = data->RefQ + 24; |
* b_refv = data->RefQ + 24; |
117 |
|
int offset = (fx>>1) + (fy>>1)*stride; |
118 |
|
|
119 |
switch (((fx & 1) << 1) | (fy & 1)) { |
switch (((fx & 1) << 1) | (fy & 1)) { |
120 |
case 0: |
case 0: |
121 |
fx = fx / 2; fy = fy / 2; |
f_refu = (uint8_t*)data->RefP[4] + offset; |
122 |
f_refu = (uint8_t*)data->RefCU + fy * stride + fx, stride; |
f_refv = (uint8_t*)data->RefP[5] + offset; |
|
f_refv = (uint8_t*)data->RefCV + fy * stride + fx, stride; |
|
123 |
break; |
break; |
124 |
case 1: |
case 1: |
125 |
fx = fx / 2; fy = (fy - 1) / 2; |
interpolate8x8_halfpel_v(f_refu, data->RefP[4] + offset, stride, data->rounding); |
126 |
interpolate8x8_halfpel_v(f_refu, data->RefCU + fy * stride + fx, stride, data->rounding); |
interpolate8x8_halfpel_v(f_refv, data->RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_v(f_refv, data->RefCV + fy * stride + fx, stride, data->rounding); |
|
127 |
break; |
break; |
128 |
case 2: |
case 2: |
129 |
fx = (fx - 1) / 2; fy = fy / 2; |
interpolate8x8_halfpel_h(f_refu, data->RefP[4] + offset, stride, data->rounding); |
130 |
interpolate8x8_halfpel_h(f_refu, data->RefCU + fy * stride + fx, stride, data->rounding); |
interpolate8x8_halfpel_h(f_refv, data->RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_h(f_refv, data->RefCV + fy * stride + fx, stride, data->rounding); |
|
131 |
break; |
break; |
132 |
default: |
default: |
133 |
fx = (fx - 1) / 2; fy = (fy - 1) / 2; |
interpolate8x8_halfpel_hv(f_refu, data->RefP[4] + offset, stride, data->rounding); |
134 |
interpolate8x8_halfpel_hv(f_refu, data->RefCU + fy * stride + fx, stride, data->rounding); |
interpolate8x8_halfpel_hv(f_refv, data->RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_hv(f_refv, data->RefCV + fy * stride + fx, stride, data->rounding); |
|
135 |
break; |
break; |
136 |
} |
} |
137 |
|
|
138 |
|
offset = (bx>>1) + (by>>1)*stride; |
139 |
switch (((bx & 1) << 1) | (by & 1)) { |
switch (((bx & 1) << 1) | (by & 1)) { |
140 |
case 0: |
case 0: |
141 |
bx = bx / 2; by = by / 2; |
b_refu = (uint8_t*)data->b_RefP[4] + offset; |
142 |
b_refu = (uint8_t*)data->b_RefCU + by * stride + bx, stride; |
b_refv = (uint8_t*)data->b_RefP[5] + offset; |
|
b_refv = (uint8_t*)data->b_RefCV + by * stride + bx, stride; |
|
143 |
break; |
break; |
144 |
case 1: |
case 1: |
145 |
bx = bx / 2; by = (by - 1) / 2; |
interpolate8x8_halfpel_v(b_refu, data->b_RefP[4] + offset, stride, data->rounding); |
146 |
interpolate8x8_halfpel_v(b_refu, data->b_RefCU + by * stride + bx, stride, data->rounding); |
interpolate8x8_halfpel_v(b_refv, data->b_RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_v(b_refv, data->b_RefCV + by * stride + bx, stride, data->rounding); |
|
147 |
break; |
break; |
148 |
case 2: |
case 2: |
149 |
bx = (bx - 1) / 2; by = by / 2; |
interpolate8x8_halfpel_h(b_refu, data->b_RefP[4] + offset, stride, data->rounding); |
150 |
interpolate8x8_halfpel_h(b_refu, data->b_RefCU + by * stride + bx, stride, data->rounding); |
interpolate8x8_halfpel_h(b_refv, data->b_RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_h(b_refv, data->b_RefCV + by * stride + bx, stride, data->rounding); |
|
151 |
break; |
break; |
152 |
default: |
default: |
153 |
bx = (bx - 1) / 2; by = (by - 1) / 2; |
interpolate8x8_halfpel_hv(b_refu, data->b_RefP[4] + offset, stride, data->rounding); |
154 |
interpolate8x8_halfpel_hv(b_refu, data->b_RefCU + by * stride + bx, stride, data->rounding); |
interpolate8x8_halfpel_hv(b_refv, data->b_RefP[5] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_hv(b_refv, data->b_RefCV + by * stride + bx, stride, data->rounding); |
|
155 |
break; |
break; |
156 |
} |
} |
157 |
|
|
161 |
return sad; |
return sad; |
162 |
} |
} |
163 |
|
|
|
|
|
164 |
static int32_t |
static int32_t |
165 |
ChromaSAD(int dx, int dy, const SearchData * const data) |
ChromaSAD(const int dx, const int dy, const SearchData * const data) |
166 |
{ |
{ |
167 |
int sad; |
int sad; |
168 |
const uint32_t stride = data->iEdgedWidth/2; |
const uint32_t stride = data->iEdgedWidth/2; |
169 |
|
int offset = (dx>>1) + (dy>>1)*stride; |
170 |
|
|
171 |
if (dx == data->temp[5] && dy == data->temp[6]) return data->temp[7]; //it has been checked recently |
if (dx == data->temp[5] && dy == data->temp[6]) return data->temp[7]; //it has been checked recently |
172 |
data->temp[5] = dx; data->temp[6] = dy; // backup |
data->temp[5] = dx; data->temp[6] = dy; // backup |
173 |
|
|
174 |
switch (((dx & 1) << 1) | (dy & 1)) { |
switch (((dx & 1) << 1) | (dy & 1)) { |
175 |
case 0: |
case 0: |
176 |
dx = dx / 2; dy = dy / 2; |
sad = sad8(data->CurU, data->RefP[4] + offset, stride); |
177 |
sad = sad8(data->CurU, data->RefCU + dy * stride + dx, stride); |
sad += sad8(data->CurV, data->RefP[5] + offset, stride); |
|
sad += sad8(data->CurV, data->RefCV + dy * stride + dx, stride); |
|
178 |
break; |
break; |
179 |
case 1: |
case 1: |
180 |
dx = dx / 2; dy = (dy - 1) / 2; |
sad = sad8bi(data->CurU, data->RefP[4] + offset, data->RefP[4] + offset + stride, stride); |
181 |
sad = sad8bi(data->CurU, data->RefCU + dy * stride + dx, data->RefCU + (dy+1) * stride + dx, stride); |
sad += sad8bi(data->CurV, data->RefP[5] + offset, data->RefP[5] + offset + stride, stride); |
|
sad += sad8bi(data->CurV, data->RefCV + dy * stride + dx, data->RefCV + (dy+1) * stride + dx, stride); |
|
182 |
break; |
break; |
183 |
case 2: |
case 2: |
184 |
dx = (dx - 1) / 2; dy = dy / 2; |
sad = sad8bi(data->CurU, data->RefP[4] + offset, data->RefP[4] + offset + 1, stride); |
185 |
sad = sad8bi(data->CurU, data->RefCU + dy * stride + dx, data->RefCU + dy * stride + dx+1, stride); |
sad += sad8bi(data->CurV, data->RefP[5] + offset, data->RefP[5] + offset + 1, stride); |
|
sad += sad8bi(data->CurV, data->RefCV + dy * stride + dx, data->RefCV + dy * stride + dx+1, stride); |
|
186 |
break; |
break; |
187 |
default: |
default: |
188 |
dx = (dx - 1) / 2; dy = (dy - 1) / 2; |
interpolate8x8_halfpel_hv(data->RefQ, data->RefP[4] + offset, stride, data->rounding); |
|
interpolate8x8_halfpel_hv(data->RefQ, data->RefCU + dy * stride + dx, stride, data->rounding); |
|
189 |
sad = sad8(data->CurU, data->RefQ, stride); |
sad = sad8(data->CurU, data->RefQ, stride); |
190 |
|
|
191 |
interpolate8x8_halfpel_hv(data->RefQ, data->RefCV + dy * stride + dx, stride, data->rounding); |
interpolate8x8_halfpel_hv(data->RefQ, data->RefP[5] + offset, stride, data->rounding); |
192 |
sad += sad8(data->CurV, data->RefQ, stride); |
sad += sad8(data->CurV, data->RefQ, stride); |
193 |
break; |
break; |
194 |
} |
} |
200 |
GetReferenceB(const int x, const int y, const uint32_t dir, const SearchData * const data) |
GetReferenceB(const int x, const int y, const uint32_t dir, const SearchData * const data) |
201 |
{ |
{ |
202 |
// dir : 0 = forward, 1 = backward |
// dir : 0 = forward, 1 = backward |
203 |
switch ( (dir << 2) | ((x&1)<<1) | (y&1) ) { |
const uint8_t* const *direction = ( dir == 0 ? data->RefP : data->b_RefP ); |
204 |
case 0 : return data->Ref + x/2 + (y/2)*(data->iEdgedWidth); |
const int picture = ((x&1)<<1) | (y&1); |
205 |
case 1 : return data->RefV + x/2 + ((y-1)/2)*(data->iEdgedWidth); |
const int offset = (x>>1) + (y>>1)*data->iEdgedWidth; |
206 |
case 2 : return data->RefH + (x-1)/2 + (y/2)*(data->iEdgedWidth); |
return direction[picture] + offset; |
|
case 3 : return data->RefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); |
|
|
case 4 : return data->bRef + x/2 + (y/2)*(data->iEdgedWidth); |
|
|
case 5 : return data->bRefV + x/2 + ((y-1)/2)*(data->iEdgedWidth); |
|
|
case 6 : return data->bRefH + (x-1)/2 + (y/2)*(data->iEdgedWidth); |
|
|
default : return data->bRefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); |
|
|
} |
|
207 |
} |
} |
208 |
|
|
209 |
// this is a simpler copy of GetReferenceB, but as it's __inline anyway, we can keep the two separate |
// this is a simpler copy of GetReferenceB, but as it's __inline anyway, we can keep the two separate |
210 |
static __inline const uint8_t * |
static __inline const uint8_t * |
211 |
GetReference(const int x, const int y, const SearchData * const data) |
GetReference(const int x, const int y, const SearchData * const data) |
212 |
{ |
{ |
213 |
switch ( ((x&1)<<1) | (y&1) ) { |
const int picture = ((x&1)<<1) | (y&1); |
214 |
case 0 : return data->Ref + x/2 + (y/2)*(data->iEdgedWidth); |
const int offset = (x>>1) + (y>>1)*data->iEdgedWidth; |
215 |
case 3 : return data->RefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); |
return data->RefP[picture] + offset; |
|
case 1 : return data->RefV + x/2 + ((y-1)/2)*(data->iEdgedWidth); |
|
|
default : return data->RefH + (x-1)/2 + (y/2)*(data->iEdgedWidth); //case 2 |
|
|
} |
|
216 |
} |
} |
217 |
|
|
218 |
static uint8_t * |
static uint8_t * |
229 |
ref1 = GetReferenceB(halfpel_x, halfpel_y, dir, data); |
ref1 = GetReferenceB(halfpel_x, halfpel_y, dir, data); |
230 |
ref1 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref1 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
231 |
switch( ((x&1)<<1) + (y&1) ) { |
switch( ((x&1)<<1) + (y&1) ) { |
232 |
case 0: // pure halfpel position |
case 3: // x and y in qpel resolution - the "corners" (top left/right and |
233 |
return (uint8_t *) ref1; |
// bottom left/right) during qpel refinement |
234 |
|
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
235 |
|
ref3 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
236 |
|
ref4 = GetReferenceB(x - halfpel_x, y - halfpel_y, dir, data); |
237 |
|
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
238 |
|
ref3 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
239 |
|
ref4 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
240 |
|
interpolate8x8_avg4(Reference, ref1, ref2, ref3, ref4, iEdgedWidth, rounding); |
241 |
break; |
break; |
242 |
|
|
243 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
252 |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
253 |
break; |
break; |
254 |
|
|
255 |
default: // x and y in qpel resolution - the "corners" (top left/right and |
default: // pure halfpel position |
256 |
// bottom left/right) during qpel refinement |
return (uint8_t *) ref1; |
257 |
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
|
|
ref3 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
|
|
ref4 = GetReferenceB(x - halfpel_x, y - halfpel_y, dir, data); |
|
|
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
|
|
ref3 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
|
|
ref4 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
|
|
interpolate8x8_avg4(Reference, ref1, ref2, ref3, ref4, iEdgedWidth, rounding); |
|
|
break; |
|
258 |
} |
} |
259 |
return Reference; |
return Reference; |
260 |
} |
} |
299 |
interpolate8x8_avg2(Reference+8*iEdgedWidth+8, ref1+8*iEdgedWidth+8, ref2+8*iEdgedWidth+8, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference+8*iEdgedWidth+8, ref1+8*iEdgedWidth+8, ref2+8*iEdgedWidth+8, iEdgedWidth, rounding, 8); |
300 |
break; |
break; |
301 |
|
|
302 |
case 0: // pure halfpel position |
default: // pure halfpel position |
303 |
return (uint8_t *) ref1; |
return (uint8_t *) ref1; |
304 |
} |
} |
305 |
return Reference; |
return Reference; |
359 |
{ |
{ |
360 |
int32_t sad; uint32_t t; |
int32_t sad; uint32_t t; |
361 |
const uint8_t * Reference; |
const uint8_t * Reference; |
362 |
|
VECTOR * current; |
363 |
|
|
364 |
if ( (x > data->max_dx) || (x < data->min_dx) |
if ( (x > data->max_dx) || (x < data->min_dx) |
365 |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
366 |
|
|
367 |
if (!data->qpel_precision) Reference = GetReference(x, y, data); |
if (!data->qpel_precision) { |
368 |
else Reference = Interpolate16x16qpel(x, y, 0, data); |
Reference = GetReference(x, y, data); |
369 |
|
current = data->currentMV; |
370 |
|
} else { // x and y are in 1/4 precision |
371 |
|
Reference = Interpolate8x8qpel(x, y, 0, 0, data); |
372 |
|
current = data->currentQMV; |
373 |
|
} |
374 |
|
|
375 |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
376 |
t = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
t = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
379 |
|
|
380 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
381 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
382 |
data->currentMV->x = x; data->currentMV->y = y; |
current->x = x; current->y = y; |
383 |
*dir = Direction; |
*dir = Direction; |
384 |
} |
} |
385 |
} |
} |
391 |
uint32_t t; |
uint32_t t; |
392 |
const uint8_t * Reference; |
const uint8_t * Reference; |
393 |
|
|
394 |
if ( (!(x&1) && x !=0) || (!(y&1) && y !=0) || //non-zero integer value |
if ( (!(x&1) && x !=0) || (!(y&1) && y !=0) || //non-zero even value |
395 |
(x > data->max_dx) || (x < data->min_dx) |
(x > data->max_dx) || (x < data->min_dx) |
396 |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
397 |
|
|
426 |
uint32_t t; |
uint32_t t; |
427 |
VECTOR * current; |
VECTOR * current; |
428 |
|
|
429 |
if ( (x > data->max_dx) | ( x < data->min_dx) |
if ( (x > data->max_dx) || ( x < data->min_dx) |
430 |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
431 |
|
|
432 |
if (data->rrv && (!(x&1) && x !=0) | (!(y&1) && y !=0) ) return; //non-zero even value |
if (data->rrv && (!(x&1) && x !=0) | (!(y&1) && y !=0) ) return; //non-zero even value |
433 |
|
|
449 |
if (data->chroma) sad += ChromaSAD((xc >> 1) + roundtab_79[xc & 0x3], |
if (data->chroma) sad += ChromaSAD((xc >> 1) + roundtab_79[xc & 0x3], |
450 |
(yc >> 1) + roundtab_79[yc & 0x3], data); |
(yc >> 1) + roundtab_79[yc & 0x3], data); |
451 |
|
|
|
|
|
452 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
453 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
454 |
current->x = x; current->y = y; |
current->x = x; current->y = y; |
465 |
if ( (x > data->max_dx) || (x < data->min_dx) |
if ( (x > data->max_dx) || (x < data->min_dx) |
466 |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
467 |
|
|
468 |
sad = sad32v_c(data->Cur, data->Ref + x/2 + (y/2)*(data->iEdgedWidth), |
sad = sad32v_c(data->Cur, data->RefP[0] + x/2 + (y/2)*(data->iEdgedWidth), |
469 |
data->iEdgedWidth, data->temp+1); |
data->iEdgedWidth, data->temp+1); |
470 |
|
|
471 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
492 |
const uint8_t *ReferenceF, *ReferenceB; |
const uint8_t *ReferenceF, *ReferenceB; |
493 |
VECTOR *current; |
VECTOR *current; |
494 |
|
|
495 |
if ( (xf > data->max_dx) | (xf < data->min_dx) |
if ((xf > data->max_dx) || (xf < data->min_dx) || |
496 |
| (yf > data->max_dy) | (yf < data->min_dy) ) return; |
(yf > data->max_dy) || (yf < data->min_dy)) |
497 |
|
return; |
498 |
|
|
499 |
if (!data->qpel_precision) { |
if (!data->qpel_precision) { |
500 |
ReferenceF = GetReference(xf, yf, data); |
ReferenceF = GetReference(xf, yf, data); |
539 |
const uint8_t *ReferenceB; |
const uint8_t *ReferenceB; |
540 |
VECTOR mvs, b_mvs; |
VECTOR mvs, b_mvs; |
541 |
|
|
542 |
if (( x > 31) | ( x < -32) | ( y > 31) | (y < -32)) return; |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
543 |
|
|
544 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
545 |
mvs.x = data->directmvF[k].x + x; |
mvs.x = data->directmvF[k].x + x; |
552 |
data->directmvB[k].y |
data->directmvB[k].y |
553 |
: mvs.y - data->referencemv[k].y); |
: mvs.y - data->referencemv[k].y); |
554 |
|
|
555 |
if ( (mvs.x > data->max_dx) | (mvs.x < data->min_dx) |
if ((mvs.x > data->max_dx) || (mvs.x < data->min_dx) || |
556 |
| (mvs.y > data->max_dy) | (mvs.y < data->min_dy) |
(mvs.y > data->max_dy) || (mvs.y < data->min_dy) || |
557 |
| (b_mvs.x > data->max_dx) | (b_mvs.x < data->min_dx) |
(b_mvs.x > data->max_dx) || (b_mvs.x < data->min_dx) || |
558 |
| (b_mvs.y > data->max_dy) | (b_mvs.y < data->min_dy) ) return; |
(b_mvs.y > data->max_dy) || (b_mvs.y < data->min_dy) ) |
559 |
|
return; |
560 |
|
|
561 |
if (data->qpel) { |
if (data->qpel) { |
562 |
xcf += mvs.x/2; ycf += mvs.y/2; |
xcf += mvs.x/2; ycf += mvs.y/2; |
598 |
const uint8_t *ReferenceB; |
const uint8_t *ReferenceB; |
599 |
VECTOR mvs, b_mvs; |
VECTOR mvs, b_mvs; |
600 |
|
|
601 |
if (( x > 31) | ( x < -32) | ( y > 31) | (y < -32)) return; |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
602 |
|
|
603 |
mvs.x = data->directmvF[0].x + x; |
mvs.x = data->directmvF[0].x + x; |
604 |
b_mvs.x = ((x == 0) ? |
b_mvs.x = ((x == 0) ? |
610 |
data->directmvB[0].y |
data->directmvB[0].y |
611 |
: mvs.y - data->referencemv[0].y); |
: mvs.y - data->referencemv[0].y); |
612 |
|
|
613 |
if ( (mvs.x > data->max_dx) | (mvs.x < data->min_dx) |
if ( (mvs.x > data->max_dx) || (mvs.x < data->min_dx) |
614 |
| (mvs.y > data->max_dy) | (mvs.y < data->min_dy) |
|| (mvs.y > data->max_dy) || (mvs.y < data->min_dy) |
615 |
| (b_mvs.x > data->max_dx) | (b_mvs.x < data->min_dx) |
|| (b_mvs.x > data->max_dx) || (b_mvs.x < data->min_dx) |
616 |
| (b_mvs.y > data->max_dy) | (b_mvs.y < data->min_dy) ) return; |
|| (b_mvs.y > data->max_dy) || (b_mvs.y < data->min_dy) ) return; |
617 |
|
|
618 |
if (data->qpel) { |
if (data->qpel) { |
619 |
xcf = 4*(mvs.x/2); ycf = 4*(mvs.y/2); |
xcf = 4*(mvs.x/2); ycf = 4*(mvs.y/2); |
642 |
} |
} |
643 |
} |
} |
644 |
|
|
645 |
|
|
646 |
|
static void |
647 |
|
CheckCandidateBits16(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
648 |
|
{ |
649 |
|
|
650 |
|
int16_t *in = data->dctSpace, *coeff = data->dctSpace + 64; |
651 |
|
int32_t bits = 0, sum; |
652 |
|
VECTOR * current; |
653 |
|
const uint8_t * ptr; |
654 |
|
int i, cbp = 0, t, xc, yc; |
655 |
|
|
656 |
|
if ( (x > data->max_dx) || (x < data->min_dx) |
657 |
|
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
658 |
|
|
659 |
|
if (!data->qpel_precision) { |
660 |
|
ptr = GetReference(x, y, data); |
661 |
|
current = data->currentMV; |
662 |
|
xc = x; yc = y; |
663 |
|
} else { // x and y are in 1/4 precision |
664 |
|
ptr = Interpolate16x16qpel(x, y, 0, data); |
665 |
|
current = data->currentQMV; |
666 |
|
xc = x/2; yc = y/2; |
667 |
|
} |
668 |
|
|
669 |
|
for(i = 0; i < 4; i++) { |
670 |
|
int s = 8*((i&1) + (i>>1)*data->iEdgedWidth); |
671 |
|
transfer_8to16subro(in, data->Cur + s, ptr + s, data->iEdgedWidth); |
672 |
|
fdct(in); |
673 |
|
if (data->lambda8 == 0) sum = quant_inter(coeff, in, data->lambda16); |
674 |
|
else sum = quant4_inter(coeff, in, data->lambda16); |
675 |
|
if (sum > 0) { |
676 |
|
cbp |= 1 << (5 - i); |
677 |
|
bits += data->temp[i] = CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
678 |
|
} else data->temp[i] = 0; |
679 |
|
} |
680 |
|
|
681 |
|
bits += t = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
682 |
|
|
683 |
|
if (bits < data->iMinSAD[0]) { // there is still a chance, adding chroma |
684 |
|
xc = (xc >> 1) + roundtab_79[xc & 0x3]; |
685 |
|
yc = (yc >> 1) + roundtab_79[yc & 0x3]; |
686 |
|
|
687 |
|
//chroma U |
688 |
|
ptr = interpolate8x8_switch2(data->RefQ + 64, data->RefP[4], 0, 0, xc, yc, data->iEdgedWidth/2, data->rounding); |
689 |
|
transfer_8to16subro(in, ptr, data->CurU, data->iEdgedWidth/2); |
690 |
|
fdct(in); |
691 |
|
if (data->lambda8 == 0) sum = quant_inter(coeff, in, data->lambda16); |
692 |
|
else sum = quant4_inter(coeff, in, data->lambda16); |
693 |
|
if (sum > 0) { |
694 |
|
cbp |= 1 << (5 - 4); |
695 |
|
bits += CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
696 |
|
} |
697 |
|
|
698 |
|
if (bits < data->iMinSAD[0]) { |
699 |
|
//chroma V |
700 |
|
ptr = interpolate8x8_switch2(data->RefQ + 64, data->RefP[5], 0, 0, xc, yc, data->iEdgedWidth/2, data->rounding); |
701 |
|
transfer_8to16subro(in, ptr, data->CurV, data->iEdgedWidth/2); |
702 |
|
fdct(in); |
703 |
|
if (data->lambda8 == 0) sum = quant_inter(coeff, in, data->lambda16); |
704 |
|
else sum = quant4_inter(coeff, in, data->lambda16); |
705 |
|
if (sum > 0) { |
706 |
|
cbp |= 1 << (5 - 5); |
707 |
|
bits += CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
708 |
|
} |
709 |
|
} |
710 |
|
} |
711 |
|
|
712 |
|
bits += xvid_cbpy_tab[15-(cbp>>2)].len; |
713 |
|
bits += mcbpc_inter_tab[(MODE_INTER & 7) | ((cbp & 3) << 3)].len; |
714 |
|
|
715 |
|
if (bits < data->iMinSAD[0]) { |
716 |
|
data->iMinSAD[0] = bits; |
717 |
|
current[0].x = x; current[0].y = y; |
718 |
|
*dir = Direction; |
719 |
|
} |
720 |
|
|
721 |
|
if (data->temp[0] + t < data->iMinSAD[1]) { |
722 |
|
data->iMinSAD[1] = data->temp[0] + t; current[1].x = x; current[1].y = y; } |
723 |
|
if (data->temp[1] < data->iMinSAD[2]) { |
724 |
|
data->iMinSAD[2] = data->temp[1]; current[2].x = x; current[2].y = y; } |
725 |
|
if (data->temp[2] < data->iMinSAD[3]) { |
726 |
|
data->iMinSAD[3] = data->temp[2]; current[3].x = x; current[3].y = y; } |
727 |
|
if (data->temp[3] < data->iMinSAD[4]) { |
728 |
|
data->iMinSAD[4] = data->temp[3]; current[4].x = x; current[4].y = y; } |
729 |
|
|
730 |
|
} |
731 |
|
static void |
732 |
|
CheckCandidateBits8(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
733 |
|
{ |
734 |
|
|
735 |
|
int16_t *in = data->dctSpace, *coeff = data->dctSpace + 64; |
736 |
|
int32_t sum, bits; |
737 |
|
VECTOR * current; |
738 |
|
const uint8_t * ptr; |
739 |
|
int cbp; |
740 |
|
|
741 |
|
if ( (x > data->max_dx) || (x < data->min_dx) |
742 |
|
|| (y > data->max_dy) || (y < data->min_dy) ) return; |
743 |
|
|
744 |
|
if (!data->qpel_precision) { |
745 |
|
ptr = GetReference(x, y, data); |
746 |
|
current = data->currentMV; |
747 |
|
} else { // x and y are in 1/4 precision |
748 |
|
ptr = Interpolate8x8qpel(x, y, 0, 0, data); |
749 |
|
current = data->currentQMV; |
750 |
|
} |
751 |
|
|
752 |
|
transfer_8to16subro(in, data->Cur, ptr, data->iEdgedWidth); |
753 |
|
fdct(in); |
754 |
|
if (data->lambda8 == 0) sum = quant_inter(coeff, in, data->lambda16); |
755 |
|
else sum = quant4_inter(coeff, in, data->lambda16); |
756 |
|
if (sum > 0) { |
757 |
|
bits = CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
758 |
|
cbp = 1; |
759 |
|
} else cbp = bits = 0; |
760 |
|
|
761 |
|
bits += sum = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
762 |
|
|
763 |
|
if (bits < data->iMinSAD[0]) { |
764 |
|
data->temp[0] = cbp; |
765 |
|
data->iMinSAD[0] = bits; |
766 |
|
current[0].x = x; current[0].y = y; |
767 |
|
*dir = Direction; |
768 |
|
} |
769 |
|
} |
770 |
|
|
771 |
/* CHECK_CANDIDATE FUNCTIONS END */
772 |
|
|
773 |
/* MAINSEARCH FUNCTIONS START */ |
/* MAINSEARCH FUNCTIONS START */ |
938 |
const uint32_t stride, const uint32_t iQuant, int rrv) |
const uint32_t stride, const uint32_t iQuant, int rrv) |
939 |
|
|
940 |
{ |
{ |
941 |
|
int offset = (x + y*stride)*8; |
942 |
if(!rrv) { |
if(!rrv) { |
943 |
uint32_t sadC = sad8(current->u + x*8 + y*stride*8, |
uint32_t sadC = sad8(current->u + offset, |
944 |
reference->u + x*8 + y*stride*8, stride); |
reference->u + offset, stride); |
945 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
946 |
sadC += sad8(current->v + (x + y*stride)*8, |
sadC += sad8(current->v + offset, |
947 |
reference->v + (x + y*stride)*8, stride); |
reference->v + offset, stride); |
948 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
949 |
return 1; |
return 1; |
950 |
|
|
951 |
} else { |
} else { |
952 |
uint32_t sadC = sad16(current->u + x*16 + y*stride*16, |
uint32_t sadC = sad16(current->u + 2*offset, |
953 |
reference->u + x*16 + y*stride*16, stride, 256*4096); |
reference->u + 2*offset, stride, 256*4096); |
954 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
955 |
sadC += sad16(current->v + (x + y*stride)*16, |
sadC += sad16(current->v + 2*offset, |
956 |
reference->v + (x + y*stride)*16, stride, 256*4096); |
reference->v + 2*offset, stride, 256*4096); |
957 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
958 |
return 1; |
return 1; |
959 |
} |
} |
984 |
uint32_t mb_width = pParam->mb_width; |
uint32_t mb_width = pParam->mb_width; |
985 |
uint32_t mb_height = pParam->mb_height; |
uint32_t mb_height = pParam->mb_height; |
986 |
const uint32_t iEdgedWidth = pParam->edged_width; |
const uint32_t iEdgedWidth = pParam->edged_width; |
987 |
|
const uint32_t MotionFlags = MakeGoodMotionFlags(current->motion_flags, current->global_flags); |
988 |
|
|
989 |
uint32_t x, y; |
uint32_t x, y; |
990 |
uint32_t iIntra = 0; |
uint32_t iIntra = 0; |
991 |
int32_t InterBias, quant = current->quant, sad00; |
int32_t quant = current->quant, sad00; |
992 |
|
int skip_thresh = INITIAL_SKIP_THRESH * |
993 |
|
(current->global_flags & XVID_REDUCED ? 4:1) * |
994 |
|
(current->global_flags & XVID_MODEDECISION_BITS ? 2:1); |
995 |
|
|
996 |
// some pre-initialized thingies for SearchP |
// some pre-initialized thingies for SearchP |
997 |
int32_t temp[8]; |
int32_t temp[8]; |
998 |
VECTOR currentMV[5]; |
VECTOR currentMV[5]; |
999 |
VECTOR currentQMV[5]; |
VECTOR currentQMV[5]; |
1000 |
int32_t iMinSAD[5]; |
int32_t iMinSAD[5]; |
1001 |
|
DECLARE_ALIGNED_MATRIX(dct_space, 2, 64, int16_t, CACHE_LINE); |
1002 |
SearchData Data; |
SearchData Data; |
1003 |
memset(&Data, 0, sizeof(SearchData)); |
memset(&Data, 0, sizeof(SearchData)); |
1004 |
Data.iEdgedWidth = iEdgedWidth; |
Data.iEdgedWidth = iEdgedWidth; |
1009 |
Data.iFcode = current->fcode; |
Data.iFcode = current->fcode; |
1010 |
Data.rounding = pParam->m_rounding_type; |
Data.rounding = pParam->m_rounding_type; |
1011 |
Data.qpel = pParam->m_quarterpel; |
Data.qpel = pParam->m_quarterpel; |
1012 |
Data.chroma = current->motion_flags & PMV_CHROMA16; |
Data.chroma = MotionFlags & PMV_CHROMA16; |
1013 |
Data.rrv = current->global_flags & XVID_REDUCED; |
Data.rrv = current->global_flags & XVID_REDUCED; |
1014 |
|
Data.dctSpace = dct_space; |
1015 |
|
|
1016 |
if ((current->global_flags & XVID_REDUCED)) { |
if ((current->global_flags & XVID_REDUCED)) { |
1017 |
mb_width = (pParam->width + 31) / 32; |
mb_width = (pParam->width + 31) / 32; |
1018 |
mb_height = (pParam->height + 31) / 32; |
mb_height = (pParam->height + 31) / 32; |
1019 |
Data.qpel = Data.chroma = 0; |
Data.qpel = 0; |
1020 |
} |
} |
1021 |
|
|
1022 |
Data.RefQ = pRefV->u; // a good place, also used in MC (for similar purpose) |
Data.RefQ = pRefV->u; // a good place, also used in MC (for similar purpose) |
1060 |
//initial skip decision |
//initial skip decision |
1061 |
/* no early skip for GMC (global vector = skip vector is unknown!) */ |
/* no early skip for GMC (global vector = skip vector is unknown!) */ |
1062 |
if (!(current->global_flags & XVID_GMC)) { /* no fast SKIP for S(GMC)-VOPs */ |
if (!(current->global_flags & XVID_GMC)) { /* no fast SKIP for S(GMC)-VOPs */ |
1063 |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * INITIAL_SKIP_THRESH * (Data.rrv ? 4:1) ) |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * skip_thresh) |
1064 |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) { |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) { |
1065 |
SkipMacroblockP(pMB, sad00); |
SkipMacroblockP(pMB, sad00); |
1066 |
continue; |
continue; |
1068 |
} |
} |
1069 |
|
|
1070 |
SearchP(pRef, pRefH->y, pRefV->y, pRefHV->y, pCurrent, x, |
SearchP(pRef, pRefH->y, pRefV->y, pRefHV->y, pCurrent, x, |
1071 |
y, current->motion_flags, pMB->quant, |
y, MotionFlags, current->global_flags, pMB->quant, |
1072 |
&Data, pParam, pMBs, reference->mbs, |
&Data, pParam, pMBs, reference->mbs, |
1073 |
current->global_flags & XVID_INTER4V, pMB); |
current->global_flags & XVID_INTER4V, pMB); |
1074 |
|
|
1075 |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
1076 |
if (!(current->global_flags & XVID_GMC)) { |
if (!(current->global_flags & XVID_GMC || current->global_flags & XVID_MODEDECISION_BITS)) { |
1077 |
if ( (pMB->dquant == NO_CHANGE) && (sad00 < pMB->quant * MAX_SAD00_FOR_SKIP) |
if ( pMB->dquant == NO_CHANGE && sad00 < pMB->quant * MAX_SAD00_FOR_SKIP) { |
1078 |
&& ((100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH * (Data.rrv ? 4:1)) ) |
if ( (100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH * (Data.rrv ? 4:1) ) |
1079 |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) { |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) |
1080 |
SkipMacroblockP(pMB, sad00); |
SkipMacroblockP(pMB, sad00); |
|
continue; |
|
|
} |
|
|
} |
|
|
|
|
|
/* finally, intra decision */ |
|
|
|
|
|
InterBias = MV16_INTER_BIAS; |
|
|
if (pMB->quant > 8) InterBias += 100 * (pMB->quant - 8); // to make high quants work |
|
|
if (y != 0) |
|
|
if ((pMB - pParam->mb_width)->mode == MODE_INTRA ) InterBias -= 80; |
|
|
if (x != 0) |
|
|
if ((pMB - 1)->mode == MODE_INTRA ) InterBias -= 80; |
|
|
|
|
|
if (Data.chroma) InterBias += 50; // to compensate bigger SAD |
|
|
if (Data.rrv) InterBias *= 4; |
|
|
|
|
|
if (InterBias < pMB->sad16) { |
|
|
int32_t deviation; |
|
|
if (!Data.rrv) |
|
|
deviation = dev16(pCurrent->y + (x + y * iEdgedWidth) * 16, iEdgedWidth); |
|
|
else { |
|
|
deviation = dev16(pCurrent->y + (x + y * iEdgedWidth) * 32, iEdgedWidth) |
|
|
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16, iEdgedWidth) |
|
|
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16 * iEdgedWidth, iEdgedWidth) |
|
|
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16 * (iEdgedWidth+1), iEdgedWidth); |
|
|
} |
|
|
if (deviation < (pMB->sad16 - InterBias)) { |
|
|
if (++iIntra >= iLimit) return 1; |
|
|
SkipMacroblockP(pMB, 0); //same thing |
|
|
pMB->mode = MODE_INTRA; |
|
1081 |
} |
} |
1082 |
} |
} |
1083 |
|
if (pMB->mode == MODE_INTRA) |
1084 |
|
if (++iIntra > iLimit) return 1; |
1085 |
} |
} |
1086 |
} |
} |
1087 |
|
|
1089 |
{ |
{ |
1090 |
current->warp = GlobalMotionEst( pMBs, pParam, current, reference, pRefH, pRefV, pRefHV); |
current->warp = GlobalMotionEst( pMBs, pParam, current, reference, pRefH, pRefV, pRefHV); |
1091 |
} |
} |
|
|
|
1092 |
return 0; |
return 0; |
1093 |
} |
} |
1094 |
|
|
1152 |
} |
} |
1153 |
} |
} |
1154 |
|
|
1155 |
|
static int |
1156 |
|
ModeDecision(const uint32_t iQuant, SearchData * const Data, |
1157 |
|
int inter4v, |
1158 |
|
MACROBLOCK * const pMB, |
1159 |
|
const MACROBLOCK * const pMBs, |
1160 |
|
const int x, const int y, |
1161 |
|
const MBParam * const pParam, |
1162 |
|
const uint32_t MotionFlags, |
1163 |
|
const uint32_t GlobalFlags) |
1164 |
|
{ |
1165 |
|
|
1166 |
|
int mode = MODE_INTER; |
1167 |
|
|
1168 |
|
if (!(GlobalFlags & XVID_MODEDECISION_BITS)) { //normal, fast, SAD-based mode decision |
1169 |
|
int sad; |
1170 |
|
int InterBias = MV16_INTER_BIAS; |
1171 |
|
if (inter4v == 0 || Data->iMinSAD[0] < Data->iMinSAD[1] + Data->iMinSAD[2] + |
1172 |
|
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * (int32_t)iQuant) { |
1173 |
|
mode = MODE_INTER; |
1174 |
|
sad = Data->iMinSAD[0]; |
1175 |
|
} else { |
1176 |
|
mode = MODE_INTER4V; |
1177 |
|
sad = Data->iMinSAD[1] + Data->iMinSAD[2] + |
1178 |
|
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * (int32_t)iQuant; |
1179 |
|
Data->iMinSAD[0] = sad; |
1180 |
|
} |
1181 |
|
|
1182 |
|
/* intra decision */ |
1183 |
|
|
1184 |
|
if (iQuant > 8) InterBias += 100 * (iQuant - 8); // to make high quants work |
1185 |
|
if (y != 0) |
1186 |
|
if ((pMB - pParam->mb_width)->mode == MODE_INTRA ) InterBias -= 80; |
1187 |
|
if (x != 0) |
1188 |
|
if ((pMB - 1)->mode == MODE_INTRA ) InterBias -= 80; |
1189 |
|
|
1190 |
|
if (Data->chroma) InterBias += 50; // to compensate bigger SAD |
1191 |
|
if (Data->rrv) InterBias *= 4; |
1192 |
|
|
1193 |
|
if (InterBias < pMB->sad16) { |
1194 |
|
int32_t deviation; |
1195 |
|
if (!Data->rrv) deviation = dev16(Data->Cur, Data->iEdgedWidth); |
1196 |
|
else deviation = dev16(Data->Cur, Data->iEdgedWidth) + |
1197 |
|
dev16(Data->Cur+8, Data->iEdgedWidth) + |
1198 |
|
dev16(Data->Cur + 8*Data->iEdgedWidth, Data->iEdgedWidth) + |
1199 |
|
dev16(Data->Cur+8+8*Data->iEdgedWidth, Data->iEdgedWidth); |
1200 |
|
|
1201 |
|
if (deviation < (sad - InterBias)) return MODE_INTRA; |
1202 |
|
} |
1203 |
|
return mode; |
1204 |
|
|
1205 |
|
} else { |
1206 |
|
|
1207 |
|
int bits, intra, i; |
1208 |
|
VECTOR backup[5], *v; |
1209 |
|
Data->lambda16 = iQuant; |
1210 |
|
Data->lambda8 = pParam->m_quant_type; |
1211 |
|
|
1212 |
|
v = Data->qpel ? Data->currentQMV : Data->currentMV; |
1213 |
|
for (i = 0; i < 5; i++) { |
1214 |
|
Data->iMinSAD[i] = 256*4096; |
1215 |
|
backup[i] = v[i]; |
1216 |
|
} |
1217 |
|
|
1218 |
|
bits = CountMBBitsInter(Data, pMBs, x, y, pParam, MotionFlags); |
1219 |
|
if (bits == 0) return MODE_INTER; // quick stop |
1220 |
|
|
1221 |
|
if (inter4v) { |
1222 |
|
int bits_inter4v = CountMBBitsInter4v(Data, pMB, pMBs, x, y, pParam, MotionFlags, backup); |
1223 |
|
if (bits_inter4v < bits) { Data->iMinSAD[0] = bits = bits_inter4v; mode = MODE_INTER4V; } |
1224 |
|
} |
1225 |
|
|
1226 |
|
|
1227 |
|
intra = CountMBBitsIntra(Data); |
1228 |
|
|
1229 |
|
if (intra < bits) { *Data->iMinSAD = bits = intra; return MODE_INTRA; } |
1230 |
|
|
1231 |
|
return mode; |
1232 |
|
} |
1233 |
|
} |
1234 |
|
|
1235 |
static void |
static void |
1236 |
SearchP(const IMAGE * const pRef, |
SearchP(const IMAGE * const pRef, |
1237 |
const uint8_t * const pRefH, |
const uint8_t * const pRefH, |
1241 |
const int x, |
const int x, |
1242 |
const int y, |
const int y, |
1243 |
const uint32_t MotionFlags, |
const uint32_t MotionFlags, |
1244 |
|
const uint32_t GlobalFlags, |
1245 |
const uint32_t iQuant, |
const uint32_t iQuant, |
1246 |
SearchData * const Data, |
SearchData * const Data, |
1247 |
const MBParam * const pParam, |
const MBParam * const pParam, |
1265 |
Data->CurV = pCur->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->CurV = pCur->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
1266 |
Data->CurU = pCur->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->CurU = pCur->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
1267 |
|
|
1268 |
Data->Ref = pRef->y + (x + Data->iEdgedWidth*y) * 16*i; |
Data->RefP[0] = pRef->y + (x + Data->iEdgedWidth*y) * 16*i; |
1269 |
Data->RefH = pRefH + (x + Data->iEdgedWidth*y) * 16*i; |
Data->RefP[2] = pRefH + (x + Data->iEdgedWidth*y) * 16*i; |
1270 |
Data->RefV = pRefV + (x + Data->iEdgedWidth*y) * 16*i; |
Data->RefP[1] = pRefV + (x + Data->iEdgedWidth*y) * 16*i; |
1271 |
Data->RefHV = pRefHV + (x + Data->iEdgedWidth*y) * 16*i; |
Data->RefP[3] = pRefHV + (x + Data->iEdgedWidth*y) * 16*i; |
1272 |
Data->RefCV = pRef->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->RefP[4] = pRef->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
1273 |
Data->RefCU = pRef->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->RefP[5] = pRef->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
1274 |
|
|
1275 |
Data->lambda16 = lambda_vec16[iQuant]; |
Data->lambda16 = lambda_vec16[iQuant]; |
1276 |
Data->lambda8 = lambda_vec8[iQuant]; |
Data->lambda8 = lambda_vec8[iQuant]; |
1278 |
|
|
1279 |
if (pMB->dquant != NO_CHANGE) inter4v = 0; |
if (pMB->dquant != NO_CHANGE) inter4v = 0; |
1280 |
|
|
1281 |
for(i = 0; i < 5; i++) |
memset(Data->currentMV, 0, 5*sizeof(VECTOR)); |
|
Data->currentMV[i].x = Data->currentMV[i].y = 0; |
|
1282 |
|
|
1283 |
if (Data->qpel) Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
if (Data->qpel) Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
1284 |
else Data->predMV = pmv[0]; |
else Data->predMV = pmv[0]; |
1290 |
Data->iMinSAD[3] = pMB->sad8[2]; |
Data->iMinSAD[3] = pMB->sad8[2]; |
1291 |
Data->iMinSAD[4] = pMB->sad8[3]; |
Data->iMinSAD[4] = pMB->sad8[3]; |
1292 |
|
|
1293 |
if (x | y) { |
if ((!(GlobalFlags & XVID_MODEDECISION_BITS)) || (x | y)) { |
1294 |
threshA = Data->temp[0]; // that's when we keep this SAD atm |
threshA = Data->temp[0]; // that's where we keep this SAD atm |
1295 |
if (threshA < 512) threshA = 512; |
if (threshA < 512) threshA = 512; |
1296 |
else if (threshA > 1024) threshA = 1024; |
else if (threshA > 1024) threshA = 1024; |
1297 |
} else threshA = 512; |
} else |
1298 |
|
threshA = 512; |
1299 |
|
|
1300 |
PreparePredictionsP(pmv, x, y, pParam->mb_width, pParam->mb_height, |
PreparePredictionsP(pmv, x, y, pParam->mb_width, pParam->mb_height, |
1301 |
prevMBs + x + y * pParam->mb_width, Data->rrv); |
prevMBs + x + y * pParam->mb_width, Data->rrv); |
1309 |
|
|
1310 |
for (i = 1; i < 7; i++) { |
for (i = 1; i < 7; i++) { |
1311 |
if (!(mask = make_mask(pmv, i)) ) continue; |
if (!(mask = make_mask(pmv, i)) ) continue; |
1312 |
(*CheckCandidate)(pmv[i].x, pmv[i].y, mask, &iDirection, Data); |
CheckCandidate(pmv[i].x, pmv[i].y, mask, &iDirection, Data); |
1313 |
if (Data->iMinSAD[0] <= threshA) break; |
if (Data->iMinSAD[0] <= threshA) break; |
1314 |
} |
} |
1315 |
|
|
1316 |
if ((Data->iMinSAD[0] <= threshA) || |
if ((Data->iMinSAD[0] <= threshA) || |
1317 |
(MVequal(Data->currentMV[0], (prevMBs+x+y*pParam->mb_width)->mvs[0]) && |
(MVequal(Data->currentMV[0], (prevMBs+x+y*pParam->mb_width)->mvs[0]) && |
1318 |
(Data->iMinSAD[0] < (prevMBs+x+y*pParam->mb_width)->sad16))) |
(Data->iMinSAD[0] < (prevMBs+x+y*pParam->mb_width)->sad16))) { |
1319 |
inter4v = 0; |
if (!(GlobalFlags & XVID_MODEDECISION_BITS)) inter4v = 0; } |
1320 |
else { |
else { |
1321 |
|
|
1322 |
MainSearchFunc * MainSearchPtr; |
MainSearchFunc * MainSearchPtr; |
1324 |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
1325 |
else MainSearchPtr = DiamondSearch; |
else MainSearchPtr = DiamondSearch; |
1326 |
|
|
1327 |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
MainSearchPtr(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
1328 |
|
|
1329 |
/* extended search, diamond starting in 0,0 and in prediction. |
/* extended search, diamond starting in 0,0 and in prediction. |
1330 |
note that this search is/might be done in halfpel positions, |
note that this search is/might be done in halfpel positions, |
1336 |
if (Data->rrv) { |
if (Data->rrv) { |
1337 |
startMV.x = RRV_MV_SCALEUP(startMV.x); |
startMV.x = RRV_MV_SCALEUP(startMV.x); |
1338 |
startMV.y = RRV_MV_SCALEUP(startMV.y); |
startMV.y = RRV_MV_SCALEUP(startMV.y); |
1339 |
} else |
} |
|
if (!(MotionFlags & PMV_HALFPELREFINE16)) // who's gonna use extsearch and no halfpel? |
|
|
startMV.x = EVEN(startMV.x); startMV.y = EVEN(startMV.y); |
|
1340 |
if (!(MVequal(startMV, backupMV))) { |
if (!(MVequal(startMV, backupMV))) { |
1341 |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
1342 |
|
|
1343 |
(*CheckCandidate)(startMV.x, startMV.y, 255, &iDirection, Data); |
CheckCandidate(startMV.x, startMV.y, 255, &iDirection, Data); |
1344 |
(*MainSearchPtr)(startMV.x, startMV.y, Data, 255); |
MainSearchPtr(startMV.x, startMV.y, Data, 255); |
1345 |
if (bSAD < Data->iMinSAD[0]) { |
if (bSAD < Data->iMinSAD[0]) { |
1346 |
Data->currentMV[0] = backupMV; |
Data->currentMV[0] = backupMV; |
1347 |
Data->iMinSAD[0] = bSAD; } |
Data->iMinSAD[0] = bSAD; } |
1348 |
} |
} |
1349 |
|
|
1350 |
backupMV = Data->currentMV[0]; |
backupMV = Data->currentMV[0]; |
1351 |
if (MotionFlags & PMV_HALFPELREFINE16 && !Data->rrv) startMV.x = startMV.y = 1; |
startMV.x = startMV.y = 1; |
|
else startMV.x = startMV.y = 0; |
|
1352 |
if (!(MVequal(startMV, backupMV))) { |
if (!(MVequal(startMV, backupMV))) { |
1353 |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
1354 |
|
|
1355 |
(*CheckCandidate)(startMV.x, startMV.y, 255, &iDirection, Data); |
CheckCandidate(startMV.x, startMV.y, 255, &iDirection, Data); |
1356 |
(*MainSearchPtr)(startMV.x, startMV.y, Data, 255); |
MainSearchPtr(startMV.x, startMV.y, Data, 255); |
1357 |
if (bSAD < Data->iMinSAD[0]) { |
if (bSAD < Data->iMinSAD[0]) { |
1358 |
Data->currentMV[0] = backupMV; |
Data->currentMV[0] = backupMV; |
1359 |
Data->iMinSAD[0] = bSAD; } |
Data->iMinSAD[0] = bSAD; } |
1361 |
} |
} |
1362 |
} |
} |
1363 |
|
|
1364 |
if (MotionFlags & PMV_HALFPELREFINE16) SubpelRefine(Data); |
if (MotionFlags & PMV_HALFPELREFINE16) |
1365 |
|
if ((!(MotionFlags & HALFPELREFINE16_BITS)) || Data->iMinSAD[0] < 200*(int)iQuant) |
1366 |
|
SubpelRefine(Data); |
1367 |
|
|
1368 |
for(i = 0; i < 5; i++) { |
for(i = 0; i < 5; i++) { |
1369 |
Data->currentQMV[i].x = 2 * Data->currentMV[i].x; // initialize qpel vectors |
Data->currentQMV[i].x = 2 * Data->currentMV[i].x; // initialize qpel vectors |
1370 |
Data->currentQMV[i].y = 2 * Data->currentMV[i].y; |
Data->currentQMV[i].y = 2 * Data->currentMV[i].y; |
1371 |
} |
} |
1372 |
|
|
1373 |
if (Data->qpel && MotionFlags & PMV_QUARTERPELREFINE16) { |
if (MotionFlags & PMV_QUARTERPELREFINE16) { |
1374 |
Data->qpel_precision = 1; |
|
1375 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
1376 |
pParam->width, pParam->height, Data->iFcode, 1, 0); |
pParam->width, pParam->height, Data->iFcode, 1, 0); |
1377 |
|
|
1378 |
|
if ((!(MotionFlags & QUARTERPELREFINE16_BITS)) || (Data->iMinSAD[0] < 200*(int)iQuant)) { |
1379 |
|
Data->qpel_precision = 1; |
1380 |
SubpelRefine(Data); |
SubpelRefine(Data); |
1381 |
} |
} |
1382 |
|
} |
1383 |
|
|
1384 |
|
if ((!(GlobalFlags & XVID_MODEDECISION_BITS)) && (Data->iMinSAD[0] < (int32_t)iQuant * 30)) inter4v = 0; |
1385 |
|
|
1386 |
|
if (inter4v && (!(GlobalFlags & XVID_MODEDECISION_BITS) || |
1387 |
|
(!(MotionFlags & QUARTERPELREFINE8_BITS)) || (!(MotionFlags & HALFPELREFINE8_BITS)) || |
1388 |
|
((!(MotionFlags & EXTSEARCH_BITS)) && (!(MotionFlags&PMV_EXTSEARCH8)) ))) { |
1389 |
|
// if decision is BITS-based and all refinement steps will be done in BITS domain, there is no reason to call this loop |
1390 |
|
|
|
if (Data->iMinSAD[0] < (int32_t)iQuant * 30) inter4v = 0; |
|
|
if (inter4v) { |
|
1391 |
SearchData Data8; |
SearchData Data8; |
1392 |
memcpy(&Data8, Data, sizeof(SearchData)); //quick copy of common data |
memcpy(&Data8, Data, sizeof(SearchData)); //quick copy of common data |
1393 |
|
|
1396 |
Search8(Data, 2*x, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 2, &Data8); |
Search8(Data, 2*x, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 2, &Data8); |
1397 |
Search8(Data, 2*x + 1, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 3, &Data8); |
Search8(Data, 2*x + 1, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 3, &Data8); |
1398 |
|
|
1399 |
if (Data->chroma) { |
if ((Data->chroma) && (!(GlobalFlags & XVID_MODEDECISION_BITS))) { |
1400 |
|
// chroma is only used for comparsion to INTER. if the comparsion will be done in BITS domain, there is no reason to compute it |
1401 |
int sumx = 0, sumy = 0; |
int sumx = 0, sumy = 0; |
1402 |
const int div = 1 + Data->qpel; |
const int div = 1 + Data->qpel; |
1403 |
const VECTOR * const mv = Data->qpel ? pMB->qmvs : pMB->mvs; |
const VECTOR * const mv = Data->qpel ? pMB->qmvs : pMB->mvs; |
1412 |
} |
} |
1413 |
} |
} |
1414 |
|
|
1415 |
|
inter4v = ModeDecision(iQuant, Data, inter4v, pMB, pMBs, x, y, pParam, MotionFlags, GlobalFlags); |
1416 |
|
|
1417 |
if (Data->rrv) { |
if (Data->rrv) { |
1418 |
Data->currentMV[0].x = RRV_MV_SCALEDOWN(Data->currentMV[0].x); |
Data->currentMV[0].x = RRV_MV_SCALEDOWN(Data->currentMV[0].x); |
1419 |
Data->currentMV[0].y = RRV_MV_SCALEDOWN(Data->currentMV[0].y); |
Data->currentMV[0].y = RRV_MV_SCALEDOWN(Data->currentMV[0].y); |
1420 |
} |
} |
1421 |
|
|
1422 |
if (!(inter4v) || |
if (inter4v == MODE_INTER) { |
|
(Data->iMinSAD[0] < Data->iMinSAD[1] + Data->iMinSAD[2] + |
|
|
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * (int32_t)iQuant )) { |
|
|
// INTER MODE |
|
1423 |
pMB->mode = MODE_INTER; |
pMB->mode = MODE_INTER; |
1424 |
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
1425 |
pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = pMB->sad8[2] = pMB->sad8[3] = Data->iMinSAD[0]; |
pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = pMB->sad8[2] = pMB->sad8[3] = Data->iMinSAD[0]; |
1433 |
pMB->pmvs[0].x = Data->currentMV[0].x - Data->predMV.x; |
pMB->pmvs[0].x = Data->currentMV[0].x - Data->predMV.x; |
1434 |
pMB->pmvs[0].y = Data->currentMV[0].y - Data->predMV.y; |
pMB->pmvs[0].y = Data->currentMV[0].y - Data->predMV.y; |
1435 |
} |
} |
1436 |
} else { |
|
1437 |
// INTER4V MODE; all other things are already set in Search8 |
} else if (inter4v == MODE_INTER4V) { |
1438 |
pMB->mode = MODE_INTER4V; |
pMB->mode = MODE_INTER4V; |
1439 |
pMB->sad16 = Data->iMinSAD[1] + Data->iMinSAD[2] + |
pMB->sad16 = Data->iMinSAD[0]; |
1440 |
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * iQuant; |
} else { // INTRA mode |
1441 |
|
SkipMacroblockP(pMB, 0); // not skip, but similar enough |
1442 |
|
pMB->mode = MODE_INTRA; |
1443 |
} |
} |
1444 |
|
|
1445 |
} |
} |
1446 |
|
|
1447 |
static void |
static void |
1472 |
*(Data->iMinSAD) += (Data->lambda8 * i * (*Data->iMinSAD + NEIGH_8X8_BIAS))>>10; |
*(Data->iMinSAD) += (Data->lambda8 * i * (*Data->iMinSAD + NEIGH_8X8_BIAS))>>10; |
1473 |
|
|
1474 |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8|PMV_QUARTERPELREFINE8)) { |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8|PMV_QUARTERPELREFINE8)) { |
|
if (Data->rrv) i = 2; else i = 1; |
|
1475 |
|
|
1476 |
Data->Ref = OldData->Ref + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
if (Data->rrv) i = 16; else i = 8; |
1477 |
Data->RefH = OldData->RefH + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
|
1478 |
Data->RefV = OldData->RefV + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
Data->RefP[0] = OldData->RefP[0] + i * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1479 |
Data->RefHV = OldData->RefHV + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
Data->RefP[1] = OldData->RefP[1] + i * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1480 |
|
Data->RefP[2] = OldData->RefP[2] + i * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1481 |
|
Data->RefP[3] = OldData->RefP[3] + i * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1482 |
|
|
1483 |
Data->Cur = OldData->Cur + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
Data->Cur = OldData->Cur + i * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1484 |
Data->qpel_precision = 0; |
Data->qpel_precision = 0; |
1485 |
|
|
1486 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
1489 |
if (!Data->rrv) CheckCandidate = CheckCandidate8; |
if (!Data->rrv) CheckCandidate = CheckCandidate8; |
1490 |
else CheckCandidate = CheckCandidate16no4v; |
else CheckCandidate = CheckCandidate16no4v; |
1491 |
|
|
1492 |
if (MotionFlags & PMV_EXTSEARCH8) { |
if (MotionFlags & PMV_EXTSEARCH8 && (!(MotionFlags & EXTSEARCH_BITS))) { |
1493 |
int32_t temp_sad = *(Data->iMinSAD); // store current MinSAD |
int32_t temp_sad = *(Data->iMinSAD); // store current MinSAD |
1494 |
|
|
1495 |
MainSearchFunc *MainSearchPtr; |
MainSearchFunc *MainSearchPtr; |
1497 |
else if (MotionFlags & PMV_ADVANCEDDIAMOND8) MainSearchPtr = AdvDiamondSearch; |
else if (MotionFlags & PMV_ADVANCEDDIAMOND8) MainSearchPtr = AdvDiamondSearch; |
1498 |
else MainSearchPtr = DiamondSearch; |
else MainSearchPtr = DiamondSearch; |
1499 |
|
|
1500 |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, 255); |
MainSearchPtr(Data->currentMV->x, Data->currentMV->y, Data, 255); |
1501 |
|
|
1502 |
if(*(Data->iMinSAD) < temp_sad) { |
if(*(Data->iMinSAD) < temp_sad) { |
1503 |
Data->currentQMV->x = 2 * Data->currentMV->x; // update our qpel vector |
Data->currentQMV->x = 2 * Data->currentMV->x; // update our qpel vector |
1614 |
Data->qpel_precision = 0; |
Data->qpel_precision = 0; |
1615 |
Data->temp[5] = Data->temp[6] = Data->temp[7] = 256*4096; // reset chroma-sad cache |
Data->temp[5] = Data->temp[6] = Data->temp[7] = 256*4096; // reset chroma-sad cache |
1616 |
|
|
1617 |
Data->Ref = pRef->y + (x + y * Data->iEdgedWidth) * 16; |
Data->RefP[0] = pRef->y + (x + Data->iEdgedWidth*y) * 16; |
1618 |
Data->RefH = pRefH + (x + y * Data->iEdgedWidth) * 16; |
Data->RefP[2] = pRefH + (x + Data->iEdgedWidth*y) * 16; |
1619 |
Data->RefV = pRefV + (x + y * Data->iEdgedWidth) * 16; |
Data->RefP[1] = pRefV + (x + Data->iEdgedWidth*y) * 16; |
1620 |
Data->RefHV = pRefHV + (x + y * Data->iEdgedWidth) * 16; |
Data->RefP[3] = pRefHV + (x + Data->iEdgedWidth*y) * 16; |
1621 |
Data->RefCU = pRef->u + (x + y * Data->iEdgedWidth/2) * 8; |
Data->RefP[4] = pRef->u + (x + y * (Data->iEdgedWidth/2)) * 8; |
1622 |
Data->RefCV = pRef->v + (x + y * Data->iEdgedWidth/2) * 8; |
Data->RefP[5] = pRef->v + (x + y * (Data->iEdgedWidth/2)) * 8; |
1623 |
|
|
1624 |
Data->predMV = *predMV; |
Data->predMV = *predMV; |
1625 |
|
|
1644 |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
1645 |
else MainSearchPtr = DiamondSearch; |
else MainSearchPtr = DiamondSearch; |
1646 |
|
|
1647 |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
MainSearchPtr(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
1648 |
|
|
1649 |
SubpelRefine(Data); |
SubpelRefine(Data); |
1650 |
|
|
1701 |
|
|
1702 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
1703 |
dy += Data->directmvF[k].y / div; |
dy += Data->directmvF[k].y / div; |
1704 |
dx += Data->directmvF[0].x / div; |
dx += Data->directmvF[k].x / div; |
1705 |
b_dy += Data->directmvB[0].y / div; |
b_dy += Data->directmvB[k].y / div; |
1706 |
b_dx += Data->directmvB[0].x / div; |
b_dx += Data->directmvB[k].x / div; |
1707 |
} |
} |
1708 |
|
|
1709 |
dy = (dy >> 3) + roundtab_76[dy & 0xf]; |
dy = (dy >> 3) + roundtab_76[dy & 0xf]; |
1723 |
b_Ref->v + (y*8 + b_dy/2) * stride + x*8 + b_dx/2, |
b_Ref->v + (y*8 + b_dy/2) * stride + x*8 + b_dx/2, |
1724 |
stride); |
stride); |
1725 |
|
|
1726 |
if (sum < 2 * MAX_CHROMA_SAD_FOR_SKIP * pMB->quant) pMB->mode = MODE_DIRECT_NONE_MV; //skipped |
if (sum < 2 * MAX_CHROMA_SAD_FOR_SKIP * pMB->quant) { |
1727 |
|
pMB->mode = MODE_DIRECT_NONE_MV; //skipped |
1728 |
|
for (k = 0; k < 4; k++) { |
1729 |
|
pMB->qmvs[k] = pMB->mvs[k]; |
1730 |
|
pMB->b_qmvs[k] = pMB->b_mvs[k]; |
1731 |
|
} |
1732 |
|
} |
1733 |
} |
} |
1734 |
|
|
1735 |
static __inline uint32_t |
static __inline uint32_t |
1757 |
MainSearchFunc *MainSearchPtr; |
MainSearchFunc *MainSearchPtr; |
1758 |
|
|
1759 |
*Data->iMinSAD = 256*4096; |
*Data->iMinSAD = 256*4096; |
1760 |
Data->Ref = f_Ref->y + k; |
Data->RefP[0] = f_Ref->y + k; |
1761 |
Data->RefH = f_RefH + k; |
Data->RefP[2] = f_RefH + k; |
1762 |
Data->RefV = f_RefV + k; |
Data->RefP[1] = f_RefV + k; |
1763 |
Data->RefHV = f_RefHV + k; |
Data->RefP[3] = f_RefHV + k; |
1764 |
Data->bRef = b_Ref->y + k; |
Data->b_RefP[0] = b_Ref->y + k; |
1765 |
Data->bRefH = b_RefH + k; |
Data->b_RefP[2] = b_RefH + k; |
1766 |
Data->bRefV = b_RefV + k; |
Data->b_RefP[1] = b_RefV + k; |
1767 |
Data->bRefHV = b_RefHV + k; |
Data->b_RefP[3] = b_RefHV + k; |
1768 |
Data->RefCU = f_Ref->u + (x + (Data->iEdgedWidth/2) * y) * 8; |
Data->RefP[4] = f_Ref->u + (x + (Data->iEdgedWidth/2) * y) * 8; |
1769 |
Data->RefCV = f_Ref->v + (x + (Data->iEdgedWidth/2) * y) * 8; |
Data->RefP[5] = f_Ref->v + (x + (Data->iEdgedWidth/2) * y) * 8; |
1770 |
Data->b_RefCU = b_Ref->u + (x + (Data->iEdgedWidth/2) * y) * 8; |
Data->b_RefP[4] = b_Ref->u + (x + (Data->iEdgedWidth/2) * y) * 8; |
1771 |
Data->b_RefCV = b_Ref->v + (x + (Data->iEdgedWidth/2) * y) * 8; |
Data->b_RefP[5] = b_Ref->v + (x + (Data->iEdgedWidth/2) * y) * 8; |
1772 |
|
|
1773 |
k = Data->qpel ? 4 : 2; |
k = Data->qpel ? 4 : 2; |
1774 |
Data->max_dx = k * (pParam->width - x * 16); |
Data->max_dx = k * (pParam->width - x * 16); |
1804 |
|
|
1805 |
CheckCandidate = b_mb->mode == MODE_INTER4V ? CheckCandidateDirect : CheckCandidateDirectno4v; |
CheckCandidate = b_mb->mode == MODE_INTER4V ? CheckCandidateDirect : CheckCandidateDirectno4v; |
1806 |
|
|
1807 |
(*CheckCandidate)(0, 0, 255, &k, Data); |
CheckCandidate(0, 0, 255, &k, Data); |
1808 |
|
|
1809 |
// initial (fast) skip decision |
// initial (fast) skip decision |
1810 |
if (*Data->iMinSAD < pMB->quant * INITIAL_SKIP_THRESH * (2 + Data->chroma?1:0)) { |
if (*Data->iMinSAD < pMB->quant * INITIAL_SKIP_THRESH * (2 + Data->chroma?1:0)) { |
1818 |
} |
} |
1819 |
} |
} |
1820 |
|
|
1821 |
|
*Data->iMinSAD += Data->lambda16; |
1822 |
skip_sad = *Data->iMinSAD; |
skip_sad = *Data->iMinSAD; |
1823 |
|
|
1824 |
// DIRECT MODE DELTA VECTOR SEARCH. |
// DIRECT MODE DELTA VECTOR SEARCH. |
1828 |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
1829 |
else MainSearchPtr = DiamondSearch; |
else MainSearchPtr = DiamondSearch; |
1830 |
|
|
1831 |
(*MainSearchPtr)(0, 0, Data, 255); |
MainSearchPtr(0, 0, Data, 255); |
1832 |
|
|
1833 |
SubpelRefine(Data); |
SubpelRefine(Data); |
1834 |
|
|
1899 |
fData->iFcode = bData.bFcode = fcode; fData->bFcode = bData.iFcode = bcode; |
fData->iFcode = bData.bFcode = fcode; fData->bFcode = bData.iFcode = bcode; |
1900 |
|
|
1901 |
i = (x + y * fData->iEdgedWidth) * 16; |
i = (x + y * fData->iEdgedWidth) * 16; |
|
bData.bRef = fData->Ref = f_Ref->y + i; |
|
|
bData.bRefH = fData->RefH = f_RefH + i; |
|
|
bData.bRefV = fData->RefV = f_RefV + i; |
|
|
bData.bRefHV = fData->RefHV = f_RefHV + i; |
|
|
bData.Ref = fData->bRef = b_Ref->y + i; |
|
|
bData.RefH = fData->bRefH = b_RefH + i; |
|
|
bData.RefV = fData->bRefV = b_RefV + i; |
|
|
bData.RefHV = fData->bRefHV = b_RefHV + i; |
|
|
bData.b_RefCU = fData->RefCU = f_Ref->u + (x + (fData->iEdgedWidth/2) * y) * 8; |
|
|
bData.b_RefCV = fData->RefCV = f_Ref->v + (x + (fData->iEdgedWidth/2) * y) * 8; |
|
|
bData.RefCU = fData->b_RefCU = b_Ref->u + (x + (fData->iEdgedWidth/2) * y) * 8; |
|
|
bData.RefCV = fData->b_RefCV = b_Ref->v + (x + (fData->iEdgedWidth/2) * y) * 8; |
|
1902 |
|
|
1903 |
|
bData.b_RefP[0] = fData->RefP[0] = f_Ref->y + i; |
1904 |
|
bData.b_RefP[2] = fData->RefP[2] = f_RefH + i; |
1905 |
|
bData.b_RefP[1] = fData->RefP[1] = f_RefV + i; |
1906 |
|
bData.b_RefP[3] = fData->RefP[3] = f_RefHV + i; |
1907 |
|
bData.RefP[0] = fData->b_RefP[0] = b_Ref->y + i; |
1908 |
|
bData.RefP[2] = fData->b_RefP[2] = b_RefH + i; |
1909 |
|
bData.RefP[1] = fData->b_RefP[1] = b_RefV + i; |
1910 |
|
bData.RefP[3] = fData->b_RefP[3] = b_RefHV + i; |
1911 |
|
bData.b_RefP[4] = fData->RefP[4] = f_Ref->u + (x + (fData->iEdgedWidth/2) * y) * 8; |
1912 |
|
bData.b_RefP[5] = fData->RefP[5] = f_Ref->v + (x + (fData->iEdgedWidth/2) * y) * 8; |
1913 |
|
bData.RefP[4] = fData->b_RefP[4] = b_Ref->u + (x + (fData->iEdgedWidth/2) * y) * 8; |
1914 |
|
bData.RefP[5] = fData->b_RefP[5] = b_Ref->v + (x + (fData->iEdgedWidth/2) * y) * 8; |
1915 |
|
|
1916 |
bData.bpredMV = fData->predMV = *f_predMV; |
bData.bpredMV = fData->predMV = *f_predMV; |
1917 |
fData->bpredMV = bData.predMV = *b_predMV; |
fData->bpredMV = bData.predMV = *b_predMV; |
2145 |
|
|
2146 |
int i, mask; |
int i, mask; |
2147 |
VECTOR pmv[3]; |
VECTOR pmv[3]; |
2148 |
MACROBLOCK * pMB = &pMBs[x + y * pParam->mb_width]; |
MACROBLOCK * const pMB = &pMBs[x + y * pParam->mb_width]; |
2149 |
|
|
2150 |
for (i = 0; i < 5; i++) Data->iMinSAD[i] = MV_MAX_ERROR; |
for (i = 0; i < 5; i++) Data->iMinSAD[i] = MV_MAX_ERROR; |
2151 |
|
|
2159 |
else Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); //else median |
else Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); //else median |
2160 |
|
|
2161 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
2162 |
pParam->width, pParam->height, Data->iFcode - pParam->m_quarterpel, 0, Data->rrv); |
pParam->width, pParam->height, Data->iFcode - pParam->m_quarterpel, 0, 0); |
2163 |
|
|
2164 |
Data->Cur = pCur + (x + y * pParam->edged_width) * 16; |
Data->Cur = pCur + (x + y * pParam->edged_width) * 16; |
2165 |
Data->Ref = pRef + (x + y * pParam->edged_width) * 16; |
Data->RefP[0] = pRef + (x + y * pParam->edged_width) * 16; |
2166 |
|
|
2167 |
pmv[1].x = EVEN(pMB->mvs[0].x); |
pmv[1].x = EVEN(pMB->mvs[0].x); |
2168 |
pmv[1].y = EVEN(pMB->mvs[0].y); |
pmv[1].y = EVEN(pMB->mvs[0].y); |
2172 |
|
|
2173 |
CheckCandidate32I(0, 0, 255, &i, Data); |
CheckCandidate32I(0, 0, 255, &i, Data); |
2174 |
|
|
2175 |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP * 4) { |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP) { |
2176 |
|
|
2177 |
if (!(mask = make_mask(pmv, 1))) |
if (!(mask = make_mask(pmv, 1))) |
2178 |
CheckCandidate32I(pmv[1].x, pmv[1].y, mask, &i, Data); |
CheckCandidate32I(pmv[1].x, pmv[1].y, mask, &i, Data); |
2179 |
if (!(mask = make_mask(pmv, 2))) |
if (!(mask = make_mask(pmv, 2))) |
2180 |
CheckCandidate32I(pmv[2].x, pmv[2].y, mask, &i, Data); |
CheckCandidate32I(pmv[2].x, pmv[2].y, mask, &i, Data); |
2181 |
|
|
2182 |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP * 4) // diamond only if needed |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP) // diamond only if needed |
2183 |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, i); |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, i); |
2184 |
|
} |
2185 |
|
|
2186 |
for (i = 0; i < 4; i++) { |
for (i = 0; i < 4; i++) { |
2187 |
MACROBLOCK * MB = &pMBs[x + (i&1) + (y+(i>>1)) * pParam->mb_width]; |
MACROBLOCK * MB = &pMBs[x + (i&1) + (y+(i>>1)) * pParam->mb_width]; |
2190 |
MB->sad16 = Data->iMinSAD[i+1]; |
MB->sad16 = Data->iMinSAD[i+1]; |
2191 |
} |
} |
2192 |
} |
} |
|
} |
|
2193 |
|
|
2194 |
#define INTRA_BIAS 2500 |
#define INTRA_THRESH 2400 |
2195 |
#define INTRA_THRESH 1500 |
#define INTER_THRESH 1300 |
|
#define INTER_THRESH 1400 |
|
2196 |
|
|
2197 |
int |
int |
2198 |
MEanalysis( const IMAGE * const pRef, |
MEanalysis( const IMAGE * const pRef, |
2199 |
FRAMEINFO * const Current, |
const FRAMEINFO * const Current, |
2200 |
MBParam * const pParam, |
const MBParam * const pParam, |
2201 |
int maxIntra, //maximum number if non-I frames |
const int maxIntra, //maximum number if non-I frames |
2202 |
int intraCount, //number of non-I frames after last I frame; 0 if we force P/B frame |
const int intraCount, //number of non-I frames after last I frame; 0 if we force P/B frame |
2203 |
int bCount) // number of B frames in a row |
const int bCount, // number of B frames in a row |
2204 |
|
const int b_thresh) |
2205 |
{ |
{ |
2206 |
uint32_t x, y, intra = 0; |
uint32_t x, y, intra = 0; |
2207 |
int sSAD = 0; |
int sSAD = 0; |
2208 |
MACROBLOCK * const pMBs = Current->mbs; |
MACROBLOCK * const pMBs = Current->mbs; |
2209 |
const IMAGE * const pCurrent = &Current->image; |
const IMAGE * const pCurrent = &Current->image; |
2210 |
int IntraThresh = INTRA_THRESH, InterThresh = INTER_THRESH; |
int IntraThresh = INTRA_THRESH, InterThresh = INTER_THRESH + 10*b_thresh; |
2211 |
|
int s = 0, blocks = 0; |
2212 |
|
|
2213 |
int32_t iMinSAD[5], temp[5]; |
int32_t iMinSAD[5], temp[5]; |
2214 |
VECTOR currentMV[5]; |
VECTOR currentMV[5]; |
2217 |
Data.currentMV = currentMV; |
Data.currentMV = currentMV; |
2218 |
Data.iMinSAD = iMinSAD; |
Data.iMinSAD = iMinSAD; |
2219 |
Data.iFcode = Current->fcode; |
Data.iFcode = Current->fcode; |
|
Data.rrv = Current->global_flags & XVID_REDUCED; |
|
2220 |
Data.temp = temp; |
Data.temp = temp; |
2221 |
CheckCandidate = CheckCandidate32I; |
CheckCandidate = CheckCandidate32I; |
2222 |
|
|
2223 |
if (intraCount != 0 && intraCount < 10) // we're right after an I frame |
if (intraCount != 0 && intraCount < 10) // we're right after an I frame |
2224 |
IntraThresh += 4 * (intraCount - 10) * (intraCount - 10); |
IntraThresh += 8 * (intraCount - 10) * (intraCount - 10); |
2225 |
else |
else |
2226 |
if ( 5*(maxIntra - intraCount) < maxIntra) // we're close to maximum. 2 sec when max is 10 sec |
if ( 5*(maxIntra - intraCount) < maxIntra) // we're close to maximum. 2 sec when max is 10 sec |
2227 |
IntraThresh -= (IntraThresh * (maxIntra - 5*(maxIntra - intraCount)))/maxIntra; |
IntraThresh -= (IntraThresh * (maxIntra - 5*(maxIntra - intraCount)))/maxIntra; |
2228 |
|
|
2229 |
InterThresh += 400 * (1 - bCount); |
InterThresh -= (350 - 8*b_thresh) * bCount; |
2230 |
if (InterThresh < 300) InterThresh = 300; |
if (InterThresh < 300 + 5*b_thresh) InterThresh = 300 + 5*b_thresh; |
2231 |
|
|
2232 |
if (sadInit) (*sadInit) (); |
if (sadInit) (*sadInit) (); |
2233 |
|
|
2234 |
for (y = 1; y < pParam->mb_height-1; y += 2) { |
for (y = 1; y < pParam->mb_height-1; y += 2) { |
2235 |
for (x = 1; x < pParam->mb_width-1; x += 2) { |
for (x = 1; x < pParam->mb_width-1; x += 2) { |
2236 |
int i; |
int i; |
2237 |
|
blocks += 4; |
2238 |
|
|
2239 |
if (bCount == 0) pMBs[x + y * pParam->mb_width].mvs[0] = zeroMV; |
if (bCount == 0) pMBs[x + y * pParam->mb_width].mvs[0] = zeroMV; |
2240 |
|
else { //extrapolation of the vector found for last frame |
2241 |
|
pMBs[x + y * pParam->mb_width].mvs[0].x = |
2242 |
|
(pMBs[x + y * pParam->mb_width].mvs[0].x * (bCount+1) ) / bCount; |
2243 |
|
pMBs[x + y * pParam->mb_width].mvs[0].y = |
2244 |
|
(pMBs[x + y * pParam->mb_width].mvs[0].y * (bCount+1) ) / bCount; |
2245 |
|
} |
2246 |
|
|
2247 |
MEanalyzeMB(pRef->y, pCurrent->y, x, y, pParam, pMBs, &Data); |
MEanalyzeMB(pRef->y, pCurrent->y, x, y, pParam, pMBs, &Data); |
2248 |
|
|
2254 |
pParam->edged_width); |
pParam->edged_width); |
2255 |
if (dev + IntraThresh < pMB->sad16) { |
if (dev + IntraThresh < pMB->sad16) { |
2256 |
pMB->mode = MODE_INTRA; |
pMB->mode = MODE_INTRA; |
2257 |
if (++intra > (pParam->mb_height-2)*(pParam->mb_width-2)/2) return I_VOP; |
if (++intra > ((pParam->mb_height-2)*(pParam->mb_width-2))/2) return I_VOP; |
2258 |
} |
} |
2259 |
} |
} |
2260 |
|
if (pMB->mvs[0].x == 0 && pMB->mvs[0].y == 0) s++; |
2261 |
|
|
2262 |
sSAD += pMB->sad16; |
sSAD += pMB->sad16; |
2263 |
} |
} |
2264 |
} |
} |
2265 |
} |
} |
2266 |
sSAD /= (pParam->mb_height-2)*(pParam->mb_width-2); |
|
2267 |
// if (sSAD > IntraThresh + INTRA_BIAS) return I_VOP; |
sSAD /= blocks; |
2268 |
|
s = (10*s) / blocks; |
2269 |
|
|
2270 |
|
if (s > 4) sSAD += (s - 3) * (300 - 2*b_thresh); //static block - looks bad when in bframe... |
2271 |
|
|
2272 |
if (sSAD > InterThresh ) return P_VOP; |
if (sSAD > InterThresh ) return P_VOP; |
2273 |
emms(); |
emms(); |
2274 |
return B_VOP; |
return B_VOP; |
|
|
|
2275 |
} |
} |
2276 |
|
|
2277 |
|
|
2303 |
double meanx,meany; |
double meanx,meany; |
2304 |
int num,oldnum; |
int num,oldnum; |
2305 |
|
|
2306 |
if (!MBmask) { fprintf(stderr,"Mem error\n"); return gmc;} |
if (!MBmask) { fprintf(stderr,"Mem error\n"); |
2307 |
|
gmc.duv[0].x= gmc.duv[0].y = |
2308 |
|
gmc.duv[1].x= gmc.duv[1].y = |
2309 |
|
gmc.duv[2].x= gmc.duv[2].y = 0; |
2310 |
|
return gmc; } |
2311 |
|
|
2312 |
// filter mask of all blocks |
// filter mask of all blocks |
2313 |
|
|
2314 |
for (my = 1; my < MBh-1; my++) |
for (my = 1; my < (uint32_t)MBh-1; my++) |
2315 |
for (mx = 1; mx < MBw-1; mx++) |
for (mx = 1; mx < (uint32_t)MBw-1; mx++) |
2316 |
{ |
{ |
2317 |
const int mbnum = mx + my * MBw; |
const int mbnum = mx + my * MBw; |
2318 |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2328 |
MBmask[mbnum]=1; |
MBmask[mbnum]=1; |
2329 |
} |
} |
2330 |
|
|
2331 |
for (my = 1; my < MBh-1; my++) |
for (my = 1; my < (uint32_t)MBh-1; my++) |
2332 |
for (mx = 1; mx < MBw-1; mx++) |
for (mx = 1; mx < (uint32_t)MBw-1; mx++) |
2333 |
{ |
{ |
2334 |
const uint8_t *const pCur = current->image.y + 16*my*pParam->edged_width + 16*mx; |
const uint8_t *const pCur = current->image.y + 16*my*pParam->edged_width + 16*mx; |
2335 |
|
|
2337 |
if (!MBmask[mbnum]) |
if (!MBmask[mbnum]) |
2338 |
continue; |
continue; |
2339 |
|
|
2340 |
if (sad16 ( pCur, pCur+1 , pParam->edged_width, 65536) <= grad ) |
if (sad16 ( pCur, pCur+1 , pParam->edged_width, 65536) <= (uint32_t)grad ) |
2341 |
MBmask[mbnum] = 0; |
MBmask[mbnum] = 0; |
2342 |
if (sad16 ( pCur, pCur+pParam->edged_width, pParam->edged_width, 65536) <= grad ) |
if (sad16 ( pCur, pCur+pParam->edged_width, pParam->edged_width, 65536) <= (uint32_t)grad ) |
2343 |
MBmask[mbnum] = 0; |
MBmask[mbnum] = 0; |
2344 |
|
|
2345 |
} |
} |
2350 |
|
|
2351 |
a = b = c = n = 0; |
a = b = c = n = 0; |
2352 |
DtimesF[0] = DtimesF[1] = DtimesF[2] = DtimesF[3] = 0.; |
DtimesF[0] = DtimesF[1] = DtimesF[2] = DtimesF[3] = 0.; |
2353 |
for (my = 0; my < MBh; my++) |
for (my = 0; my < (uint32_t)MBh; my++) |
2354 |
for (mx = 0; mx < MBw; mx++) |
for (mx = 0; mx < (uint32_t)MBw; mx++) |
2355 |
{ |
{ |
2356 |
const int mbnum = mx + my * MBw; |
const int mbnum = mx + my * MBw; |
2357 |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2388 |
|
|
2389 |
meanx = meany = 0.; |
meanx = meany = 0.; |
2390 |
oldnum = 0; |
oldnum = 0; |
2391 |
for (my = 0; my < MBh; my++) |
for (my = 0; my < (uint32_t)MBh; my++) |
2392 |
for (mx = 0; mx < MBw; mx++) |
for (mx = 0; mx < (uint32_t)MBw; mx++) |
2393 |
{ |
{ |
2394 |
const int mbnum = mx + my * MBw; |
const int mbnum = mx + my * MBw; |
2395 |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2417 |
fprintf(stderr,"meanx = %8.5f meany = %8.5f %d\n",meanx,meany, oldnum); |
fprintf(stderr,"meanx = %8.5f meany = %8.5f %d\n",meanx,meany, oldnum); |
2418 |
*/ |
*/ |
2419 |
num = 0; |
num = 0; |
2420 |
for (my = 0; my < MBh; my++) |
for (my = 0; my < (uint32_t)MBh; my++) |
2421 |
for (mx = 0; mx < MBw; mx++) |
for (mx = 0; mx < (uint32_t)MBw; mx++) |
2422 |
{ |
{ |
2423 |
const int mbnum = mx + my * MBw; |
const int mbnum = mx + my * MBw; |
2424 |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2456 |
|
|
2457 |
return gmc; |
return gmc; |
2458 |
} |
} |
2459 |
|
|
2460 |
|
// functions which perform BITS-based search/bitcount |
2461 |
|
|
2462 |
|
static int |
2463 |
|
CountMBBitsInter(SearchData * const Data, |
2464 |
|
const MACROBLOCK * const pMBs, const int x, const int y, |
2465 |
|
const MBParam * const pParam, |
2466 |
|
const uint32_t MotionFlags) |
2467 |
|
{ |
2468 |
|
int i, iDirection; |
2469 |
|
int32_t bsad[5]; |
2470 |
|
|
2471 |
|
CheckCandidate = CheckCandidateBits16; |
2472 |
|
|
2473 |
|
if (Data->qpel) { |
2474 |
|
for(i = 0; i < 5; i++) { |
2475 |
|
Data->currentMV[i].x = Data->currentQMV[i].x/2; |
2476 |
|
Data->currentMV[i].y = Data->currentQMV[i].y/2; |
2477 |
|
} |
2478 |
|
Data->qpel_precision = 1; |
2479 |
|
CheckCandidateBits16(Data->currentQMV[0].x, Data->currentQMV[0].y, 255, &iDirection, Data); |
2480 |
|
|
2481 |
|
//checking if this vector is perfect. if it is, we stop. |
2482 |
|
if (Data->temp[0] == 0 && Data->temp[1] == 0 && Data->temp[2] == 0 && Data->temp[3] == 0) |
2483 |
|
return 0; //quick stop |
2484 |
|
|
2485 |
|
if (MotionFlags & (HALFPELREFINE16_BITS | EXTSEARCH_BITS)) { //we have to prepare for halfpixel-precision search |
2486 |
|
for(i = 0; i < 5; i++) bsad[i] = Data->iMinSAD[i]; |
2487 |
|
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
2488 |
|
pParam->width, pParam->height, Data->iFcode - Data->qpel, 0, Data->rrv); |
2489 |
|
Data->qpel_precision = 0; |
2490 |
|
if (Data->currentQMV->x & 1 || Data->currentQMV->y & 1) |
2491 |
|
CheckCandidateBits16(Data->currentMV[0].x, Data->currentMV[0].y, 255, &iDirection, Data); |
2492 |
|
} |
2493 |
|
|
2494 |
|
} else { // not qpel |
2495 |
|
|
2496 |
|
CheckCandidateBits16(Data->currentMV[0].x, Data->currentMV[0].y, 255, &iDirection, Data); |
2497 |
|
//checking if this vector is perfect. if it is, we stop. |
2498 |
|
if (Data->temp[0] == 0 && Data->temp[1] == 0 && Data->temp[2] == 0 && Data->temp[3] == 0) { |
2499 |
|
return 0; //inter |
2500 |
|
} |
2501 |
|
} |
2502 |
|
|
2503 |
|
if (MotionFlags&EXTSEARCH_BITS) SquareSearch(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
2504 |
|
|
2505 |
|
if (MotionFlags&HALFPELREFINE16_BITS) SubpelRefine(Data); |
2506 |
|
|
2507 |
|
if (Data->qpel) { |
2508 |
|
if (MotionFlags&(EXTSEARCH_BITS | HALFPELREFINE16_BITS)) { // there was halfpel-precision search |
2509 |
|
for(i = 0; i < 5; i++) if (bsad[i] > Data->iMinSAD[i]) { |
2510 |
|
Data->currentQMV[i].x = 2 * Data->currentMV[i].x; // we have found a better match |
2511 |
|
Data->currentQMV[i].y = 2 * Data->currentMV[i].y; |
2512 |
|
} |
2513 |
|
|
2514 |
|
// preparing for qpel-precision search |
2515 |
|
Data->qpel_precision = 1; |
2516 |
|
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
2517 |
|
pParam->width, pParam->height, Data->iFcode, 1, 0); |
2518 |
|
} |
2519 |
|
if (MotionFlags&QUARTERPELREFINE16_BITS) SubpelRefine(Data); |
2520 |
|
} |
2521 |
|
|
2522 |
|
if (MotionFlags&CHECKPREDICTION_BITS) { //let's check vector equal to prediction |
2523 |
|
VECTOR * v = Data->qpel ? Data->currentQMV : Data->currentMV; |
2524 |
|
if (!(Data->predMV.x == v->x && Data->predMV.y == v->y)) |
2525 |
|
CheckCandidateBits16(Data->predMV.x, Data->predMV.y, 255, &iDirection, Data); |
2526 |
|
} |
2527 |
|
return Data->iMinSAD[0]; |
2528 |
|
} |
2529 |
|
|
2530 |
|
|
2531 |
|
static int |
2532 |
|
CountMBBitsInter4v(const SearchData * const Data, |
2533 |
|
MACROBLOCK * const pMB, const MACROBLOCK * const pMBs, |
2534 |
|
const int x, const int y, |
2535 |
|
const MBParam * const pParam, const uint32_t MotionFlags, |
2536 |
|
const VECTOR * const backup) |
2537 |
|
{ |
2538 |
|
|
2539 |
|
int cbp = 0, bits = 0, t = 0, i, iDirection; |
2540 |
|
SearchData Data2, *Data8 = &Data2; |
2541 |
|
int sumx = 0, sumy = 0; |
2542 |
|
int16_t *in = Data->dctSpace, *coeff = Data->dctSpace + 64; |
2543 |
|
|
2544 |
|
memcpy(Data8, Data, sizeof(SearchData)); |
2545 |
|
CheckCandidate = CheckCandidateBits8; |
2546 |
|
|
2547 |
|
for (i = 0; i < 4; i++) { |
2548 |
|
Data8->iMinSAD = Data->iMinSAD + i + 1; |
2549 |
|
Data8->currentMV = Data->currentMV + i + 1; |
2550 |
|
Data8->currentQMV = Data->currentQMV + i + 1; |
2551 |
|
Data8->Cur = Data->Cur + 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2552 |
|
Data8->RefP[0] = Data->RefP[0] + 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2553 |
|
Data8->RefP[2] = Data->RefP[2] + 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2554 |
|
Data8->RefP[1] = Data->RefP[1] + 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2555 |
|
Data8->RefP[3] = Data->RefP[3] + 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2556 |
|
|
2557 |
|
if(Data->qpel) { |
2558 |
|
Data8->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, i); |
2559 |
|
if (i != 0) t = d_mv_bits( Data8->currentQMV->x, Data8->currentQMV->y, |
2560 |
|
Data8->predMV, Data8->iFcode, 0, 0); |
2561 |
|
} else { |
2562 |
|
Data8->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, i); |
2563 |
|
if (i != 0) t = d_mv_bits( Data8->currentMV->x, Data8->currentMV->y, |
2564 |
|
Data8->predMV, Data8->iFcode, 0, 0); |
2565 |
|
} |
2566 |
|
|
2567 |
|
get_range(&Data8->min_dx, &Data8->max_dx, &Data8->min_dy, &Data8->max_dy, 2*x + (i&1), 2*y + (i>>1), 8, |
2568 |
|
pParam->width, pParam->height, Data8->iFcode, Data8->qpel, 0); |
2569 |
|
|
2570 |
|
*Data8->iMinSAD += t; |
2571 |
|
|
2572 |
|
Data8->qpel_precision = Data8->qpel; |
2573 |
|
// checking the vector which has been found by SAD-based 8x8 search (if it's different than the one found so far) |
2574 |
|
if (Data8->qpel) { |
2575 |
|
if (!(Data8->currentQMV->x == backup[i+1].x && Data8->currentQMV->y == backup[i+1].y)) |
2576 |
|
CheckCandidateBits8(backup[i+1].x, backup[i+1].y, 255, &iDirection, Data8); |
2577 |
|
} else { |
2578 |
|
if (!(Data8->currentMV->x == backup[i+1].x && Data8->currentMV->y == backup[i+1].y)) |
2579 |
|
CheckCandidateBits8(backup[i+1].x, backup[i+1].y, 255, &iDirection, Data8); |
2580 |
|
} |
2581 |
|
|
2582 |
|
if (Data8->qpel) { |
2583 |
|
if (MotionFlags&HALFPELREFINE8_BITS || (MotionFlags&PMV_EXTSEARCH8 && MotionFlags&EXTSEARCH_BITS)) { // halfpixel motion search follows |
2584 |
|
int32_t s = *Data8->iMinSAD; |
2585 |
|
Data8->currentMV->x = Data8->currentQMV->x/2; |
2586 |
|
Data8->currentMV->y = Data8->currentQMV->y/2; |
2587 |
|
Data8->qpel_precision = 0; |
2588 |
|
get_range(&Data8->min_dx, &Data8->max_dx, &Data8->min_dy, &Data8->max_dy, 2*x + (i&1), 2*y + (i>>1), 8, |
2589 |
|
pParam->width, pParam->height, Data8->iFcode - 1, 0, 0); |
2590 |
|
|
2591 |
|
if (Data8->currentQMV->x & 1 || Data8->currentQMV->y & 1) |
2592 |
|
CheckCandidateBits8(Data8->currentMV->x, Data8->currentMV->y, 255, &iDirection, Data8); |
2593 |
|
|
2594 |
|
if (MotionFlags & PMV_EXTSEARCH8 && MotionFlags & EXTSEARCH_BITS) |
2595 |
|
SquareSearch(Data8->currentMV->x, Data8->currentMV->x, Data8, 255); |
2596 |
|
|
2597 |
|
if (MotionFlags & HALFPELREFINE8_BITS) SubpelRefine(Data8); |
2598 |
|
|
2599 |
|
if(s > *Data8->iMinSAD) { //we have found a better match |
2600 |
|
Data8->currentQMV->x = 2*Data8->currentMV->x; |
2601 |
|
Data8->currentQMV->y = 2*Data8->currentMV->y; |
2602 |
|
} |
2603 |
|
|
2604 |
|
Data8->qpel_precision = 1; |
2605 |
|
get_range(&Data8->min_dx, &Data8->max_dx, &Data8->min_dy, &Data8->max_dy, 2*x + (i&1), 2*y + (i>>1), 8, |
2606 |
|
pParam->width, pParam->height, Data8->iFcode, 1, 0); |
2607 |
|
|
2608 |
|
} |
2609 |
|
if (MotionFlags & QUARTERPELREFINE8_BITS) SubpelRefine(Data8); |
2610 |
|
|
2611 |
|
} else // not qpel |
2612 |
|
if (MotionFlags & HALFPELREFINE8_BITS) SubpelRefine(Data8); //halfpel mode, halfpel refinement |
2613 |
|
|
2614 |
|
//checking vector equal to predicion |
2615 |
|
if (i != 0 && MotionFlags & CHECKPREDICTION_BITS) { |
2616 |
|
const VECTOR * v = Data->qpel ? Data8->currentQMV : Data8->currentMV; |
2617 |
|
if (!(Data8->predMV.x == v->x && Data8->predMV.y == v->y)) |
2618 |
|
CheckCandidateBits8(Data8->predMV.x, Data8->predMV.y, 255, &iDirection, Data8); |
2619 |
|
} |
2620 |
|
|
2621 |
|
bits += *Data8->iMinSAD; |
2622 |
|
if (bits >= Data->iMinSAD[0]) break; // no chances for INTER4V |
2623 |
|
|
2624 |
|
// MB structures for INTER4V mode; we have to set them here, we don't have predictor anywhere else |
2625 |
|
if(Data->qpel) { |
2626 |
|
pMB->pmvs[i].x = Data8->currentQMV->x - Data8->predMV.x; |
2627 |
|
pMB->pmvs[i].y = Data8->currentQMV->y - Data8->predMV.y; |
2628 |
|
pMB->qmvs[i] = *Data8->currentQMV; |
2629 |
|
sumx += Data8->currentQMV->x/2; |
2630 |
|
sumy += Data8->currentQMV->y/2; |
2631 |
|
} else { |
2632 |
|
pMB->pmvs[i].x = Data8->currentMV->x - Data8->predMV.x; |
2633 |
|
pMB->pmvs[i].y = Data8->currentMV->y - Data8->predMV.y; |
2634 |
|
sumx += Data8->currentMV->x; |
2635 |
|
sumy += Data8->currentMV->y; |
2636 |
|
} |
2637 |
|
pMB->mvs[i] = *Data8->currentMV; |
2638 |
|
pMB->sad8[i] = 4 * *Data8->iMinSAD; |
2639 |
|
if (Data8->temp[0]) cbp |= 1 << (5 - i); |
2640 |
|
} |
2641 |
|
|
2642 |
|
if (bits < *Data->iMinSAD) { // there is still a chance for inter4v mode. let's check chroma |
2643 |
|
const uint8_t * ptr; |
2644 |
|
sumx = (sumx >> 3) + roundtab_76[sumx & 0xf]; |
2645 |
|
sumy = (sumy >> 3) + roundtab_76[sumy & 0xf]; |
2646 |
|
|
2647 |
|
//chroma U |
2648 |
|
ptr = interpolate8x8_switch2(Data->RefQ + 64, Data->RefP[4], 0, 0, sumx, sumy, Data->iEdgedWidth/2, Data->rounding); |
2649 |
|
transfer_8to16subro(in, Data->CurU, ptr, Data->iEdgedWidth/2); |
2650 |
|
fdct(in); |
2651 |
|
if (Data->lambda8 == 0) i = quant_inter(coeff, in, Data->lambda16); |
2652 |
|
else i = quant4_inter(coeff, in, Data->lambda16); |
2653 |
|
if (i > 0) { |
2654 |
|
bits += CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
2655 |
|
cbp |= 1 << (5 - 4); |
2656 |
|
} |
2657 |
|
|
2658 |
|
if (bits < *Data->iMinSAD) { // still possible |
2659 |
|
//chroma V |
2660 |
|
ptr = interpolate8x8_switch2(Data->RefQ + 64, Data->RefP[5], 0, 0, sumx, sumy, Data->iEdgedWidth/2, Data->rounding); |
2661 |
|
transfer_8to16subro(in, Data->CurV, ptr, Data->iEdgedWidth/2); |
2662 |
|
fdct(in); |
2663 |
|
if (Data->lambda8 == 0) i = quant_inter(coeff, in, Data->lambda16); |
2664 |
|
else i = quant4_inter(coeff, in, Data->lambda16); |
2665 |
|
if (i > 0) { |
2666 |
|
bits += CodeCoeffInter_CalcBits(coeff, scan_tables[0]); |
2667 |
|
cbp |= 1 << (5 - 5); |
2668 |
|
} |
2669 |
|
bits += xvid_cbpy_tab[15-(cbp>>2)].len; |
2670 |
|
bits += mcbpc_inter_tab[(MODE_INTER4V & 7) | ((cbp & 3) << 3)].len; |
2671 |
|
} |
2672 |
|
} |
2673 |
|
|
2674 |
|
return bits; |
2675 |
|
} |
2676 |
|
|
2677 |
|
|
2678 |
|
static int |
2679 |
|
CountMBBitsIntra(const SearchData * const Data) |
2680 |
|
{ |
2681 |
|
int bits = 1; //this one is ac/dc prediction flag. always 1. |
2682 |
|
int cbp = 0, i, t, dc = 1024, b_dc; |
2683 |
|
const uint32_t iQuant = Data->lambda16; |
2684 |
|
int16_t *in = Data->dctSpace, * coeff = Data->dctSpace + 64; |
2685 |
|
uint32_t iDcScaler = get_dc_scaler(iQuant, 1);; |
2686 |
|
|
2687 |
|
for(i = 0; i < 4; i++) { |
2688 |
|
int s = 8*((i&1) + (i>>1)*Data->iEdgedWidth); |
2689 |
|
transfer_8to16copy(in, Data->Cur + s, Data->iEdgedWidth); |
2690 |
|
fdct(in); |
2691 |
|
b_dc = in[0]; |
2692 |
|
in[0] -= dc; |
2693 |
|
dc = b_dc; |
2694 |
|
if (Data->lambda8 == 0) quant_intra(coeff, in, iQuant, iDcScaler); |
2695 |
|
else quant4_intra(coeff, in, iQuant, iDcScaler); |
2696 |
|
|
2697 |
|
bits += t = CodeCoeffIntra_CalcBits(coeff, scan_tables[0]) + dcy_tab[coeff[0] + 255].len;; |
2698 |
|
Data->temp[i] = t; |
2699 |
|
if (t != 0) cbp |= 1 << (5 - i); |
2700 |
|
if (bits >= Data->iMinSAD[0]) break; |
2701 |
|
} |
2702 |
|
|
2703 |
|
if (bits < Data->iMinSAD[0]) { // INTRA still looks good, let's add chroma |
2704 |
|
iDcScaler = get_dc_scaler(iQuant, 0); |
2705 |
|
//chroma U |
2706 |
|
transfer_8to16copy(in, Data->CurU, Data->iEdgedWidth/2); |
2707 |
|
fdct(in); |
2708 |
|
in[0] -= 1024; |
2709 |
|
if (Data->lambda8 == 0) quant_intra(coeff, in, iQuant, iDcScaler); |
2710 |
|
else quant4_intra(coeff, in, iQuant, iDcScaler); |
2711 |
|
|
2712 |
|
bits += t = CodeCoeffIntra_CalcBits(coeff, scan_tables[0]) + dcc_tab[coeff[0] + 255].len; |
2713 |
|
if (t != 0) cbp |= 1 << (5 - 4); |
2714 |
|
|
2715 |
|
if (bits < Data->iMinSAD[0]) { |
2716 |
|
//chroma V |
2717 |
|
transfer_8to16copy(in, Data->CurV, Data->iEdgedWidth/2); |
2718 |
|
fdct(in); |
2719 |
|
in[0] -= 1024; |
2720 |
|
if (Data->lambda8 == 0) quant_intra(coeff, in, iQuant, iDcScaler); |
2721 |
|
else quant4_intra(coeff, in, iQuant, iDcScaler); |
2722 |
|
|
2723 |
|
bits += t = CodeCoeffIntra_CalcBits(coeff, scan_tables[0]) + dcc_tab[coeff[0] + 255].len; |
2724 |
|
if (t != 0) cbp |= 1 << (5 - 5); |
2725 |
|
|
2726 |
|
bits += xvid_cbpy_tab[cbp>>2].len; |
2727 |
|
bits += mcbpc_inter_tab[(MODE_INTRA & 7) | ((cbp & 3) << 3)].len; |
2728 |
|
} |
2729 |
|
} |
2730 |
|
return bits; |
2731 |
|
} |