31 |
#include <assert.h> |
#include <assert.h> |
32 |
#include <stdio.h> |
#include <stdio.h> |
33 |
#include <stdlib.h> |
#include <stdlib.h> |
34 |
|
#include <string.h> // memcpy |
35 |
|
#include <math.h> // lrint |
36 |
|
|
37 |
#include "../encoder.h" |
#include "../encoder.h" |
38 |
#include "../utils/mbfunctions.h" |
#include "../utils/mbfunctions.h" |
53 |
/* Forward a candidate motion vector (X,Y) to the currently selected
 * CheckCandidate function. Relies on `iDirection` and `data` being in
 * scope at the call site. Wrapped in do { } while (0) so the macro acts
 * as a single statement even in unbraced if/else bodies; existing call
 * sites terminated with ';' are unaffected. */
#define CHECK_CANDIDATE(X,Y,D) do { \
	(*CheckCandidate)((const int)(X), (const int)(Y), (D), &iDirection, data); \
} while (0)
55 |
|
|
56 |
static __inline int |
static __inline uint32_t |
57 |
d_mv_bits(int x, int y, const uint32_t iFcode, const int qpel, const int rrv) |
d_mv_bits(int x, int y, const VECTOR pred, const uint32_t iFcode, const int qpel, const int rrv) |
58 |
{ |
{ |
59 |
int xb, yb; |
int xb, yb; |
60 |
if (qpel) { x *= 2; y *= 2;} |
x += x * qpel; y += y * qpel; |
61 |
else if (rrv) { x = RRV_MV_SCALEDOWN(x); y = RRV_MV_SCALEDOWN(y); } |
if (rrv) { x = RRV_MV_SCALEDOWN(x); y = RRV_MV_SCALEDOWN(y); } |
62 |
|
x -= pred.x; |
63 |
|
y -= pred.y; |
64 |
|
|
65 |
if (x == 0) xb = 1; |
if (x) { |
66 |
else { |
x = ABS(x); |
|
if (x < 0) x = -x; |
|
67 |
x += (1 << (iFcode - 1)) - 1; |
x += (1 << (iFcode - 1)) - 1; |
68 |
x >>= (iFcode - 1); |
x >>= (iFcode - 1); |
69 |
if (x > 32) x = 32; |
if (x > 32) x = 32; |
70 |
xb = mvtab[x] + iFcode; |
xb = mvtab[x] + iFcode; |
71 |
} |
} else xb = 1; |
72 |
|
|
73 |
if (y == 0) yb = 1; |
if (y) { |
74 |
else { |
y = ABS(y); |
|
if (y < 0) y = -y; |
|
75 |
y += (1 << (iFcode - 1)) - 1; |
y += (1 << (iFcode - 1)) - 1; |
76 |
y >>= (iFcode - 1); |
y >>= (iFcode - 1); |
77 |
if (y > 32) y = 32; |
if (y > 32) y = 32; |
78 |
yb = mvtab[y] + iFcode; |
yb = mvtab[y] + iFcode; |
79 |
} |
} else yb = 1; |
80 |
return xb + yb; |
return xb + yb; |
81 |
} |
} |
82 |
|
|
84 |
ChromaSAD(int dx, int dy, const SearchData * const data) |
ChromaSAD(int dx, int dy, const SearchData * const data) |
85 |
{ |
{ |
86 |
int sad; |
int sad; |
87 |
dx = (dx >> 1) + roundtab_79[dx & 0x3]; |
const uint32_t stride = data->iEdgedWidth/2; |
|
dy = (dy >> 1) + roundtab_79[dy & 0x3]; |
|
88 |
|
|
89 |
if (dx == data->temp[5] && dy == data->temp[6]) return data->temp[7]; //it has been checked recently |
if (dx == data->temp[5] && dy == data->temp[6]) return data->temp[7]; //it has been checked recently |
90 |
|
data->temp[5] = dx; data->temp[6] = dy; // backup |
91 |
|
|
92 |
switch (((dx & 1) << 1) | (dy & 1)) { |
switch (((dx & 1) << 1) | (dy & 1)) { |
93 |
case 0: |
case 0: |
94 |
sad = sad8(data->CurU, data->RefCU + (dy/2) * (data->iEdgedWidth/2) + dx/2, data->iEdgedWidth/2); |
dx = dx / 2; dy = dy / 2; |
95 |
sad += sad8(data->CurV, data->RefCV + (dy/2) * (data->iEdgedWidth/2) + dx/2, data->iEdgedWidth/2); |
sad = sad8(data->CurU, data->RefCU + dy * stride + dx, stride); |
96 |
|
sad += sad8(data->CurV, data->RefCV + dy * stride + dx, stride); |
97 |
break; |
break; |
98 |
case 1: |
case 1: |
99 |
dx = dx / 2; dy = (dy - 1) / 2; |
dx = dx / 2; dy = (dy - 1) / 2; |
100 |
sad = sad8bi(data->CurU, data->RefCU + dy * (data->iEdgedWidth/2) + dx, data->RefCU + (dy+1) * (data->iEdgedWidth/2) + dx, data->iEdgedWidth/2); |
sad = sad8bi(data->CurU, data->RefCU + dy * stride + dx, data->RefCU + (dy+1) * stride + dx, stride); |
101 |
sad += sad8bi(data->CurV, data->RefCV + dy * (data->iEdgedWidth/2) + dx, data->RefCV + (dy+1) * (data->iEdgedWidth/2) + dx, data->iEdgedWidth/2); |
sad += sad8bi(data->CurV, data->RefCV + dy * stride + dx, data->RefCV + (dy+1) * stride + dx, stride); |
102 |
break; |
break; |
103 |
case 2: |
case 2: |
104 |
dx = (dx - 1) / 2; dy = dy / 2; |
dx = (dx - 1) / 2; dy = dy / 2; |
105 |
sad = sad8bi(data->CurU, data->RefCU + dy * (data->iEdgedWidth/2) + dx, data->RefCU + dy * (data->iEdgedWidth/2) + dx+1, data->iEdgedWidth/2); |
sad = sad8bi(data->CurU, data->RefCU + dy * stride + dx, data->RefCU + dy * stride + dx+1, stride); |
106 |
sad += sad8bi(data->CurV, data->RefCV + dy * (data->iEdgedWidth/2) + dx, data->RefCV + dy * (data->iEdgedWidth/2) + dx+1, data->iEdgedWidth/2); |
sad += sad8bi(data->CurV, data->RefCV + dy * stride + dx, data->RefCV + dy * stride + dx+1, stride); |
107 |
break; |
break; |
108 |
default: |
default: |
109 |
dx = (dx - 1) / 2; dy = (dy - 1) / 2; |
dx = (dx - 1) / 2; dy = (dy - 1) / 2; |
110 |
interpolate8x8_halfpel_hv(data->RefQ, |
interpolate8x8_halfpel_hv(data->RefQ, data->RefCU + dy * stride + dx, stride, data->rounding); |
111 |
data->RefCU + dy * (data->iEdgedWidth/2) + dx, data->iEdgedWidth/2, |
sad = sad8(data->CurU, data->RefQ, stride); |
112 |
data->rounding); |
|
113 |
sad = sad8(data->CurU, data->RefQ, data->iEdgedWidth/2); |
interpolate8x8_halfpel_hv(data->RefQ, data->RefCV + dy * stride + dx, stride, data->rounding); |
114 |
interpolate8x8_halfpel_hv(data->RefQ, |
sad += sad8(data->CurV, data->RefQ, stride); |
|
data->RefCV + dy * (data->iEdgedWidth/2) + dx, data->iEdgedWidth/2, |
|
|
data->rounding); |
|
|
sad += sad8(data->CurV, data->RefQ, data->iEdgedWidth/2); |
|
115 |
break; |
break; |
116 |
} |
} |
117 |
data->temp[5] = dx; data->temp[6] = dy; data->temp[7] = sad; //backup |
data->temp[7] = sad; //backup, part 2 |
118 |
return sad; |
return sad; |
119 |
} |
} |
120 |
|
|
121 |
static __inline const uint8_t * |
static __inline const uint8_t * |
122 |
GetReference(const int x, const int y, const int dir, const SearchData * const data) |
GetReferenceB(const int x, const int y, const uint32_t dir, const SearchData * const data) |
123 |
{ |
{ |
124 |
// dir : 0 = forward, 1 = backward |
// dir : 0 = forward, 1 = backward |
125 |
switch ( (dir << 2) | ((x&1)<<1) | (y&1) ) { |
switch ( (dir << 2) | ((x&1)<<1) | (y&1) ) { |
134 |
} |
} |
135 |
} |
} |
136 |
|
|
137 |
|
// this is a simpler copy of GetReferenceB, but as it's __inline anyway, we can keep the two separate |
138 |
|
static __inline const uint8_t * |
139 |
|
GetReference(const int x, const int y, const SearchData * const data) |
140 |
|
{ |
141 |
|
switch ( ((x&1)<<1) | (y&1) ) { |
142 |
|
case 0 : return data->Ref + x/2 + (y/2)*(data->iEdgedWidth); |
143 |
|
case 1 : return data->RefV + x/2 + ((y-1)/2)*(data->iEdgedWidth); |
144 |
|
case 2 : return data->RefH + (x-1)/2 + (y/2)*(data->iEdgedWidth); |
145 |
|
default : return data->RefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); |
146 |
|
} |
147 |
|
} |
148 |
|
|
149 |
static uint8_t * |
static uint8_t * |
150 |
Interpolate8x8qpel(const int x, const int y, const int block, const int dir, const SearchData * const data) |
Interpolate8x8qpel(const int x, const int y, const uint32_t block, const uint32_t dir, const SearchData * const data) |
151 |
{ |
{ |
152 |
// create or find a qpel-precision reference picture; return pointer to it |
// create or find a qpel-precision reference picture; return pointer to it |
153 |
uint8_t * Reference = (uint8_t *)data->RefQ + 16*dir; |
uint8_t * Reference = data->RefQ + 16*dir; |
154 |
const int32_t iEdgedWidth = data->iEdgedWidth; |
const uint32_t iEdgedWidth = data->iEdgedWidth; |
155 |
const uint32_t rounding = data->rounding; |
const uint32_t rounding = data->rounding; |
156 |
const int halfpel_x = x/2; |
const int halfpel_x = x/2; |
157 |
const int halfpel_y = y/2; |
const int halfpel_y = y/2; |
158 |
const uint8_t *ref1, *ref2, *ref3, *ref4; |
const uint8_t *ref1, *ref2, *ref3, *ref4; |
159 |
|
|
160 |
ref1 = GetReference(halfpel_x, halfpel_y, dir, data); // this reference is used in all cases |
ref1 = GetReferenceB(halfpel_x, halfpel_y, dir, data); |
161 |
ref1 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref1 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
162 |
switch( ((x&1)<<1) + (y&1) ) { |
switch( ((x&1)<<1) + (y&1) ) { |
163 |
case 0: // pure halfpel position |
case 0: // pure halfpel position |
164 |
Reference = (uint8_t *) GetReference(halfpel_x, halfpel_y, dir, data); |
return (uint8_t *) ref1; |
|
Reference += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
|
165 |
break; |
break; |
166 |
|
|
167 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
168 |
ref2 = GetReference(halfpel_x, y - halfpel_y, dir, data); |
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
169 |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
170 |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
171 |
break; |
break; |
172 |
|
|
173 |
case 2: // x qpel, y halfpel - left or right during qpel refinement |
case 2: // x qpel, y halfpel - left or right during qpel refinement |
174 |
ref2 = GetReference(x - halfpel_x, halfpel_y, dir, data); |
ref2 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
175 |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
176 |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
177 |
break; |
break; |
178 |
|
|
179 |
default: // x and y in qpel resolution - the "corners" (top left/right and |
default: // x and y in qpel resolution - the "corners" (top left/right and |
180 |
// bottom left/right) during qpel refinement |
// bottom left/right) during qpel refinement |
181 |
ref2 = GetReference(halfpel_x, y - halfpel_y, dir, data); |
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
182 |
ref3 = GetReference(x - halfpel_x, halfpel_y, dir, data); |
ref3 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
183 |
ref4 = GetReference(x - halfpel_x, y - halfpel_y, dir, data); |
ref4 = GetReferenceB(x - halfpel_x, y - halfpel_y, dir, data); |
184 |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref2 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
185 |
ref3 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref3 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
186 |
ref4 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
ref4 += 8 * (block&1) + 8 * (block>>1) * iEdgedWidth; |
191 |
} |
} |
192 |
|
|
193 |
static uint8_t * |
static uint8_t * |
194 |
Interpolate16x16qpel(const int x, const int y, const int dir, const SearchData * const data) |
Interpolate16x16qpel(const int x, const int y, const uint32_t dir, const SearchData * const data) |
195 |
{ |
{ |
196 |
// create or find a qpel-precision reference picture; return pointer to it |
// create or find a qpel-precision reference picture; return pointer to it |
197 |
uint8_t * Reference = (uint8_t *)data->RefQ + 16*dir; |
uint8_t * Reference = data->RefQ + 16*dir; |
198 |
const int32_t iEdgedWidth = data->iEdgedWidth; |
const uint32_t iEdgedWidth = data->iEdgedWidth; |
199 |
const uint32_t rounding = data->rounding; |
const uint32_t rounding = data->rounding; |
200 |
const int halfpel_x = x/2; |
const int halfpel_x = x/2; |
201 |
const int halfpel_y = y/2; |
const int halfpel_y = y/2; |
202 |
const uint8_t *ref1, *ref2, *ref3, *ref4; |
const uint8_t *ref1, *ref2, *ref3, *ref4; |
203 |
|
|
204 |
ref1 = GetReference(halfpel_x, halfpel_y, dir, data); // this reference is used in all cases |
ref1 = GetReferenceB(halfpel_x, halfpel_y, dir, data); |
205 |
switch( ((x&1)<<1) + (y&1) ) { |
switch( ((x&1)<<1) + (y&1) ) { |
206 |
case 0: // pure halfpel position |
case 0: // pure halfpel position |
207 |
return (uint8_t *) GetReference(halfpel_x, halfpel_y, dir, data); |
return (uint8_t *) ref1; |
208 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
209 |
ref2 = GetReference(halfpel_x, y - halfpel_y, dir, data); |
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
210 |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
211 |
interpolate8x8_avg2(Reference+8, ref1+8, ref2+8, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference+8, ref1+8, ref2+8, iEdgedWidth, rounding, 8); |
212 |
interpolate8x8_avg2(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, iEdgedWidth, rounding, 8); |
214 |
break; |
break; |
215 |
|
|
216 |
case 2: // x qpel, y halfpel - left or right during qpel refinement |
case 2: // x qpel, y halfpel - left or right during qpel refinement |
217 |
ref2 = GetReference(x - halfpel_x, halfpel_y, dir, data); |
ref2 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
218 |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference, ref1, ref2, iEdgedWidth, rounding, 8); |
219 |
interpolate8x8_avg2(Reference+8, ref1+8, ref2+8, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference+8, ref1+8, ref2+8, iEdgedWidth, rounding, 8); |
220 |
interpolate8x8_avg2(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, iEdgedWidth, rounding, 8); |
interpolate8x8_avg2(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, iEdgedWidth, rounding, 8); |
223 |
|
|
224 |
default: // x and y in qpel resolution - the "corners" (top left/right and |
default: // x and y in qpel resolution - the "corners" (top left/right and |
225 |
// bottom left/right) during qpel refinement |
// bottom left/right) during qpel refinement |
226 |
ref2 = GetReference(halfpel_x, y - halfpel_y, dir, data); |
ref2 = GetReferenceB(halfpel_x, y - halfpel_y, dir, data); |
227 |
ref3 = GetReference(x - halfpel_x, halfpel_y, dir, data); |
ref3 = GetReferenceB(x - halfpel_x, halfpel_y, dir, data); |
228 |
ref4 = GetReference(x - halfpel_x, y - halfpel_y, dir, data); |
ref4 = GetReferenceB(x - halfpel_x, y - halfpel_y, dir, data); |
229 |
interpolate8x8_avg4(Reference, ref1, ref2, ref3, ref4, iEdgedWidth, rounding); |
interpolate8x8_avg4(Reference, ref1, ref2, ref3, ref4, iEdgedWidth, rounding); |
230 |
interpolate8x8_avg4(Reference+8, ref1+8, ref2+8, ref3+8, ref4+8, iEdgedWidth, rounding); |
interpolate8x8_avg4(Reference+8, ref1+8, ref2+8, ref3+8, ref4+8, iEdgedWidth, rounding); |
231 |
interpolate8x8_avg4(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, ref3+8*iEdgedWidth, ref4+8*iEdgedWidth, iEdgedWidth, rounding); |
interpolate8x8_avg4(Reference+8*iEdgedWidth, ref1+8*iEdgedWidth, ref2+8*iEdgedWidth, ref3+8*iEdgedWidth, ref4+8*iEdgedWidth, iEdgedWidth, rounding); |
244 |
const uint8_t * Reference; |
const uint8_t * Reference; |
245 |
VECTOR * current; |
VECTOR * current; |
246 |
|
|
247 |
if (( x > data->max_dx) || ( x < data->min_dx) |
if ( (x > data->max_dx) | (x < data->min_dx) |
248 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
249 |
|
|
250 |
if (data->qpel_precision) { // x and y are in 1/4 precision |
if (data->qpel_precision) { // x and y are in 1/4 precision |
251 |
Reference = Interpolate16x16qpel(x, y, 0, data); |
Reference = Interpolate16x16qpel(x, y, 0, data); |
252 |
xc = x/2; yc = y/2; //for chroma sad |
xc = x/2; yc = y/2; //for chroma sad |
253 |
current = data->currentQMV; |
current = data->currentQMV; |
254 |
} else { |
} else { |
255 |
Reference = GetReference(x, y, 0, data); |
Reference = GetReference(x, y, data); |
256 |
current = data->currentMV; |
current = data->currentMV; |
257 |
xc = x; yc = y; |
xc = x; yc = y; |
258 |
} |
} |
259 |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode, data->qpel && !data->qpel_precision, 0); |
t = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
260 |
|
|
261 |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
262 |
|
|
263 |
data->temp[0] += (data->lambda16 * t * data->temp[0])/1000; |
data->temp[0] += (data->lambda16 * t * data->temp[0])>>10; |
264 |
data->temp[1] += (data->lambda8 * t * (data->temp[1] + NEIGH_8X8_BIAS))/100; |
data->temp[1] += (data->lambda8 * t * (data->temp[1] + NEIGH_8X8_BIAS))>>10; |
265 |
|
|
266 |
if (data->chroma) data->temp[0] += ChromaSAD(xc, yc, data); |
if (data->chroma) data->temp[0] += ChromaSAD((xc >> 1) + roundtab_79[xc & 0x3], |
267 |
|
(yc >> 1) + roundtab_79[yc & 0x3], data); |
268 |
|
|
269 |
if (data->temp[0] < data->iMinSAD[0]) { |
if (data->temp[0] < data->iMinSAD[0]) { |
270 |
data->iMinSAD[0] = data->temp[0]; |
data->iMinSAD[0] = data->temp[0]; |
285 |
static void |
static void |
286 |
CheckCandidate32(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
CheckCandidate32(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
287 |
{ |
{ |
288 |
int t; |
uint32_t t; |
289 |
const uint8_t * Reference; |
const uint8_t * Reference; |
290 |
|
|
291 |
if ( (!(x&1) && x !=0) || (!(y&1) && y !=0) || //non-zero integer value |
if ( (!(x&1) && x !=0) | (!(y&1) && y !=0) || //non-zero integer value |
292 |
( x > data->max_dx) || ( x < data->min_dx) |
(x > data->max_dx) | (x < data->min_dx) |
293 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
294 |
|
|
295 |
Reference = GetReference(x, y, 0, data); |
Reference = GetReference(x, y, data); |
296 |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode, 0, 1); |
t = d_mv_bits(x, y, data->predMV, data->iFcode, 0, 1); |
297 |
|
|
298 |
data->temp[0] = sad32v_c(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
data->temp[0] = sad32v_c(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
299 |
|
|
300 |
data->temp[0] += (data->lambda16 * t * data->temp[0])/1000; |
data->temp[0] += (data->lambda16 * t * data->temp[0]) >> 10; |
301 |
data->temp[1] += (data->lambda8 * t * (data->temp[1] + NEIGH_8X8_BIAS))/100; |
data->temp[1] += (data->lambda8 * t * (data->temp[1] + NEIGH_8X8_BIAS))>>10; |
302 |
|
|
303 |
if (data->temp[0] < data->iMinSAD[0]) { |
if (data->temp[0] < data->iMinSAD[0]) { |
304 |
data->iMinSAD[0] = data->temp[0]; |
data->iMinSAD[0] = data->temp[0]; |
320 |
{ |
{ |
321 |
int32_t sad; |
int32_t sad; |
322 |
const uint8_t * Reference; |
const uint8_t * Reference; |
323 |
int t; |
uint32_t t; |
324 |
VECTOR * current; |
VECTOR * current; |
325 |
|
|
326 |
if (( x > data->max_dx) || ( x < data->min_dx) |
if ( (x > data->max_dx) | ( x < data->min_dx) |
327 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
328 |
|
|
329 |
if (data->rrv) |
if (data->rrv && (!(x&1) && x !=0) | (!(y&1) && y !=0) ) return; //non-zero even value |
|
if ( (!(x&1) && x !=0) || (!(y&1) && y !=0) ) return; //non-zero integer value |
|
330 |
|
|
331 |
if (data->qpel_precision) { // x and y are in 1/4 precision |
if (data->qpel_precision) { // x and y are in 1/4 precision |
332 |
Reference = Interpolate16x16qpel(x, y, 0, data); |
Reference = Interpolate16x16qpel(x, y, 0, data); |
333 |
current = data->currentQMV; |
current = data->currentQMV; |
334 |
} else { |
} else { |
335 |
Reference = GetReference(x, y, 0, data); |
Reference = GetReference(x, y, data); |
336 |
current = data->currentMV; |
current = data->currentMV; |
337 |
} |
} |
338 |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode, |
t = d_mv_bits(x, y, data->predMV, data->iFcode, |
339 |
data->qpel && !data->qpel_precision && !data->rrv, data->rrv); |
data->qpel^data->qpel_precision, data->rrv); |
340 |
|
|
341 |
sad = sad16(data->Cur, Reference, data->iEdgedWidth, 256*4096); |
sad = sad16(data->Cur, Reference, data->iEdgedWidth, 256*4096); |
342 |
sad += (data->lambda16 * t * sad)/1000; |
sad += (data->lambda16 * t * sad)>>10; |
343 |
|
|
344 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
345 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
348 |
} |
} |
349 |
|
|
350 |
static void |
static void |
351 |
CheckCandidate16no4vI(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
CheckCandidate32I(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
352 |
{ |
{ |
353 |
// maximum speed - for P/B/I decision |
// maximum speed - for P/B/I decision |
|
int32_t sad; |
|
354 |
|
|
355 |
if (( x > data->max_dx) || ( x < data->min_dx) |
if ( (x > data->max_dx) | (x < data->min_dx) |
356 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
357 |
|
|
358 |
sad = sad16(data->Cur, data->Ref + x/2 + (y/2)*(data->iEdgedWidth), |
data->temp[0] = sad32v_c(data->Cur, data->Ref + x/2 + (y/2)*(data->iEdgedWidth), |
359 |
data->iEdgedWidth, 256*4096); |
data->iEdgedWidth, data->temp+1); |
360 |
|
|
361 |
if (sad < *(data->iMinSAD)) { |
if (data->temp[0] < *(data->iMinSAD)) { |
362 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = data->temp[0]; |
363 |
data->currentMV[0].x = x; data->currentMV[0].y = y; |
data->currentMV[0].x = x; data->currentMV[0].y = y; |
364 |
*dir = Direction; } |
*dir = Direction; } |
365 |
} |
if (data->temp[1] < data->iMinSAD[1]) { |
366 |
|
data->iMinSAD[1] = data->temp[1]; data->currentMV[1].x = x; data->currentMV[1].y = y; } |
367 |
|
if (data->temp[2] < data->iMinSAD[2]) { |
368 |
|
data->iMinSAD[2] = data->temp[2]; data->currentMV[2].x = x; data->currentMV[2].y = y; } |
369 |
|
if (data->temp[3] < data->iMinSAD[3]) { |
370 |
|
data->iMinSAD[3] = data->temp[3]; data->currentMV[3].x = x; data->currentMV[3].y = y; } |
371 |
|
if (data->temp[4] < data->iMinSAD[4]) { |
372 |
|
data->iMinSAD[4] = data->temp[4]; data->currentMV[4].x = x; data->currentMV[4].y = y; } |
373 |
|
|
374 |
|
} |
375 |
|
|
376 |
static void |
static void |
377 |
CheckCandidateInt(const int xf, const int yf, const int Direction, int * const dir, const SearchData * const data) |
CheckCandidateInt(const int xf, const int yf, const int Direction, int * const dir, const SearchData * const data) |
378 |
{ |
{ |
379 |
int32_t sad; |
int32_t sad, xb, yb; |
380 |
int xb, yb, t; |
uint32_t t; |
381 |
const uint8_t *ReferenceF, *ReferenceB; |
const uint8_t *ReferenceF, *ReferenceB; |
382 |
VECTOR *current; |
VECTOR *current; |
383 |
|
|
384 |
if (( xf > data->max_dx) || ( xf < data->min_dx) |
if ( (xf > data->max_dx) | (xf < data->min_dx) |
385 |
|| ( yf > data->max_dy) || (yf < data->min_dy)) return; |
| (yf > data->max_dy) | (yf < data->min_dy) ) return; |
386 |
|
|
387 |
if (data->qpel_precision) { |
if (!data->qpel_precision) { |
388 |
|
ReferenceF = GetReference(xf, yf, data); |
389 |
|
xb = data->currentMV[1].x; yb = data->currentMV[1].y; |
390 |
|
ReferenceB = GetReferenceB(xb, yb, 1, data); |
391 |
|
current = data->currentMV; |
392 |
|
} else { |
393 |
ReferenceF = Interpolate16x16qpel(xf, yf, 0, data); |
ReferenceF = Interpolate16x16qpel(xf, yf, 0, data); |
394 |
xb = data->currentQMV[1].x; yb = data->currentQMV[1].y; |
xb = data->currentQMV[1].x; yb = data->currentQMV[1].y; |
395 |
current = data->currentQMV; |
current = data->currentQMV; |
396 |
ReferenceB = Interpolate16x16qpel(xb, yb, 1, data); |
ReferenceB = Interpolate16x16qpel(xb, yb, 1, data); |
|
} else { |
|
|
ReferenceF = GetReference(xf, yf, 0, data); |
|
|
xb = data->currentMV[1].x; yb = data->currentMV[1].y; |
|
|
ReferenceB = GetReference(xb, yb, 1, data); |
|
|
current = data->currentMV; |
|
397 |
} |
} |
398 |
|
|
399 |
t = d_mv_bits(xf - data->predMV.x, yf - data->predMV.y, data->iFcode, data->qpel && !data->qpel_precision, 0) |
t = d_mv_bits(xf, yf, data->predMV, data->iFcode, data->qpel^data->qpel_precision, 0) |
400 |
+ d_mv_bits(xb - data->bpredMV.x, yb - data->bpredMV.y, data->iFcode, data->qpel && !data->qpel_precision, 0); |
+ d_mv_bits(xb, yb, data->bpredMV, data->iFcode, data->qpel^data->qpel_precision, 0); |
401 |
|
|
402 |
sad = sad16bi(data->Cur, ReferenceF, ReferenceB, data->iEdgedWidth); |
sad = sad16bi(data->Cur, ReferenceF, ReferenceB, data->iEdgedWidth); |
403 |
sad += (data->lambda16 * t * sad)/1000; |
sad += (data->lambda16 * t * sad)>>10; |
404 |
|
|
405 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
406 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
412 |
CheckCandidateDirect(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
CheckCandidateDirect(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
413 |
{ |
{ |
414 |
int32_t sad = 0; |
int32_t sad = 0; |
415 |
int k; |
uint32_t k; |
416 |
const uint8_t *ReferenceF; |
const uint8_t *ReferenceF; |
417 |
const uint8_t *ReferenceB; |
const uint8_t *ReferenceB; |
418 |
VECTOR mvs, b_mvs; |
VECTOR mvs, b_mvs; |
419 |
|
|
420 |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
if (( x > 31) | ( x < -32) | ( y > 31) | (y < -32)) return; |
421 |
|
|
422 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
423 |
mvs.x = data->directmvF[k].x + x; |
mvs.x = data->directmvF[k].x + x; |
435 |
|| ( b_mvs.x > data->max_dx ) || ( b_mvs.x < data->min_dx ) |
|| ( b_mvs.x > data->max_dx ) || ( b_mvs.x < data->min_dx ) |
436 |
|| ( b_mvs.y > data->max_dy ) || ( b_mvs.y < data->min_dy )) return; |
|| ( b_mvs.y > data->max_dy ) || ( b_mvs.y < data->min_dy )) return; |
437 |
|
|
438 |
if (!data->qpel) { |
|
439 |
mvs.x *= 2; mvs.y *= 2; |
mvs.x *= 2 - data->qpel; mvs.y *= 2 - data->qpel; |
440 |
b_mvs.x *= 2; b_mvs.y *= 2; //we move to qpel precision anyway |
b_mvs.x *= 2 - data->qpel; b_mvs.y *= 2 - data->qpel; //we move to qpel precision anyway |
441 |
} |
|
442 |
ReferenceF = Interpolate8x8qpel(mvs.x, mvs.y, k, 0, data); |
ReferenceF = Interpolate8x8qpel(mvs.x, mvs.y, k, 0, data); |
443 |
ReferenceB = Interpolate8x8qpel(b_mvs.x, b_mvs.y, k, 1, data); |
ReferenceB = Interpolate8x8qpel(b_mvs.x, b_mvs.y, k, 1, data); |
444 |
|
|
445 |
sad += sad8bi(data->Cur + 8*(k&1) + 8*(k>>1)*(data->iEdgedWidth), |
sad += sad8bi(data->Cur + 8*(k&1) + 8*(k>>1)*(data->iEdgedWidth), |
446 |
ReferenceF, ReferenceB, |
ReferenceF, ReferenceB, data->iEdgedWidth); |
|
data->iEdgedWidth); |
|
447 |
if (sad > *(data->iMinSAD)) return; |
if (sad > *(data->iMinSAD)) return; |
448 |
} |
} |
449 |
|
|
450 |
sad += (data->lambda16 * d_mv_bits(x, y, 1, 0, 0) * sad)/1000; |
sad += (data->lambda16 * d_mv_bits(x, y, zeroMV, 1, 0, 0) * sad)>>10; |
451 |
|
|
452 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
453 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
463 |
const uint8_t *ReferenceB; |
const uint8_t *ReferenceB; |
464 |
VECTOR mvs, b_mvs; |
VECTOR mvs, b_mvs; |
465 |
|
|
466 |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
if (( x > 31) | ( x < -32) | ( y > 31) | (y < -32)) return; |
467 |
|
|
468 |
mvs.x = data->directmvF[0].x + x; |
mvs.x = data->directmvF[0].x + x; |
469 |
b_mvs.x = ((x == 0) ? |
b_mvs.x = ((x == 0) ? |
480 |
|| ( b_mvs.x > data->max_dx ) || ( b_mvs.x < data->min_dx ) |
|| ( b_mvs.x > data->max_dx ) || ( b_mvs.x < data->min_dx ) |
481 |
|| ( b_mvs.y > data->max_dy ) || ( b_mvs.y < data->min_dy )) return; |
|| ( b_mvs.y > data->max_dy ) || ( b_mvs.y < data->min_dy )) return; |
482 |
|
|
483 |
if (!data->qpel) { |
mvs.x *= 2 - data->qpel; mvs.y *= 2 - data->qpel; |
484 |
mvs.x *= 2; mvs.y *= 2; |
b_mvs.x *= 2 - data->qpel; b_mvs.y *= 2 - data->qpel; //we move to qpel precision anyway |
485 |
b_mvs.x *= 2; b_mvs.y *= 2; //we move to qpel precision anyway |
|
|
} |
|
486 |
ReferenceF = Interpolate16x16qpel(mvs.x, mvs.y, 0, data); |
ReferenceF = Interpolate16x16qpel(mvs.x, mvs.y, 0, data); |
487 |
ReferenceB = Interpolate16x16qpel(b_mvs.x, b_mvs.y, 1, data); |
ReferenceB = Interpolate16x16qpel(b_mvs.x, b_mvs.y, 1, data); |
488 |
|
|
489 |
sad = sad16bi(data->Cur, ReferenceF, ReferenceB, data->iEdgedWidth); |
sad = sad16bi(data->Cur, ReferenceF, ReferenceB, data->iEdgedWidth); |
490 |
sad += (data->lambda16 * d_mv_bits(x, y, 1, 0, 0) * sad)/1000; |
sad += (data->lambda16 * d_mv_bits(x, y, zeroMV, 1, 0, 0) * sad)>>10; |
491 |
|
|
492 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
493 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
498 |
static void |
static void |
499 |
CheckCandidate8(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
CheckCandidate8(const int x, const int y, const int Direction, int * const dir, const SearchData * const data) |
500 |
{ |
{ |
501 |
int32_t sad; int t; |
int32_t sad; uint32_t t; |
502 |
const uint8_t * Reference; |
const uint8_t * Reference; |
503 |
|
|
504 |
if (( x > data->max_dx) || ( x < data->min_dx) |
if ( (x > data->max_dx) | (x < data->min_dx) |
505 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
| (y > data->max_dy) | (y < data->min_dy) ) return; |
506 |
|
|
507 |
if (data->qpel) Reference = Interpolate16x16qpel(x, y, 0, data); |
if (data->qpel) Reference = Interpolate16x16qpel(x, y, 0, data); |
508 |
else Reference = GetReference(x, y, 0, data); |
else Reference = GetReference(x, y, data); |
509 |
|
|
510 |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
511 |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode, data->qpel && !data->qpel_precision, 0); |
t = d_mv_bits(x, y, data->predMV, data->iFcode, data->qpel && !data->qpel_precision, 0); |
512 |
|
|
513 |
sad += (data->lambda8 * t * (sad+NEIGH_8X8_BIAS))/100; |
sad += (data->lambda8 * t * (sad+NEIGH_8X8_BIAS))>>10; |
514 |
|
|
515 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
516 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
539 |
|
|
540 |
/* now we're doing diagonal checks near our candidate */ |
/* now we're doing diagonal checks near our candidate */ |
541 |
|
|
542 |
if (iDirection) { //checking if anything found |
if (iDirection) { //if anything found |
543 |
bDirection = iDirection; |
bDirection = iDirection; |
544 |
iDirection = 0; |
iDirection = 0; |
545 |
x = data->currentMV->x; y = data->currentMV->y; |
x = data->currentMV->x; y = data->currentMV->y; |
665 |
|
|
666 |
/* MAINSEARCH FUNCTIONS END */ |
/* MAINSEARCH FUNCTIONS END */ |
667 |
|
|
|
/* HALFPELREFINE COULD BE A MAINSEARCH FUNCTION, BUT THERE IS NO NEED FOR IT */ |
|
|
|
|
668 |
static void |
static void |
669 |
SubpelRefine(const SearchData * const data) |
SubpelRefine(const SearchData * const data) |
670 |
{ |
{ |
671 |
/* Do a half-pel or q-pel refinement */ |
/* Do a half-pel or q-pel refinement */ |
672 |
VECTOR backupMV; |
const VECTOR centerMV = data->qpel_precision ? *data->currentQMV : *data->currentMV; |
673 |
int iDirection; //not needed |
int iDirection; //only needed because macro expects it |
|
|
|
|
if (data->qpel_precision) |
|
|
backupMV = *(data->currentQMV); |
|
|
else backupMV = *(data->currentMV); |
|
|
|
|
|
CHECK_CANDIDATE(backupMV.x - 1, backupMV.y - 1, 0); |
|
|
CHECK_CANDIDATE(backupMV.x + 1, backupMV.y - 1, 0); |
|
|
CHECK_CANDIDATE(backupMV.x - 1, backupMV.y + 1, 0); |
|
|
CHECK_CANDIDATE(backupMV.x + 1, backupMV.y + 1, 0); |
|
|
|
|
|
CHECK_CANDIDATE(backupMV.x - 1, backupMV.y, 0); |
|
|
CHECK_CANDIDATE(backupMV.x + 1, backupMV.y, 0); |
|
674 |
|
|
675 |
CHECK_CANDIDATE(backupMV.x, backupMV.y + 1, 0); |
CHECK_CANDIDATE(centerMV.x, centerMV.y - 1, 0); |
676 |
CHECK_CANDIDATE(backupMV.x, backupMV.y - 1, 0); |
CHECK_CANDIDATE(centerMV.x + 1, centerMV.y - 1, 0); |
677 |
|
CHECK_CANDIDATE(centerMV.x + 1, centerMV.y, 0); |
678 |
|
CHECK_CANDIDATE(centerMV.x + 1, centerMV.y + 1, 0); |
679 |
|
CHECK_CANDIDATE(centerMV.x, centerMV.y + 1, 0); |
680 |
|
CHECK_CANDIDATE(centerMV.x - 1, centerMV.y + 1, 0); |
681 |
|
CHECK_CANDIDATE(centerMV.x - 1, centerMV.y, 0); |
682 |
|
CHECK_CANDIDATE(centerMV.x - 1, centerMV.y - 1, 0); |
683 |
} |
} |
684 |
|
|
685 |
static __inline int |
static __inline int |
686 |
SkipDecisionP(const IMAGE * current, const IMAGE * reference, |
SkipDecisionP(const IMAGE * current, const IMAGE * reference, |
687 |
const int x, const int y, |
const int x, const int y, |
688 |
const uint32_t iEdgedWidth, const uint32_t iQuant, int rrv) |
const uint32_t stride, const uint32_t iQuant, int rrv) |
689 |
|
|
690 |
{ |
{ |
691 |
/* keep repeating checks for all b-frames before this P frame, |
if(!rrv) { |
692 |
to make sure that SKIP is possible (todo) |
uint32_t sadC = sad8(current->u + x*8 + y*stride*8, |
693 |
how: if skip is not possible set sad00 to a very high value */ |
reference->u + x*8 + y*stride*8, stride); |
|
if(rrv) { |
|
|
uint32_t sadC = sad16(current->u + x*16 + y*(iEdgedWidth/2)*16, |
|
|
reference->u + x*16 + y*(iEdgedWidth/2)*16, iEdgedWidth/2, 256*4096); |
|
|
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
|
|
sadC += sad16(current->v + (x + y*(iEdgedWidth/2))*16, |
|
|
reference->v + (x + y*(iEdgedWidth/2))*16, iEdgedWidth/2, 256*4096); |
|
|
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
|
|
return 1; |
|
|
} else { |
|
|
uint32_t sadC = sad8(current->u + x*8 + y*(iEdgedWidth/2)*8, |
|
|
reference->u + x*8 + y*(iEdgedWidth/2)*8, iEdgedWidth/2); |
|
694 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
695 |
sadC += sad8(current->v + (x + y*(iEdgedWidth/2))*8, |
sadC += sad8(current->v + (x + y*stride)*8, |
696 |
reference->v + (x + y*(iEdgedWidth/2))*8, iEdgedWidth/2); |
reference->v + (x + y*stride)*8, stride); |
697 |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP) return 0; |
698 |
return 1; |
return 1; |
699 |
|
|
700 |
|
} else { |
701 |
|
uint32_t sadC = sad16(current->u + x*16 + y*stride*16, |
702 |
|
reference->u + x*16 + y*stride*16, stride, 256*4096); |
703 |
|
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
704 |
|
sadC += sad16(current->v + (x + y*stride)*16, |
705 |
|
reference->v + (x + y*stride)*16, stride, 256*4096); |
706 |
|
if (sadC > iQuant * MAX_CHROMA_SAD_FOR_SKIP*4) return 0; |
707 |
|
return 1; |
708 |
} |
} |
709 |
} |
} |
710 |
|
|
712 |
SkipMacroblockP(MACROBLOCK *pMB, const int32_t sad)
{
/* Mark pMB as not-coded (SKIP): zero all motion vectors (half-pel and
   quarter-pel sets) and record 'sad' as the SAD for the 16x16 block and
   all four 8x8 sub-blocks. */
	pMB->mode = MODE_NOT_CODED;
	pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = zeroMV;
	pMB->qmvs[0] = pMB->qmvs[1] = pMB->qmvs[2] = pMB->qmvs[3] = zeroMV;
	pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = pMB->sad8[2] = pMB->sad8[3] = sad;
}
719 |
|
|
730 |
const IMAGE *const pCurrent = ¤t->image; |
const IMAGE *const pCurrent = ¤t->image; |
731 |
const IMAGE *const pRef = &reference->image; |
const IMAGE *const pRef = &reference->image; |
732 |
|
|
|
const VECTOR zeroMV = { 0, 0 }; |
|
|
|
|
733 |
uint32_t mb_width = pParam->mb_width; |
uint32_t mb_width = pParam->mb_width; |
734 |
uint32_t mb_height = pParam->mb_height; |
uint32_t mb_height = pParam->mb_height; |
735 |
|
const uint32_t iEdgedWidth = pParam->edged_width; |
736 |
|
|
737 |
uint32_t x, y; |
uint32_t x, y; |
738 |
uint32_t iIntra = 0; |
uint32_t iIntra = 0; |
739 |
int32_t InterBias, quant = current->quant, sad00; |
int32_t InterBias, quant = current->quant, sad00; |
|
uint8_t *qimage; |
|
740 |
|
|
741 |
// some pre-initialized thingies for SearchP |
// some pre-initialized thingies for SearchP |
742 |
int32_t temp[8]; |
int32_t temp[8]; |
745 |
int32_t iMinSAD[5]; |
int32_t iMinSAD[5]; |
746 |
SearchData Data; |
SearchData Data; |
747 |
memset(&Data, 0, sizeof(SearchData)); |
memset(&Data, 0, sizeof(SearchData)); |
748 |
Data.iEdgedWidth = pParam->edged_width; |
Data.iEdgedWidth = iEdgedWidth; |
749 |
Data.currentMV = currentMV; |
Data.currentMV = currentMV; |
750 |
Data.currentQMV = currentQMV; |
Data.currentQMV = currentQMV; |
751 |
Data.iMinSAD = iMinSAD; |
Data.iMinSAD = iMinSAD; |
753 |
Data.iFcode = current->fcode; |
Data.iFcode = current->fcode; |
754 |
Data.rounding = pParam->m_rounding_type; |
Data.rounding = pParam->m_rounding_type; |
755 |
Data.qpel = pParam->m_quarterpel; |
Data.qpel = pParam->m_quarterpel; |
756 |
Data.chroma = current->global_flags & XVID_ME_COLOUR; |
Data.chroma = current->global_flags & ( PMV_CHROMA16 | PMV_CHROMA8 ); |
757 |
Data.rrv = current->global_flags & XVID_REDUCED; |
Data.rrv = current->global_flags & XVID_REDUCED; |
758 |
|
|
759 |
if ((current->global_flags & XVID_REDUCED)) { |
if ((current->global_flags & XVID_REDUCED)) { |
762 |
Data.qpel = Data.chroma = 0; |
Data.qpel = Data.chroma = 0; |
763 |
} |
} |
764 |
|
|
765 |
if((qimage = (uint8_t *) malloc(32 * pParam->edged_width)) == NULL) |
Data.RefQ = pRefV->u; // a good place, also used in MC (for similar purpose) |
|
return 1; // allocate some mem for qpel interpolated blocks |
|
|
// somehow this is dirty since I think we shouldn't use malloc outside |
|
|
// encoder_create() - so please fix me! |
|
|
Data.RefQ = qimage; |
|
766 |
if (sadInit) (*sadInit) (); |
if (sadInit) (*sadInit) (); |
767 |
|
|
768 |
for (y = 0; y < mb_height; y++) { |
for (y = 0; y < mb_height; y++) { |
769 |
for (x = 0; x < mb_width; x++) { |
for (x = 0; x < mb_width; x++) { |
770 |
MACROBLOCK *pMB = &pMBs[x + y * pParam->mb_width]; |
MACROBLOCK *pMB = &pMBs[x + y * pParam->mb_width]; |
771 |
|
|
772 |
if (Data.rrv) pMB->sad16 = |
if (!Data.rrv) pMB->sad16 = |
773 |
sad32v_c(pCurrent->y + (x + y * pParam->edged_width) * 32, |
sad16v(pCurrent->y + (x + y * iEdgedWidth) * 16, |
774 |
pRef->y + (x + y * pParam->edged_width) * 32, |
pRef->y + (x + y * iEdgedWidth) * 16, |
775 |
pParam->edged_width, pMB->sad8 ); |
pParam->edged_width, pMB->sad8 ); |
776 |
|
|
777 |
else pMB->sad16 = |
else pMB->sad16 = |
778 |
sad16v(pCurrent->y + (x + y * pParam->edged_width) * 16, |
sad32v_c(pCurrent->y + (x + y * iEdgedWidth) * 32, |
779 |
pRef->y + (x + y * pParam->edged_width) * 16, |
pRef->y + (x + y * iEdgedWidth) * 32, |
780 |
pParam->edged_width, pMB->sad8 ); |
pParam->edged_width, pMB->sad8 ); |
781 |
|
|
782 |
if (Data.chroma) { |
if (Data.chroma) { |
783 |
pMB->sad16 += sad8(pCurrent->u + x*8 + y*(pParam->edged_width/2)*8, |
Data.temp[7] = sad8(pCurrent->u + x*8 + y*(iEdgedWidth/2)*8, |
784 |
pRef->u + x*8 + y*(pParam->edged_width/2)*8, pParam->edged_width/2); |
pRef->u + x*8 + y*(iEdgedWidth/2)*8, iEdgedWidth/2) |
785 |
|
+ sad8(pCurrent->v + (x + y*(iEdgedWidth/2))*8, |
786 |
pMB->sad16 += sad8(pCurrent->v + (x + y*(pParam->edged_width/2))*8, |
pRef->v + (x + y*(iEdgedWidth/2))*8, iEdgedWidth/2); |
787 |
pRef->v + (x + y*(pParam->edged_width/2))*8, pParam->edged_width/2); |
pMB->sad16 += Data.temp[7]; |
788 |
} |
} |
789 |
|
|
790 |
sad00 = pMB->sad16; //if no gmc; else sad00 = (..) |
sad00 = pMB->sad16; |
791 |
|
|
792 |
if (!(current->global_flags & XVID_LUMIMASKING)) { |
if (!(current->global_flags & XVID_LUMIMASKING)) { |
793 |
pMB->dquant = NO_CHANGE; |
pMB->dquant = NO_CHANGE; |
|
pMB->quant = current->quant; |
|
794 |
} else { |
} else { |
795 |
if (pMB->dquant != NO_CHANGE) { |
if (pMB->dquant != NO_CHANGE) { |
796 |
quant += DQtab[pMB->dquant]; |
quant += DQtab[pMB->dquant]; |
797 |
if (quant > 31) quant = 31; |
if (quant > 31) quant = 31; |
798 |
else if (quant < 1) quant = 1; |
else if (quant < 1) quant = 1; |
799 |
} |
} |
|
pMB->quant = quant; |
|
800 |
} |
} |
801 |
|
pMB->quant = current->quant; |
802 |
|
|
803 |
//initial skip decision |
//initial skip decision |
804 |
/* no early skip for GMC (global vector = skip vector is unknown!) */ |
/* no early skip for GMC (global vector = skip vector is unknown!) */ |
805 |
if (current->coding_type == P_VOP) { /* no fast SKIP for S(GMC)-VOPs */ |
if (!(current->global_flags & XVID_GMC)) { /* no fast SKIP for S(GMC)-VOPs */ |
806 |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * INITIAL_SKIP_THRESH * (Data.rrv ? 4:1) ) |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * INITIAL_SKIP_THRESH * (Data.rrv ? 4:1) ) |
807 |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, pParam->edged_width, pMB->quant, Data.rrv)) { |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) { |
808 |
SkipMacroblockP(pMB, sad00); |
SkipMacroblockP(pMB, sad00); |
809 |
continue; |
continue; |
810 |
} |
} |
816 |
current->global_flags & XVID_INTER4V, pMB); |
current->global_flags & XVID_INTER4V, pMB); |
817 |
|
|
818 |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
819 |
if (current->coding_type == P_VOP) { |
if (!(current->global_flags & XVID_GMC)) { |
820 |
if ( (pMB->dquant == NO_CHANGE) && (sad00 < pMB->quant * MAX_SAD00_FOR_SKIP) |
if ( (pMB->dquant == NO_CHANGE) && (sad00 < pMB->quant * MAX_SAD00_FOR_SKIP) |
821 |
&& ((100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH * (Data.rrv ? 4:1)) ) |
&& ((100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH * (Data.rrv ? 4:1)) ) |
822 |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, pParam->edged_width, pMB->quant, Data.rrv)) { |
if (Data.chroma || SkipDecisionP(pCurrent, pRef, x, y, iEdgedWidth/2, pMB->quant, Data.rrv)) { |
823 |
SkipMacroblockP(pMB, sad00); |
SkipMacroblockP(pMB, sad00); |
824 |
continue; |
continue; |
825 |
} |
} |
835 |
if ((pMB - 1)->mode == MODE_INTRA ) InterBias -= 80; |
if ((pMB - 1)->mode == MODE_INTRA ) InterBias -= 80; |
836 |
|
|
837 |
if (Data.chroma) InterBias += 50; // to compensate bigger SAD |
if (Data.chroma) InterBias += 50; // to compensate bigger SAD |
838 |
if (Data.rrv) InterBias *= 4; //?? |
if (Data.rrv) InterBias *= 4; |
839 |
|
|
840 |
if (InterBias < pMB->sad16) { |
if (InterBias < pMB->sad16) { |
841 |
int32_t deviation; |
int32_t deviation; |
842 |
if (Data.rrv) { |
if (!Data.rrv) |
843 |
deviation = dev16(pCurrent->y + (x + y * pParam->edged_width) * 32, |
deviation = dev16(pCurrent->y + (x + y * iEdgedWidth) * 16, iEdgedWidth); |
844 |
pParam->edged_width) |
else { |
845 |
+ dev16(pCurrent->y + (x + y * pParam->edged_width) * 32 + 16, |
deviation = dev16(pCurrent->y + (x + y * iEdgedWidth) * 32, iEdgedWidth) |
846 |
pParam->edged_width) |
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16, iEdgedWidth) |
847 |
+ dev16(pCurrent->y + (x + y * pParam->edged_width) * 32 + 16 * pParam->edged_width, |
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16 * iEdgedWidth, iEdgedWidth) |
848 |
pParam->edged_width) |
+ dev16(pCurrent->y + (x + y * iEdgedWidth) * 32 + 16 * (iEdgedWidth+1), iEdgedWidth); |
849 |
+ dev16(pCurrent->y + (x + y * pParam->edged_width) * 32 + 16 * (pParam->edged_width+1), |
} |
|
pParam->edged_width); |
|
|
} else |
|
|
deviation = dev16(pCurrent->y + (x + y * pParam->edged_width) * 16, |
|
|
pParam->edged_width); |
|
|
|
|
850 |
if (deviation < (pMB->sad16 - InterBias)) { |
if (deviation < (pMB->sad16 - InterBias)) { |
851 |
if (++iIntra >= iLimit) { free(qimage); return 1; } |
if (++iIntra >= iLimit) return 1; |
852 |
|
SkipMacroblockP(pMB, 0); //same thing |
853 |
pMB->mode = MODE_INTRA; |
pMB->mode = MODE_INTRA; |
|
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = |
|
|
pMB->mvs[3] = zeroMV; |
|
|
pMB->qmvs[0] = pMB->qmvs[1] = pMB->qmvs[2] = |
|
|
pMB->qmvs[3] = zeroMV; |
|
|
pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = pMB->sad8[2] = |
|
|
pMB->sad8[3] = 0; |
|
854 |
} |
} |
855 |
} |
} |
856 |
} |
} |
857 |
} |
} |
|
free(qimage); |
|
858 |
|
|
859 |
if (current->coding_type == S_VOP) /* first GMC step only for S(GMC)-VOPs */ |
if (current->global_flags & XVID_GMC ) /* GMC only for S(GMC)-VOPs */ |
860 |
current->GMC_MV = GlobalMotionEst( pMBs, pParam, current->fcode ); |
{ |
861 |
else |
current->warp = GlobalMotionEst( pMBs, pParam, current, reference, pRefH, pRefV, pRefHV); |
862 |
current->GMC_MV = zeroMV; |
} |
863 |
|
|
864 |
return 0; |
return 0; |
865 |
} |
} |
866 |
|
|
867 |
|
|
|
#define PMV_HALFPEL16 (PMV_HALFPELDIAMOND16|PMV_HALFPELREFINE16) |
|
|
|
|
868 |
static __inline int |
static __inline int |
869 |
make_mask(const VECTOR * const pmv, const int i) |
make_mask(const VECTOR * const pmv, const int i) |
870 |
{ |
{ |
872 |
for (j = 0; j < i; j++) { |
for (j = 0; j < i; j++) { |
873 |
if (MVequal(pmv[i], pmv[j])) return 0; // same vector has been checked already |
if (MVequal(pmv[i], pmv[j])) return 0; // same vector has been checked already |
874 |
if (pmv[i].x == pmv[j].x) { |
if (pmv[i].x == pmv[j].x) { |
875 |
if (pmv[i].y == pmv[j].y + iDiamondSize) { mask &= ~4; continue; } |
if (pmv[i].y == pmv[j].y + iDiamondSize) mask &= ~4; |
876 |
if (pmv[i].y == pmv[j].y - iDiamondSize) { mask &= ~8; continue; } |
else if (pmv[i].y == pmv[j].y - iDiamondSize) mask &= ~8; |
877 |
} else |
} else |
878 |
if (pmv[i].y == pmv[j].y) { |
if (pmv[i].y == pmv[j].y) { |
879 |
if (pmv[i].x == pmv[j].x + iDiamondSize) { mask &= ~1; continue; } |
if (pmv[i].x == pmv[j].x + iDiamondSize) mask &= ~1; |
880 |
if (pmv[i].x == pmv[j].x - iDiamondSize) { mask &= ~2; continue; } |
else if (pmv[i].x == pmv[j].x - iDiamondSize) mask &= ~2; |
881 |
} |
} |
882 |
} |
} |
883 |
return mask; |
return mask; |
903 |
else pmv[4].x = pmv[4].y = 0; |
else pmv[4].x = pmv[4].y = 0; |
904 |
|
|
905 |
// [1] median prediction |
// [1] median prediction |
906 |
if (rrv) { //median is in halfzero-precision |
pmv[1].x = EVEN(pmv[0].x); pmv[1].y = EVEN(pmv[0].y); |
|
pmv[1].x = RRV_MV_SCALEUP(pmv[0].x); |
|
|
pmv[1].y = RRV_MV_SCALEUP(pmv[0].y); |
|
|
} else { pmv[1].x = EVEN(pmv[0].x); pmv[1].y = EVEN(pmv[0].y); } |
|
907 |
|
|
908 |
pmv[0].x = pmv[0].y = 0; // [0] is zero; not used in the loop (checked before) but needed here for make_mask |
pmv[0].x = pmv[0].y = 0; // [0] is zero; not used in the loop (checked before) but needed here for make_mask |
909 |
|
|
918 |
if (rrv) { |
if (rrv) { |
919 |
int i; |
int i; |
920 |
for (i = 0; i < 7; i++) { |
for (i = 0; i < 7; i++) { |
921 |
pmv[i].x = RRV_MV_SCALEDOWN(pmv[i].x); |
pmv[i].x = RRV_MV_SCALEUP(pmv[i].x); |
922 |
pmv[i].x = RRV_MV_SCALEUP(pmv[i].x); // a trick |
pmv[i].y = RRV_MV_SCALEUP(pmv[i].y); |
923 |
} |
} |
924 |
} |
} |
925 |
} |
} |
948 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
949 |
pParam->width, pParam->height, Data->iFcode - Data->qpel, 0, Data->rrv); |
pParam->width, pParam->height, Data->iFcode - Data->qpel, 0, Data->rrv); |
950 |
|
|
951 |
get_pmvdata2(pMBs, pParam->mb_width, 0, x, y, 0, pmv, Data->temp); //has to be changed to get_pmv(2)() |
get_pmvdata2(pMBs, pParam->mb_width, 0, x, y, 0, pmv, Data->temp); |
952 |
|
|
953 |
Data->temp[5] = Data->temp[7] = 256*4096; // to reset chroma-sad cache |
Data->temp[5] = Data->temp[6] = 0; // chroma-sad cache |
954 |
if (Data->rrv) i = 2; else i = 1; |
i = Data->rrv ? 2 : 1; |
955 |
Data->Cur = pCur->y + (x + y * Data->iEdgedWidth) * 16*i; |
Data->Cur = pCur->y + (x + y * Data->iEdgedWidth) * 16*i; |
956 |
Data->CurV = pCur->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->CurV = pCur->v + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
957 |
Data->CurU = pCur->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
Data->CurU = pCur->u + (x + y * (Data->iEdgedWidth/2)) * 8*i; |
972 |
for(i = 0; i < 5; i++) |
for(i = 0; i < 5; i++) |
973 |
Data->currentMV[i].x = Data->currentMV[i].y = 0; |
Data->currentMV[i].x = Data->currentMV[i].y = 0; |
974 |
|
|
975 |
if (pParam->m_quarterpel) Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
if (Data->qpel) Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
976 |
else Data->predMV = pmv[0]; |
else Data->predMV = pmv[0]; |
977 |
|
|
978 |
i = d_mv_bits(Data->predMV.x, Data->predMV.y, Data->iFcode, 0, 0); |
i = d_mv_bits(0, 0, Data->predMV, Data->iFcode, 0, 0); |
979 |
Data->iMinSAD[0] = pMB->sad16 + (Data->lambda16 * i * pMB->sad16)/1000; |
Data->iMinSAD[0] = pMB->sad16 + ((Data->lambda16 * i * pMB->sad16)>>10); |
980 |
Data->iMinSAD[1] = pMB->sad8[0] + (Data->lambda8 * i * (pMB->sad8[0]+NEIGH_8X8_BIAS))/100; |
Data->iMinSAD[1] = pMB->sad8[0] + ((Data->lambda8 * i * (pMB->sad8[0]+NEIGH_8X8_BIAS)) >> 10); |
981 |
Data->iMinSAD[2] = pMB->sad8[1]; |
Data->iMinSAD[2] = pMB->sad8[1]; |
982 |
Data->iMinSAD[3] = pMB->sad8[2]; |
Data->iMinSAD[3] = pMB->sad8[2]; |
983 |
Data->iMinSAD[4] = pMB->sad8[3]; |
Data->iMinSAD[4] = pMB->sad8[3]; |
984 |
|
|
985 |
if ((x == 0) && (y == 0)) threshA = 512; |
if (x | y) { |
|
else { |
|
986 |
threshA = Data->temp[0]; // that's when we keep this SAD atm |
threshA = Data->temp[0]; // that's when we keep this SAD atm |
987 |
if (threshA < 512) threshA = 512; |
if (threshA < 512) threshA = 512; |
988 |
if (threshA > 1024) threshA = 1024; } |
else if (threshA > 1024) threshA = 1024; |
989 |
|
} else threshA = 512; |
990 |
|
|
991 |
PreparePredictionsP(pmv, x, y, pParam->mb_width, pParam->mb_height, |
PreparePredictionsP(pmv, x, y, pParam->mb_width, pParam->mb_height, |
992 |
prevMBs + x + y * pParam->mb_width, Data->rrv); |
prevMBs + x + y * pParam->mb_width, Data->rrv); |
993 |
|
|
994 |
if (Data->rrv) CheckCandidate = CheckCandidate32; |
if (!Data->rrv) { |
995 |
else if (inter4v || Data->chroma) CheckCandidate = CheckCandidate16; |
if (inter4v | Data->chroma) CheckCandidate = CheckCandidate16; |
996 |
else CheckCandidate = CheckCandidate16no4v; //for extra speed |
else CheckCandidate = CheckCandidate16no4v; //for extra speed |
997 |
|
} else CheckCandidate = CheckCandidate32; |
998 |
|
|
999 |
/* main loop. checking all predictions */ |
/* main loop. checking all predictions (but first, which is 0,0 and has been checked in MotionEstimation())*/ |
1000 |
|
|
1001 |
for (i = 1; i < 7; i++) { |
for (i = 1; i < 7; i++) { |
1002 |
if (!(mask = make_mask(pmv, i)) ) continue; |
if (!(mask = make_mask(pmv, i)) ) continue; |
1006 |
|
|
1007 |
if ((Data->iMinSAD[0] <= threshA) || |
if ((Data->iMinSAD[0] <= threshA) || |
1008 |
(MVequal(Data->currentMV[0], (prevMBs+x+y*pParam->mb_width)->mvs[0]) && |
(MVequal(Data->currentMV[0], (prevMBs+x+y*pParam->mb_width)->mvs[0]) && |
1009 |
(Data->iMinSAD[0] < (prevMBs+x+y*pParam->mb_width)->sad16))) { |
(Data->iMinSAD[0] < (prevMBs+x+y*pParam->mb_width)->sad16))) |
1010 |
inter4v = 0; |
inter4v = 0; |
1011 |
} else { |
else { |
1012 |
|
|
1013 |
MainSearchFunc * MainSearchPtr; |
MainSearchFunc * MainSearchPtr; |
1014 |
if (MotionFlags & PMV_USESQUARES16) MainSearchPtr = SquareSearch; |
if (MotionFlags & PMV_USESQUARES16) MainSearchPtr = SquareSearch; |
1041 |
} |
} |
1042 |
|
|
1043 |
backupMV = Data->currentMV[0]; |
backupMV = Data->currentMV[0]; |
1044 |
if (!MotionFlags & PMV_HALFPELREFINE16 || Data->rrv) startMV.x = startMV.y = 0; |
if (MotionFlags & PMV_HALFPELREFINE16 && !Data->rrv) startMV.x = startMV.y = 1; |
1045 |
else startMV.x = startMV.y = 1; |
else startMV.x = startMV.y = 0; |
1046 |
if (!(MVequal(startMV, backupMV))) { |
if (!(MVequal(startMV, backupMV))) { |
1047 |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
bSAD = Data->iMinSAD[0]; Data->iMinSAD[0] = MV_MAX_ERROR; |
1048 |
|
|
1062 |
Data->currentQMV[i].y = 2 * Data->currentMV[i].y; |
Data->currentQMV[i].y = 2 * Data->currentMV[i].y; |
1063 |
} |
} |
1064 |
|
|
1065 |
if((!Data->rrv) && (pParam->m_quarterpel) && (MotionFlags & PMV_QUARTERPELREFINE16)) { |
if (Data->qpel && MotionFlags & PMV_QUARTERPELREFINE16) { |
|
|
|
1066 |
Data->qpel_precision = 1; |
Data->qpel_precision = 1; |
1067 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
1068 |
pParam->width, pParam->height, Data->iFcode, 1, 0); |
pParam->width, pParam->height, Data->iFcode, 1, 0); |
1073 |
if (Data->iMinSAD[0] < (int32_t)iQuant * 30 ) inter4v = 0; |
if (Data->iMinSAD[0] < (int32_t)iQuant * 30 ) inter4v = 0; |
1074 |
if (inter4v) { |
if (inter4v) { |
1075 |
SearchData Data8; |
SearchData Data8; |
1076 |
Data8.iFcode = Data->iFcode; |
memcpy(&Data8, Data, sizeof(SearchData)); //quick copy of common data |
1077 |
Data8.lambda8 = Data->lambda8; |
|
|
Data8.iEdgedWidth = Data->iEdgedWidth; |
|
|
Data8.RefQ = Data->RefQ; |
|
|
Data8.qpel = Data->qpel; |
|
|
Data8.rrv = Data->rrv; |
|
1078 |
Search8(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
Search8(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
1079 |
Search8(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
Search8(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
1080 |
Search8(Data, 2*x, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 2, &Data8); |
Search8(Data, 2*x, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 2, &Data8); |
1081 |
Search8(Data, 2*x + 1, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 3, &Data8); |
Search8(Data, 2*x + 1, 2*y + 1, MotionFlags, pParam, pMB, pMBs, 3, &Data8); |
1082 |
|
|
1083 |
if (Data->chroma) { |
if (Data->chroma) { |
1084 |
int sumx, sumy, dx, dy; |
int sumx = 0, sumy = 0; |
1085 |
|
const int div = 1 + Data->qpel; |
1086 |
|
const VECTOR * const mv = Data->qpel ? pMB->qmvs : pMB->mvs; |
1087 |
|
|
1088 |
if(pParam->m_quarterpel) { |
for (i = 0; i < 4; i++) { |
1089 |
sumx= pMB->qmvs[0].x/2 + pMB->qmvs[1].x/2 + pMB->qmvs[2].x/2 + pMB->qmvs[3].x/2; |
sumx += mv[i].x / div; |
1090 |
sumy = pMB->qmvs[0].y/2 + pMB->qmvs[1].y/2 + pMB->qmvs[2].y/2 + pMB->qmvs[3].y/2; |
sumy += mv[i].y / div; |
|
} else { |
|
|
sumx = pMB->mvs[0].x + pMB->mvs[1].x + pMB->mvs[2].x + pMB->mvs[3].x; |
|
|
sumy = pMB->mvs[0].y + pMB->mvs[1].y + pMB->mvs[2].y + pMB->mvs[3].y; |
|
1091 |
} |
} |
|
dx = (sumx >> 3) + roundtab_76[sumx & 0xf]; |
|
|
dy = (sumy >> 3) + roundtab_76[sumy & 0xf]; |
|
1092 |
|
|
1093 |
Data->iMinSAD[1] += ChromaSAD(dx, dy, Data); |
Data->iMinSAD[1] += ChromaSAD( (sumx >> 3) + roundtab_76[sumx & 0xf], |
1094 |
|
(sumy >> 3) + roundtab_76[sumy & 0xf], Data); |
1095 |
} |
} |
1096 |
} |
} |
1097 |
|
|
1099 |
Data->currentMV[0].x = RRV_MV_SCALEDOWN(Data->currentMV[0].x); |
Data->currentMV[0].x = RRV_MV_SCALEDOWN(Data->currentMV[0].x); |
1100 |
Data->currentMV[0].y = RRV_MV_SCALEDOWN(Data->currentMV[0].y); |
Data->currentMV[0].y = RRV_MV_SCALEDOWN(Data->currentMV[0].y); |
1101 |
} |
} |
1102 |
|
|
1103 |
if (!(inter4v) || |
if (!(inter4v) || |
1104 |
(Data->iMinSAD[0] < Data->iMinSAD[1] + Data->iMinSAD[2] + |
(Data->iMinSAD[0] < Data->iMinSAD[1] + Data->iMinSAD[2] + |
1105 |
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * (int32_t)iQuant )) { |
Data->iMinSAD[3] + Data->iMinSAD[4] + IMV16X16 * (int32_t)iQuant )) { |
1106 |
// INTER MODE |
// INTER MODE |
1107 |
pMB->mode = MODE_INTER; |
pMB->mode = MODE_INTER; |
1108 |
pMB->mvs[0] = pMB->mvs[1] |
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
1109 |
= pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = pMB->sad8[2] = pMB->sad8[3] = Data->iMinSAD[0]; |
|
|
|
|
pMB->sad16 = pMB->sad8[0] = pMB->sad8[1] = |
|
|
pMB->sad8[2] = pMB->sad8[3] = Data->iMinSAD[0]; |
|
1110 |
|
|
1111 |
if(pParam->m_quarterpel) { |
if(Data->qpel) { |
1112 |
pMB->qmvs[0] = pMB->qmvs[1] |
pMB->qmvs[0] = pMB->qmvs[1] |
1113 |
= pMB->qmvs[2] = pMB->qmvs[3] = Data->currentQMV[0]; |
= pMB->qmvs[2] = pMB->qmvs[3] = Data->currentQMV[0]; |
1114 |
pMB->pmvs[0].x = Data->currentQMV[0].x - Data->predMV.x; |
pMB->pmvs[0].x = Data->currentQMV[0].x - Data->predMV.x; |
1140 |
Data->currentMV = OldData->currentMV + 1 + block; |
Data->currentMV = OldData->currentMV + 1 + block; |
1141 |
Data->currentQMV = OldData->currentQMV + 1 + block; |
Data->currentQMV = OldData->currentQMV + 1 + block; |
1142 |
|
|
1143 |
if(pParam->m_quarterpel) { |
if(Data->qpel) { |
1144 |
Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x/2, y/2, block); |
Data->predMV = get_qpmv2(pMBs, pParam->mb_width, 0, x/2, y/2, block); |
1145 |
if (block != 0) i = d_mv_bits( Data->currentQMV->x - Data->predMV.x, |
if (block != 0) i = d_mv_bits( Data->currentQMV->x, Data->currentQMV->y, |
1146 |
Data->currentQMV->y - Data->predMV.y, Data->iFcode, 0, 0); |
Data->predMV, Data->iFcode, 0, 0); |
|
|
|
1147 |
} else { |
} else { |
1148 |
Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x/2, y/2, block); |
Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x/2, y/2, block); |
1149 |
if (block != 0) { |
if (block != 0) i = d_mv_bits( Data->currentMV->x, Data->currentMV->y, |
1150 |
if (block != 0) i = d_mv_bits( Data->currentMV->x - Data->predMV.x, |
Data->predMV, Data->iFcode, 0, Data->rrv); |
|
Data->currentMV->y - Data->predMV.y, Data->iFcode, 0, Data->rrv); |
|
|
} |
|
1151 |
} |
} |
1152 |
|
|
1153 |
*(Data->iMinSAD) += (Data->lambda8 * i * (*Data->iMinSAD + NEIGH_8X8_BIAS))/100; |
*(Data->iMinSAD) += (Data->lambda8 * i * (*Data->iMinSAD + NEIGH_8X8_BIAS))>>10; |
1154 |
|
|
1155 |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8)) { |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8|PMV_QUARTERPELREFINE8)) { |
1156 |
if (Data->rrv) i = 2; else i = 1; |
if (Data->rrv) i = 2; else i = 1; |
1157 |
|
|
1158 |
Data->Ref = OldData->Ref + i*8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->Ref = OldData->Ref + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1159 |
Data->RefH = OldData->RefH + i*8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->RefH = OldData->RefH + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1160 |
Data->RefV = OldData->RefV + i*8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->RefV = OldData->RefV + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1161 |
Data->RefHV = OldData->RefHV + i*8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->RefHV = OldData->RefHV + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1162 |
|
|
1163 |
Data->Cur = OldData->Cur + i*8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->Cur = OldData->Cur + i * 8 * ((block&1) + Data->iEdgedWidth*(block>>1)); |
1164 |
Data->qpel_precision = 0; |
Data->qpel_precision = 0; |
1165 |
|
|
1166 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
1167 |
pParam->width, pParam->height, OldData->iFcode - Data->qpel, 0, Data->rrv); |
pParam->width, pParam->height, Data->iFcode - Data->qpel, 0, Data->rrv); |
1168 |
|
|
1169 |
if (Data->rrv) CheckCandidate = CheckCandidate16no4v; |
if (!Data->rrv) CheckCandidate = CheckCandidate8; |
1170 |
else CheckCandidate = CheckCandidate8; |
else CheckCandidate = CheckCandidate16no4v; |
1171 |
|
|
1172 |
if (MotionFlags & PMV_EXTSEARCH8) { |
if (MotionFlags & PMV_EXTSEARCH8) { |
1173 |
int32_t temp_sad = *(Data->iMinSAD); // store current MinSAD |
int32_t temp_sad = *(Data->iMinSAD); // store current MinSAD |
1196 |
} |
} |
1197 |
} |
} |
1198 |
|
|
1199 |
if(!Data->rrv && Data->qpel) { |
if (Data->qpel && MotionFlags & PMV_QUARTERPELREFINE8) { |
|
if((!(Data->currentQMV->x & 1)) && (!(Data->currentQMV->y & 1)) && |
|
|
(MotionFlags & PMV_QUARTERPELREFINE8)) { |
|
1200 |
Data->qpel_precision = 1; |
Data->qpel_precision = 1; |
1201 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
1202 |
pParam->width, pParam->height, OldData->iFcode, 1, 0); |
pParam->width, pParam->height, Data->iFcode, 1, 0); |
1203 |
SubpelRefine(Data); |
SubpelRefine(Data); |
1204 |
} |
} |
1205 |
} |
} |
|
} |
|
1206 |
|
|
1207 |
if (Data->rrv) { |
if (Data->rrv) { |
1208 |
Data->currentMV->x = RRV_MV_SCALEDOWN(Data->currentMV->x); |
Data->currentMV->x = RRV_MV_SCALEDOWN(Data->currentMV->x); |
1212 |
if(Data->qpel) { |
if(Data->qpel) { |
1213 |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predMV.x; |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predMV.x; |
1214 |
pMB->pmvs[block].y = Data->currentQMV->y - Data->predMV.y; |
pMB->pmvs[block].y = Data->currentQMV->y - Data->predMV.y; |
1215 |
pMB->qmvs[block] = *(Data->currentQMV); |
pMB->qmvs[block] = *Data->currentQMV; |
1216 |
} else { |
} else { |
1217 |
pMB->pmvs[block].x = Data->currentMV->x - Data->predMV.x; |
pMB->pmvs[block].x = Data->currentMV->x - Data->predMV.x; |
1218 |
pMB->pmvs[block].y = Data->currentMV->y - Data->predMV.y; |
pMB->pmvs[block].y = Data->currentMV->y - Data->predMV.y; |
1219 |
} |
} |
1220 |
|
|
1221 |
pMB->mvs[block] = *(Data->currentMV); |
pMB->mvs[block] = *Data->currentMV; |
1222 |
pMB->sad8[block] = 4 * (*Data->iMinSAD); |
pMB->sad8[block] = 4 * *Data->iMinSAD; |
1223 |
} |
} |
1224 |
|
|
1225 |
/* B-frames code starts here */ |
/* motion estimation for B-frames */ |
1226 |
|
|
1227 |
static __inline VECTOR |
static __inline VECTOR |
1228 |
ChoosePred(const MACROBLOCK * const pMB, const uint32_t mode) |
ChoosePred(const MACROBLOCK * const pMB, const uint32_t mode) |
1229 |
{ |
{ |
1230 |
/* the stupidiest function ever */ |
/* the stupidiest function ever */ |
1231 |
if (mode == MODE_FORWARD) return pMB->mvs[0]; |
return (mode == MODE_FORWARD ? pMB->mvs[0] : pMB->b_mvs[0]); |
|
else return pMB->b_mvs[0]; |
|
1232 |
} |
} |
1233 |
|
|
1234 |
static void __inline |
static void __inline |
1261 |
pmv[5].x = EVEN(pmv[5].x); pmv[5].y = EVEN(pmv[5].y); |
pmv[5].x = EVEN(pmv[5].x); pmv[5].y = EVEN(pmv[5].y); |
1262 |
} else pmv[5].x = pmv[5].y = 0; |
} else pmv[5].x = pmv[5].y = 0; |
1263 |
|
|
1264 |
if ((x != 0)&&(y != 0)) { |
if (x != 0 && y != 0) { |
1265 |
pmv[6] = ChoosePred(pMB-1-iWcount, mode_curr); |
pmv[6] = ChoosePred(pMB-1-iWcount, mode_curr); |
1266 |
pmv[6].x = EVEN(pmv[5].x); pmv[5].y = EVEN(pmv[5].y); |
pmv[6].x = EVEN(pmv[6].x); pmv[6].y = EVEN(pmv[6].y); |
1267 |
} else pmv[6].x = pmv[6].y = 0; |
} else pmv[6].x = pmv[6].y = 0; |
|
|
|
|
// more? |
|
1268 |
} |
} |
1269 |
|
|
1270 |
|
|
1271 |
/* search backward or forward, for b-frames */ |
/* search backward or forward */ |
1272 |
static void |
static void |
1273 |
SearchBF( const uint8_t * const pRef, |
SearchBF( const uint8_t * const pRef, |
1274 |
const uint8_t * const pRefH, |
const uint8_t * const pRefH, |
1286 |
SearchData * const Data) |
SearchData * const Data) |
1287 |
{ |
{ |
1288 |
|
|
1289 |
const int32_t iEdgedWidth = pParam->edged_width; |
int i, iDirection = 255, mask; |
|
|
|
|
int i, iDirection, mask; |
|
1290 |
VECTOR pmv[7]; |
VECTOR pmv[7]; |
1291 |
MainSearchFunc *MainSearchPtr; |
MainSearchFunc *MainSearchPtr; |
1292 |
*Data->iMinSAD = MV_MAX_ERROR; |
*Data->iMinSAD = MV_MAX_ERROR; |
1293 |
Data->iFcode = iFcode; |
Data->iFcode = iFcode; |
1294 |
Data->qpel_precision = 0; |
Data->qpel_precision = 0; |
1295 |
|
|
1296 |
Data->Ref = pRef + (x + y * iEdgedWidth) * 16; |
Data->Ref = pRef + (x + y * Data->iEdgedWidth) * 16; |
1297 |
Data->RefH = pRefH + (x + y * iEdgedWidth) * 16; |
Data->RefH = pRefH + (x + y * Data->iEdgedWidth) * 16; |
1298 |
Data->RefV = pRefV + (x + y * iEdgedWidth) * 16; |
Data->RefV = pRefV + (x + y * Data->iEdgedWidth) * 16; |
1299 |
Data->RefHV = pRefHV + (x + y * iEdgedWidth) * 16; |
Data->RefHV = pRefHV + (x + y * Data->iEdgedWidth) * 16; |
1300 |
|
|
1301 |
Data->predMV = *predMV; |
Data->predMV = *predMV; |
1302 |
|
|
1305 |
|
|
1306 |
pmv[0] = Data->predMV; |
pmv[0] = Data->predMV; |
1307 |
if (Data->qpel) { pmv[0].x /= 2; pmv[0].y /= 2; } |
if (Data->qpel) { pmv[0].x /= 2; pmv[0].y /= 2; } |
1308 |
|
|
1309 |
PreparePredictionsBF(pmv, x, y, pParam->mb_width, pMB, mode_current); |
PreparePredictionsBF(pmv, x, y, pParam->mb_width, pMB, mode_current); |
1310 |
|
|
1311 |
Data->currentMV->x = Data->currentMV->y = 0; |
Data->currentMV->x = Data->currentMV->y = 0; |
1312 |
CheckCandidate = CheckCandidate16no4v; |
CheckCandidate = CheckCandidate16no4v; |
1313 |
|
|
1314 |
// main loop. checking all predictions |
// main loop. checking all predictions |
1315 |
for (i = 0; i < 8; i++) { |
for (i = 0; i < 7; i++) { |
1316 |
if (!(mask = make_mask(pmv, i)) ) continue; |
if (!(mask = make_mask(pmv, i)) ) continue; |
1317 |
CheckCandidate16no4v(pmv[i].x, pmv[i].y, mask, &iDirection, Data); |
CheckCandidate16no4v(pmv[i].x, pmv[i].y, mask, &iDirection, Data); |
1318 |
} |
} |
1319 |
|
|
1320 |
if (MotionFlags & PMV_USESQUARES16) |
if (MotionFlags & PMV_USESQUARES16) MainSearchPtr = SquareSearch; |
1321 |
MainSearchPtr = SquareSearch; |
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) MainSearchPtr = AdvDiamondSearch; |
|
else if (MotionFlags & PMV_ADVANCEDDIAMOND16) |
|
|
MainSearchPtr = AdvDiamondSearch; |
|
1322 |
else MainSearchPtr = DiamondSearch; |
else MainSearchPtr = DiamondSearch; |
1323 |
|
|
1324 |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, 255); |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, iDirection); |
1325 |
|
|
1326 |
SubpelRefine(Data); |
SubpelRefine(Data); |
1327 |
|
|
1328 |
if (Data->qpel) { |
if (Data->qpel && *Data->iMinSAD < *best_sad + 300) { |
1329 |
Data->currentQMV->x = 2*Data->currentMV->x; |
Data->currentQMV->x = 2*Data->currentMV->x; |
1330 |
Data->currentQMV->y = 2*Data->currentMV->y; |
Data->currentQMV->y = 2*Data->currentMV->y; |
1331 |
Data->qpel_precision = 1; |
Data->qpel_precision = 1; |
1335 |
} |
} |
1336 |
|
|
1337 |
// three bits are needed to code backward mode. four for forward |
// three bits are needed to code backward mode. four for forward |
1338 |
// we treat the bits just like they were vector's |
|
1339 |
if (mode_current == MODE_FORWARD) *Data->iMinSAD += 4 * Data->lambda16; |
if (mode_current == MODE_FORWARD) *Data->iMinSAD += 4 * Data->lambda16; |
1340 |
else *Data->iMinSAD += 3 * Data->lambda16; |
else *Data->iMinSAD += 3 * Data->lambda16; |
1341 |
|
|
1353 |
pMB->pmvs[0].x = Data->currentMV->x - predMV->x; |
pMB->pmvs[0].x = Data->currentMV->x - predMV->x; |
1354 |
pMB->pmvs[0].y = Data->currentMV->y - predMV->y; |
pMB->pmvs[0].y = Data->currentMV->y - predMV->y; |
1355 |
} |
} |
1356 |
if (mode_current == MODE_FORWARD) |
if (mode_current == MODE_FORWARD) pMB->mvs[0] = *Data->currentMV; |
1357 |
pMB->mvs[0] = *(Data->currentMV+2) = *Data->currentMV; |
else pMB->b_mvs[0] = *Data->currentMV; |
|
else |
|
|
pMB->b_mvs[0] = *(Data->currentMV+1) = *Data->currentMV; //we store currmv for interpolate search |
|
|
|
|
1358 |
} |
} |
1359 |
|
|
1360 |
|
if (mode_current == MODE_FORWARD) *(Data->currentMV+2) = *Data->currentMV; |
1361 |
|
else *(Data->currentMV+1) = *Data->currentMV; //we store currmv for interpolate search |
1362 |
} |
} |
1363 |
|
|
1364 |
static void |
static void |
1366 |
const IMAGE * const f_Ref, |
const IMAGE * const f_Ref, |
1367 |
const IMAGE * const b_Ref, |
const IMAGE * const b_Ref, |
1368 |
MACROBLOCK * const pMB, |
MACROBLOCK * const pMB, |
|
const uint32_t quant, |
|
1369 |
const uint32_t x, const uint32_t y, |
const uint32_t x, const uint32_t y, |
1370 |
const SearchData * const Data) |
const SearchData * const Data) |
1371 |
{ |
{ |
1372 |
int dx, dy, b_dx, b_dy; |
int dx = 0, dy = 0, b_dx = 0, b_dy = 0; |
1373 |
uint32_t sum; |
int32_t sum; |
1374 |
|
const int div = 1 + Data->qpel; |
1375 |
|
int k; |
1376 |
|
const uint32_t stride = Data->iEdgedWidth/2; |
1377 |
//this is not full chroma compensation, only it's fullpel approximation. should work though |
//this is not full chroma compensation, only it's fullpel approximation. should work though |
|
if (Data->qpel) { |
|
|
dy = Data->directmvF[0].y/2 + Data->directmvF[1].y/2 + |
|
|
Data->directmvF[2].y/2 + Data->directmvF[3].y/2; |
|
|
|
|
|
dx = Data->directmvF[0].x/2 + Data->directmvF[1].x/2 + |
|
|
Data->directmvF[2].x/2 + Data->directmvF[3].x/2; |
|
|
|
|
|
b_dy = Data->directmvB[0].y/2 + Data->directmvB[1].y/2 + |
|
|
Data->directmvB[2].y/2 + Data->directmvB[3].y/2; |
|
|
|
|
|
b_dx = Data->directmvB[0].x/2 + Data->directmvB[1].x/2 + |
|
|
Data->directmvB[2].x/2 + Data->directmvB[3].x/2; |
|
|
|
|
|
} else { |
|
|
dy = Data->directmvF[0].y + Data->directmvF[1].y + |
|
|
Data->directmvF[2].y + Data->directmvF[3].y; |
|
|
|
|
|
dx = Data->directmvF[0].x + Data->directmvF[1].x + |
|
|
Data->directmvF[2].x + Data->directmvF[3].x; |
|
|
|
|
|
b_dy = Data->directmvB[0].y + Data->directmvB[1].y + |
|
|
Data->directmvB[2].y + Data->directmvB[3].y; |
|
1378 |
|
|
1379 |
b_dx = Data->directmvB[0].x + Data->directmvB[1].x + |
for (k = 0; k < 4; k++) { |
1380 |
Data->directmvB[2].x + Data->directmvB[3].x; |
dy += Data->directmvF[k].y / div; |
1381 |
|
dx += Data->directmvF[0].x / div; |
1382 |
|
b_dy += Data->directmvB[0].y / div; |
1383 |
|
b_dx += Data->directmvB[0].x / div; |
1384 |
} |
} |
1385 |
|
|
|
|
|
1386 |
dy = (dy >> 3) + roundtab_76[dy & 0xf]; |
dy = (dy >> 3) + roundtab_76[dy & 0xf]; |
1387 |
dx = (dx >> 3) + roundtab_76[dx & 0xf]; |
dx = (dx >> 3) + roundtab_76[dx & 0xf]; |
1388 |
b_dy = (b_dy >> 3) + roundtab_76[b_dy & 0xf]; |
b_dy = (b_dy >> 3) + roundtab_76[b_dy & 0xf]; |
1389 |
b_dx = (b_dx >> 3) + roundtab_76[b_dx & 0xf]; |
b_dx = (b_dx >> 3) + roundtab_76[b_dx & 0xf]; |
1390 |
|
|
1391 |
sum = sad8bi(pCur->u + 8*x + 8*y*(Data->iEdgedWidth/2), |
sum = sad8bi(pCur->u + 8 * x + 8 * y * stride, |
1392 |
f_Ref->u + (y*8 + dy/2) * (Data->iEdgedWidth/2) + x*8 + dx/2, |
f_Ref->u + (y*8 + dy/2) * stride + x*8 + dx/2, |
1393 |
b_Ref->u + (y*8 + b_dy/2) * (Data->iEdgedWidth/2) + x*8 + b_dx/2, |
b_Ref->u + (y*8 + b_dy/2) * stride + x*8 + b_dx/2, |
1394 |
Data->iEdgedWidth/2); |
stride); |
1395 |
sum += sad8bi(pCur->v + 8*x + 8*y*(Data->iEdgedWidth/2), |
|
1396 |
f_Ref->v + (y*8 + dy/2) * (Data->iEdgedWidth/2) + x*8 + dx/2, |
if (sum >= 2 * MAX_CHROMA_SAD_FOR_SKIP * pMB->quant) return; //no skip |
1397 |
b_Ref->v + (y*8 + b_dy/2) * (Data->iEdgedWidth/2) + x*8 + b_dx/2, |
|
1398 |
Data->iEdgedWidth/2); |
sum += sad8bi(pCur->v + 8*x + 8 * y * stride, |
1399 |
|
f_Ref->v + (y*8 + dy/2) * stride + x*8 + dx/2, |
1400 |
|
b_Ref->v + (y*8 + b_dy/2) * stride + x*8 + b_dx/2, |
1401 |
|
stride); |
1402 |
|
|
1403 |
if (sum < 2*MAX_CHROMA_SAD_FOR_SKIP * quant) pMB->mode = MODE_DIRECT_NONE_MV; //skipped |
if (sum < 2 * MAX_CHROMA_SAD_FOR_SKIP * pMB->quant) pMB->mode = MODE_DIRECT_NONE_MV; //skipped |
1404 |
} |
} |
1405 |
|
|
|
|
|
|
|
|
1406 |
static __inline uint32_t |
static __inline uint32_t |
1407 |
SearchDirect(const IMAGE * const f_Ref, |
SearchDirect(const IMAGE * const f_Ref, |
1408 |
const uint8_t * const f_RefH, |
const uint8_t * const f_RefH, |
1424 |
|
|
1425 |
{ |
{ |
1426 |
int32_t skip_sad; |
int32_t skip_sad; |
1427 |
int k; |
int k = (x + Data->iEdgedWidth*y) * 16; |
|
|
|
1428 |
MainSearchFunc *MainSearchPtr; |
MainSearchFunc *MainSearchPtr; |
1429 |
|
|
1430 |
*Data->iMinSAD = 256*4096; |
*Data->iMinSAD = 256*4096; |
1431 |
|
Data->Ref = f_Ref->y + k; |
1432 |
|
Data->RefH = f_RefH + k; |
1433 |
|
Data->RefV = f_RefV + k; |
1434 |
|
Data->RefHV = f_RefHV + k; |
1435 |
|
Data->bRef = b_Ref->y + k; |
1436 |
|
Data->bRefH = b_RefH + k; |
1437 |
|
Data->bRefV = b_RefV + k; |
1438 |
|
Data->bRefHV = b_RefHV + k; |
1439 |
|
|
1440 |
|
k = Data->qpel ? 4 : 2; |
1441 |
|
Data->max_dx = k * (pParam->width - x * 16); |
1442 |
|
Data->max_dy = k * (pParam->height - y * 16); |
1443 |
|
Data->min_dx = -k * (16 + x * 16); |
1444 |
|
Data->min_dy = -k * (16 + y * 16); |
1445 |
|
|
1446 |
Data->Ref = f_Ref->y + (x + Data->iEdgedWidth*y) * 16; |
Data->referencemv = Data->qpel ? b_mb->qmvs : b_mb->mvs; |
1447 |
Data->RefH = f_RefH + (x + Data->iEdgedWidth*y) * 16; |
Data->qpel_precision = 0; |
|
Data->RefV = f_RefV + (x + Data->iEdgedWidth*y) * 16; |
|
|
Data->RefHV = f_RefHV + (x + Data->iEdgedWidth*y) * 16; |
|
|
Data->bRef = b_Ref->y + (x + Data->iEdgedWidth*y) * 16; |
|
|
Data->bRefH = b_RefH + (x + Data->iEdgedWidth*y) * 16; |
|
|
Data->bRefV = b_RefV + (x + Data->iEdgedWidth*y) * 16; |
|
|
Data->bRefHV = b_RefHV + (x + Data->iEdgedWidth*y) * 16; |
|
|
|
|
|
Data->max_dx = 2 * pParam->width - 2 * (x) * 16; |
|
|
Data->max_dy = 2 * pParam->height - 2 * (y) * 16; |
|
|
Data->min_dx = -(2 * 16 + 2 * (x) * 16); |
|
|
Data->min_dy = -(2 * 16 + 2 * (y) * 16); |
|
|
if (Data->qpel) { //we measure in qpixels |
|
|
Data->max_dx *= 2; |
|
|
Data->max_dy *= 2; |
|
|
Data->min_dx *= 2; |
|
|
Data->min_dy *= 2; |
|
|
Data->referencemv = b_mb->qmvs; |
|
|
} else Data->referencemv = b_mb->mvs; |
|
|
Data->qpel_precision = 0; // it's a trick. it's 1 not 0, but we need 0 here |
|
1448 |
|
|
1449 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
1450 |
pMB->mvs[k].x = Data->directmvF[k].x = ((TRB * Data->referencemv[k].x) / TRD); |
pMB->mvs[k].x = Data->directmvF[k].x = ((TRB * Data->referencemv[k].x) / TRD); |
1452 |
pMB->mvs[k].y = Data->directmvF[k].y = ((TRB * Data->referencemv[k].y) / TRD); |
pMB->mvs[k].y = Data->directmvF[k].y = ((TRB * Data->referencemv[k].y) / TRD); |
1453 |
pMB->b_mvs[k].y = Data->directmvB[k].y = ((TRB - TRD) * Data->referencemv[k].y) / TRD; |
pMB->b_mvs[k].y = Data->directmvB[k].y = ((TRB - TRD) * Data->referencemv[k].y) / TRD; |
1454 |
|
|
1455 |
if ( ( pMB->b_mvs[k].x > Data->max_dx ) || ( pMB->b_mvs[k].x < Data->min_dx ) |
if ( (pMB->b_mvs[k].x > Data->max_dx) | (pMB->b_mvs[k].x < Data->min_dx) |
1456 |
|| ( pMB->b_mvs[k].y > Data->max_dy ) || ( pMB->b_mvs[k].y < Data->min_dy )) { |
| (pMB->b_mvs[k].y > Data->max_dy) | (pMB->b_mvs[k].y < Data->min_dy) ) { |
1457 |
|
|
1458 |
*best_sad = 256*4096; // in that case, we won't use direct mode |
*best_sad = 256*4096; // in that case, we won't use direct mode |
1459 |
pMB->mode = MODE_DIRECT; // just to make sure it doesn't say "MODE_DIRECT_NONE_MV" |
pMB->mode = MODE_DIRECT; // just to make sure it doesn't say "MODE_DIRECT_NONE_MV" |
1469 |
} |
} |
1470 |
} |
} |
1471 |
|
|
1472 |
|
CheckCandidate = b_mb->mode == MODE_INTER4V ? CheckCandidateDirect : CheckCandidateDirectno4v; |
|
if (b_mb->mode == MODE_INTER4V) CheckCandidate = CheckCandidateDirect; |
|
|
else CheckCandidate = CheckCandidateDirectno4v; |
|
1473 |
|
|
1474 |
(*CheckCandidate)(0, 0, 255, &k, Data); |
(*CheckCandidate)(0, 0, 255, &k, Data); |
1475 |
|
|
1476 |
// initial (fast) skip decision |
// initial (fast) skip decision |
1477 |
if (*Data->iMinSAD < pMB->quant * INITIAL_SKIP_THRESH*2) { |
if (*Data->iMinSAD < pMB->quant * INITIAL_SKIP_THRESH*2) { |
1478 |
SkipDecisionB(pCur, f_Ref, b_Ref, pMB, x, y, Data->chroma, Data); //possible skip - checking chroma |
//possible skip - checking chroma |
1479 |
|
SkipDecisionB(pCur, f_Ref, b_Ref, pMB, x, y, Data); |
1480 |
if (pMB->mode == MODE_DIRECT_NONE_MV) return *Data->iMinSAD; // skip. |
if (pMB->mode == MODE_DIRECT_NONE_MV) return *Data->iMinSAD; // skip. |
1481 |
} |
} |
1482 |
|
|
1495 |
|
|
1496 |
*best_sad = *Data->iMinSAD; |
*best_sad = *Data->iMinSAD; |
1497 |
|
|
1498 |
if (b_mb->mode == MODE_INTER4V) |
if (Data->qpel || b_mb->mode == MODE_INTER4V) pMB->mode = MODE_DIRECT; |
|
pMB->mode = MODE_DIRECT; |
|
1499 |
else pMB->mode = MODE_DIRECT_NO4V; //for faster compensation |
else pMB->mode = MODE_DIRECT_NO4V; //for faster compensation |
1500 |
|
|
1501 |
pMB->pmvs[3] = *Data->currentMV; |
pMB->pmvs[3] = *Data->currentMV; |
1527 |
return skip_sad; |
return skip_sad; |
1528 |
} |
} |
1529 |
|
|
|
|
|
1530 |
static void |
static void |
1531 |
SearchInterpolate(const uint8_t * const f_Ref, |
SearchInterpolate(const uint8_t * const f_Ref, |
1532 |
const uint8_t * const f_RefH, |
const uint8_t * const f_RefH, |
1588 |
|
|
1589 |
CheckCandidateInt(fData->currentMV[0].x, fData->currentMV[0].y, 255, &iDirection, fData); |
CheckCandidateInt(fData->currentMV[0].x, fData->currentMV[0].y, 255, &iDirection, fData); |
1590 |
|
|
1591 |
//diamond. I wish we could use normal mainsearch functions (square, advdiamond) |
//diamond |
|
|
|
1592 |
do { |
do { |
1593 |
iDirection = 255; |
iDirection = 255; |
1594 |
// forward MV moves |
// forward MV moves |
1609 |
|
|
1610 |
} while (!(iDirection)); |
} while (!(iDirection)); |
1611 |
|
|
1612 |
|
//qpel refinement |
1613 |
if (fData->qpel) { |
if (fData->qpel) { |
1614 |
|
if (*fData->iMinSAD > *best_sad + 500) return; |
1615 |
CheckCandidate = CheckCandidateInt; |
CheckCandidate = CheckCandidateInt; |
1616 |
fData->qpel_precision = bData.qpel_precision = 1; |
fData->qpel_precision = bData.qpel_precision = 1; |
1617 |
get_range(&fData->min_dx, &fData->max_dx, &fData->min_dy, &fData->max_dy, x, y, 16, pParam->width, pParam->height, fcode, 1, 0); |
get_range(&fData->min_dx, &fData->max_dx, &fData->min_dy, &fData->max_dy, x, y, 16, pParam->width, pParam->height, fcode, 1, 0); |
1621 |
fData->currentQMV[1].x = 2 * fData->currentMV[1].x; |
fData->currentQMV[1].x = 2 * fData->currentMV[1].x; |
1622 |
fData->currentQMV[1].y = 2 * fData->currentMV[1].y; |
fData->currentQMV[1].y = 2 * fData->currentMV[1].y; |
1623 |
SubpelRefine(fData); |
SubpelRefine(fData); |
1624 |
|
if (*fData->iMinSAD > *best_sad + 300) return; |
1625 |
fData->currentQMV[2] = fData->currentQMV[0]; |
fData->currentQMV[2] = fData->currentQMV[0]; |
1626 |
SubpelRefine(&bData); |
SubpelRefine(&bData); |
1627 |
} |
} |
1628 |
|
|
1629 |
*fData->iMinSAD += (2+2) * fData->lambda16; // two bits are needed to code interpolate mode. |
*fData->iMinSAD += (2+3) * fData->lambda16; // two bits are needed to code interpolate mode. |
1630 |
|
|
1631 |
if (*fData->iMinSAD < *best_sad) { |
if (*fData->iMinSAD < *best_sad) { |
1632 |
*best_sad = *fData->iMinSAD; |
*best_sad = *fData->iMinSAD; |
1671 |
int32_t best_sad; |
int32_t best_sad; |
1672 |
uint32_t skip_sad; |
uint32_t skip_sad; |
1673 |
int f_count = 0, b_count = 0, i_count = 0, d_count = 0, n_count = 0; |
int f_count = 0, b_count = 0, i_count = 0, d_count = 0, n_count = 0; |
|
static const VECTOR zeroMV={0,0}; |
|
1674 |
const MACROBLOCK * const b_mbs = b_reference->mbs; |
const MACROBLOCK * const b_mbs = b_reference->mbs; |
1675 |
|
|
1676 |
VECTOR f_predMV, b_predMV; /* there is no prediction for direct mode*/ |
VECTOR f_predMV, b_predMV; /* there is no prediction for direct mode*/ |
1677 |
|
|
1678 |
const int32_t TRB = time_pp - time_bp; |
const int32_t TRB = time_pp - time_bp; |
1679 |
const int32_t TRD = time_pp; |
const int32_t TRD = time_pp; |
|
uint8_t * qimage; |
|
1680 |
|
|
1681 |
// some pre-inintialized data for the rest of the search |
// some pre-inintialized data for the rest of the search |
1682 |
|
|
1693 |
Data.qpel = pParam->m_quarterpel; |
Data.qpel = pParam->m_quarterpel; |
1694 |
Data.rounding = 0; |
Data.rounding = 0; |
1695 |
|
|
1696 |
if((qimage = (uint8_t *) malloc(32 * pParam->edged_width)) == NULL) |
Data.RefQ = f_refV->u; // a good place, also used in MC (for similar purpose) |
|
return; // allocate some mem for qpel interpolated blocks |
|
|
// somehow this is dirty since I think we shouldn't use malloc outside |
|
|
// encoder_create() - so please fix me! |
|
|
Data.RefQ = qimage; |
|
|
|
|
1697 |
// note: i==horizontal, j==vertical |
// note: i==horizontal, j==vertical |
1698 |
for (j = 0; j < pParam->mb_height; j++) { |
for (j = 0; j < pParam->mb_height; j++) { |
1699 |
|
|
1745 |
MODE_BACKWARD, &Data); |
MODE_BACKWARD, &Data); |
1746 |
|
|
1747 |
// interpolate search comes last, because it uses data from forward and backward as prediction |
// interpolate search comes last, because it uses data from forward and backward as prediction |
|
|
|
1748 |
SearchInterpolate(f_ref->y, f_refH->y, f_refV->y, f_refHV->y, |
SearchInterpolate(f_ref->y, f_refH->y, f_refV->y, f_refHV->y, |
1749 |
b_ref->y, b_refH->y, b_refV->y, b_refHV->y, |
b_ref->y, b_refH->y, b_refV->y, b_refHV->y, |
1750 |
&frame->image, |
&frame->image, |
1759 |
// final skip decision |
// final skip decision |
1760 |
if ( (skip_sad < frame->quant * MAX_SAD00_FOR_SKIP*2) |
if ( (skip_sad < frame->quant * MAX_SAD00_FOR_SKIP*2) |
1761 |
&& ((100*best_sad)/(skip_sad+1) > FINAL_SKIP_THRESH) ) |
&& ((100*best_sad)/(skip_sad+1) > FINAL_SKIP_THRESH) ) |
1762 |
SkipDecisionB(&frame->image, f_ref, b_ref, pMB,frame->quant, i, j, &Data); |
SkipDecisionB(&frame->image, f_ref, b_ref, pMB, i, j, &Data); |
1763 |
|
|
1764 |
switch (pMB->mode) { |
switch (pMB->mode) { |
1765 |
case MODE_FORWARD: |
case MODE_FORWARD: |
1766 |
f_count++; |
f_count++; |
1767 |
if (Data.qpel) f_predMV = pMB->qmvs[0]; |
f_predMV = Data.qpel ? pMB->qmvs[0] : pMB->mvs[0]; |
|
else f_predMV = pMB->mvs[0]; |
|
1768 |
break; |
break; |
1769 |
case MODE_BACKWARD: |
case MODE_BACKWARD: |
1770 |
b_count++; |
b_count++; |
1771 |
if (Data.qpel) b_predMV = pMB->b_qmvs[0]; |
b_predMV = Data.qpel ? pMB->b_qmvs[0] : pMB->b_mvs[0]; |
|
else b_predMV = pMB->b_mvs[0]; |
|
1772 |
break; |
break; |
1773 |
case MODE_INTERPOLATE: |
case MODE_INTERPOLATE: |
1774 |
i_count++; |
i_count++; |
1775 |
if (Data.qpel) { |
f_predMV = Data.qpel ? pMB->qmvs[0] : pMB->mvs[0]; |
1776 |
f_predMV = pMB->qmvs[0]; |
b_predMV = Data.qpel ? pMB->b_qmvs[0] : pMB->b_mvs[0]; |
|
b_predMV = pMB->b_qmvs[0]; |
|
|
} else { |
|
|
f_predMV = pMB->mvs[0]; |
|
|
b_predMV = pMB->b_mvs[0]; |
|
|
} |
|
1777 |
break; |
break; |
1778 |
case MODE_DIRECT: |
case MODE_DIRECT: |
1779 |
case MODE_DIRECT_NO4V: |
case MODE_DIRECT_NO4V: |
1783 |
} |
} |
1784 |
} |
} |
1785 |
} |
} |
|
free(qimage); |
|
1786 |
} |
} |
1787 |
|
|
1788 |
static __inline int |
static __inline void |
1789 |
MEanalyzeMB ( const uint8_t * const pRef, |
MEanalyzeMB ( const uint8_t * const pRef, |
1790 |
const uint8_t * const pCur, |
const uint8_t * const pCur, |
1791 |
const int x, |
const int x, |
1792 |
const int y, |
const int y, |
1793 |
const MBParam * const pParam, |
const MBParam * const pParam, |
1794 |
const MACROBLOCK * const pMBs, |
MACROBLOCK * const pMBs, |
|
MACROBLOCK * const pMB, |
|
1795 |
SearchData * const Data) |
SearchData * const Data) |
1796 |
{ |
{ |
1797 |
|
|
1798 |
int i = 255, mask; |
int i, mask; |
1799 |
VECTOR pmv[3]; |
VECTOR pmv[3]; |
1800 |
*(Data->iMinSAD) = MV_MAX_ERROR; |
MACROBLOCK * pMB = &pMBs[x + y * pParam->mb_width]; |
1801 |
|
|
1802 |
|
for (i = 0; i < 5; i++) Data->iMinSAD[i] = MV_MAX_ERROR; |
1803 |
|
|
1804 |
//median is only used as prediction. it doesn't have to be real |
//median is only used as prediction. it doesn't have to be real |
1805 |
if (x == 1 && y == 1) Data->predMV.x = Data->predMV.y = 0; |
if (x == 1 && y == 1) Data->predMV.x = Data->predMV.y = 0; |
1806 |
else |
else |
1807 |
if (x == 1) //left macroblock does not have any vector now |
if (x == 1) //left macroblock does not have any vector now |
1808 |
Data->predMV = (pMB - pParam->mb_width)->mvs[0]; // top instead of median |
Data->predMV = (pMB - pParam->mb_width)->mvs[0]; // top instead of median |
1809 |
else if (y == 1) // top macroblock don't have it's vector |
else if (y == 1) // top macroblock doesn't have it's vector |
1810 |
Data->predMV = (pMB - 1)->mvs[0]; // left instead of median |
Data->predMV = (pMB - 1)->mvs[0]; // left instead of median |
1811 |
else Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); //else median |
else Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); //else median |
1812 |
|
|
1813 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
1814 |
pParam->width, pParam->height, Data->iFcode - pParam->m_quarterpel, 0, 0); |
pParam->width, pParam->height, Data->iFcode - pParam->m_quarterpel, 0, Data->rrv); |
1815 |
|
|
1816 |
Data->Cur = pCur + (x + y * pParam->edged_width) * 16; |
Data->Cur = pCur + (x + y * pParam->edged_width) * 16; |
1817 |
Data->Ref = pRef + (x + y * pParam->edged_width) * 16; |
Data->Ref = pRef + (x + y * pParam->edged_width) * 16; |
1822 |
pmv[2].y = EVEN(Data->predMV.y); |
pmv[2].y = EVEN(Data->predMV.y); |
1823 |
pmv[0].x = pmv[0].y = 0; |
pmv[0].x = pmv[0].y = 0; |
1824 |
|
|
1825 |
CheckCandidate16no4vI(0, 0, 255, &i, Data); |
CheckCandidate32I(0, 0, 255, &i, Data); |
1826 |
|
|
1827 |
//early skip for 0,0 |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP * 4) { |
|
if (*Data->iMinSAD < MAX_SAD00_FOR_SKIP * 4) { |
|
|
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
|
|
pMB->mode = MODE_NOT_CODED; |
|
|
return 0; |
|
|
} |
|
1828 |
|
|
1829 |
if (!(mask = make_mask(pmv, 1))) |
if (!(mask = make_mask(pmv, 1))) |
1830 |
CheckCandidate16no4vI(pmv[1].x, pmv[1].y, mask, &i, Data); |
CheckCandidate32I(pmv[1].x, pmv[1].y, mask, &i, Data); |
1831 |
if (!(mask = make_mask(pmv, 2))) |
if (!(mask = make_mask(pmv, 2))) |
1832 |
CheckCandidate16no4vI(pmv[2].x, pmv[2].y, mask, &i, Data); |
CheckCandidate32I(pmv[2].x, pmv[2].y, mask, &i, Data); |
1833 |
|
|
1834 |
if (*Data->iMinSAD > MAX_SAD00_FOR_SKIP * 6) // diamond only if needed |
if (*Data->iMinSAD > 4 * MAX_SAD00_FOR_SKIP * 4) // diamond only if needed |
1835 |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, i); |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, i); |
1836 |
|
|
1837 |
pMB->mvs[0] = pMB->mvs[1] = pMB->mvs[2] = pMB->mvs[3] = Data->currentMV[0]; |
for (i = 0; i < 4; i++) { |
1838 |
pMB->mode = MODE_INTER; |
MACROBLOCK * MB = &pMBs[x + (i&1) + (y+(i>>1)) * pParam->mb_width]; |
1839 |
return *(Data->iMinSAD); |
MB->mvs[0] = MB->mvs[1] = MB->mvs[2] = MB->mvs[3] = Data->currentMV[i]; |
1840 |
|
MB->mode = MODE_INTER; |
1841 |
|
MB->sad16 = Data->iMinSAD[i+1]; |
1842 |
|
} |
1843 |
|
} |
1844 |
} |
} |
1845 |
|
|
1846 |
#define INTRA_THRESH 1350 |
#define INTRA_BIAS 2500 |
1847 |
#define INTER_THRESH 1200 |
#define INTRA_THRESH 1500 |
1848 |
|
#define INTER_THRESH 1400 |
1849 |
|
|
1850 |
int |
int |
1851 |
MEanalysis( const IMAGE * const pRef, |
MEanalysis( const IMAGE * const pRef, |
1853 |
MBParam * const pParam, |
MBParam * const pParam, |
1854 |
int maxIntra, //maximum number if non-I frames |
int maxIntra, //maximum number if non-I frames |
1855 |
int intraCount, //number of non-I frames after last I frame; 0 if we force P/B frame |
int intraCount, //number of non-I frames after last I frame; 0 if we force P/B frame |
1856 |
int bCount) // number if B frames in a row |
int bCount) // number of B frames in a row |
1857 |
{ |
{ |
1858 |
uint32_t x, y, intra = 0; |
uint32_t x, y, intra = 0; |
1859 |
int sSAD = 0; |
int sSAD = 0; |
1861 |
const IMAGE * const pCurrent = &Current->image; |
const IMAGE * const pCurrent = &Current->image; |
1862 |
int IntraThresh = INTRA_THRESH, InterThresh = INTER_THRESH; |
int IntraThresh = INTRA_THRESH, InterThresh = INTER_THRESH; |
1863 |
|
|
1864 |
VECTOR currentMV; |
int32_t iMinSAD[5], temp[5]; |
1865 |
int32_t iMinSAD; |
VECTOR currentMV[5]; |
1866 |
SearchData Data; |
SearchData Data; |
1867 |
Data.iEdgedWidth = pParam->edged_width; |
Data.iEdgedWidth = pParam->edged_width; |
1868 |
Data.currentMV = ¤tMV; |
Data.currentMV = currentMV; |
1869 |
Data.iMinSAD = &iMinSAD; |
Data.iMinSAD = iMinSAD; |
1870 |
Data.iFcode = Current->fcode; |
Data.iFcode = Current->fcode; |
1871 |
CheckCandidate = CheckCandidate16no4vI; |
Data.rrv = Current->global_flags & XVID_REDUCED; |
1872 |
|
Data.temp = temp; |
1873 |
|
CheckCandidate = CheckCandidate32I; |
1874 |
|
|
1875 |
if (intraCount < 10) // we're right after an I frame |
if (intraCount != 0 && intraCount < 10) // we're right after an I frame |
1876 |
IntraThresh += 4 * (intraCount - 10) * (intraCount - 10); |
IntraThresh += 4 * (intraCount - 10) * (intraCount - 10); |
1877 |
else |
else |
1878 |
if ( 5*(maxIntra - intraCount) < maxIntra) // we're close to maximum. 2 sec when max is 10 sec |
if ( 5*(maxIntra - intraCount) < maxIntra) // we're close to maximum. 2 sec when max is 10 sec |
1879 |
IntraThresh -= (IntraThresh * (maxIntra - 5*(maxIntra - intraCount)))/maxIntra; |
IntraThresh -= (IntraThresh * (maxIntra - 5*(maxIntra - intraCount)))/maxIntra; |
1880 |
|
|
|
|
|
1881 |
InterThresh += 400 * (1 - bCount); |
InterThresh += 400 * (1 - bCount); |
1882 |
if (InterThresh < 200) InterThresh = 200; |
if (InterThresh < 300) InterThresh = 300; |
1883 |
|
|
1884 |
if (sadInit) (*sadInit) (); |
if (sadInit) (*sadInit) (); |
1885 |
|
|
1886 |
for (y = 1; y < pParam->mb_height-1; y++) { |
for (y = 1; y < pParam->mb_height-1; y += 2) { |
1887 |
for (x = 1; x < pParam->mb_width-1; x++) { |
for (x = 1; x < pParam->mb_width-1; x += 2) { |
1888 |
int sad, dev; |
int i; |
1889 |
MACROBLOCK *pMB = &pMBs[x + y * pParam->mb_width]; |
|
1890 |
|
if (bCount == 0) pMBs[x + y * pParam->mb_width].mvs[0] = zeroMV; |
1891 |
|
|
1892 |
sad = MEanalyzeMB(pRef->y, pCurrent->y, x, y, |
MEanalyzeMB(pRef->y, pCurrent->y, x, y, pParam, pMBs, &Data); |
|
pParam, pMBs, pMB, &Data); |
|
1893 |
|
|
1894 |
if (sad > IntraThresh) { |
for (i = 0; i < 4; i++) { |
1895 |
dev = dev16(pCurrent->y + (x + y * pParam->edged_width) * 16, |
int dev; |
1896 |
|
MACROBLOCK *pMB = &pMBs[x+(i&1) + (y+(i>>1)) * pParam->mb_width]; |
1897 |
|
if (pMB->sad16 > IntraThresh) { |
1898 |
|
dev = dev16(pCurrent->y + (x + (i&1) + (y + (i>>1)) * pParam->edged_width) * 16, |
1899 |
pParam->edged_width); |
pParam->edged_width); |
1900 |
if (dev + IntraThresh < sad) { |
if (dev + IntraThresh < pMB->sad16) { |
1901 |
pMB->mode = MODE_INTRA; |
pMB->mode = MODE_INTRA; |
1902 |
if (++intra > (pParam->mb_height-2)*(pParam->mb_width-2)/2) return I_VOP; |
if (++intra > (pParam->mb_height-2)*(pParam->mb_width-2)/2) return I_VOP; |
1903 |
} |
} |
1904 |
} |
} |
1905 |
sSAD += sad; |
sSAD += pMB->sad16; |
1906 |
|
} |
1907 |
} |
} |
1908 |
} |
} |
1909 |
sSAD /= (pParam->mb_height-2)*(pParam->mb_width-2); |
sSAD /= (pParam->mb_height-2)*(pParam->mb_width-2); |
1910 |
|
if (sSAD > IntraThresh + INTRA_BIAS) return I_VOP; |
1911 |
if (sSAD > InterThresh ) return P_VOP; |
if (sSAD > InterThresh ) return P_VOP; |
1912 |
emms(); |
emms(); |
1913 |
return B_VOP; |
return B_VOP; |
1914 |
|
|
1915 |
} |
} |
1916 |
|
|
|
static void |
|
|
CheckGMC(int x, int y, const int dir, int * iDirection, |
|
|
const MACROBLOCK * const pMBs, uint32_t * bestcount, VECTOR * GMC, |
|
|
const MBParam * const pParam) |
|
|
{ |
|
|
uint32_t mx, my, a, count = 0; |
|
|
|
|
|
for (my = 1; my < pParam->mb_height-1; my++) |
|
|
for (mx = 1; mx < pParam->mb_width-1; mx++) { |
|
|
VECTOR mv; |
|
|
const MACROBLOCK *pMB = &pMBs[mx + my * pParam->mb_width]; |
|
|
if (pMB->mode == MODE_INTRA || pMB->mode == MODE_NOT_CODED) continue; |
|
|
mv = pMB->mvs[0]; |
|
|
a = ABS(mv.x - x) + ABS(mv.y - y); |
|
|
if (a < 6) count += 6 - a; |
|
|
} |
|
|
|
|
|
if (count > *bestcount) { |
|
|
*bestcount = count; |
|
|
*iDirection = dir; |
|
|
GMC->x = x; GMC->y = y; |
|
|
} |
|
|
} |
|
|
|
|
1917 |
|
|
1918 |
static __inline VECTOR |
static WARPPOINTS |
1919 |
GlobalMotionEst(const MACROBLOCK * const pMBs, const MBParam * const pParam, const uint32_t iFcode) |
GlobalMotionEst(const MACROBLOCK * const pMBs, |
1920 |
|
const MBParam * const pParam, |
1921 |
|
const FRAMEINFO * const current, |
1922 |
|
const FRAMEINFO * const reference, |
1923 |
|
const IMAGE * const pRefH, |
1924 |
|
const IMAGE * const pRefV, |
1925 |
|
const IMAGE * const pRefHV ) |
1926 |
{ |
{ |
1927 |
|
|
1928 |
uint32_t count, bestcount = 0; |
const int deltax=8; // upper bound for difference between a MV and it's neighbour MVs |
1929 |
int x, y; |
const int deltay=8; |
1930 |
VECTOR gmc = {0,0}; |
const int grad=512; // lower bound for deviation in MB |
1931 |
int step, min_x, max_x, min_y, max_y; |
|
1932 |
|
WARPPOINTS gmc; |
1933 |
|
|
1934 |
uint32_t mx, my; |
uint32_t mx, my; |
|
int iDirection, bDirection; |
|
1935 |
|
|
1936 |
min_x = min_y = -32<<iFcode; |
int MBh = pParam->mb_height; |
1937 |
max_x = max_y = 32<<iFcode; |
int MBw = pParam->mb_width; |
1938 |
|
|
1939 |
//step1: let's find a rough camera panning |
int *MBmask= calloc(MBh*MBw,sizeof(int)); |
1940 |
for (step = 32; step >= 2; step /= 2) { |
double DtimesF[4] = { 0.,0., 0., 0. }; |
1941 |
bestcount = 0; |
double sol[4] = { 0., 0., 0., 0. }; |
1942 |
for (y = min_y; y <= max_y; y += step) |
double a,b,c,n,denom; |
1943 |
for (x = min_x ; x <= max_x; x += step) { |
double meanx,meany; |
1944 |
count = 0; |
int num,oldnum; |
1945 |
//for all macroblocks |
|
1946 |
for (my = 1; my < pParam->mb_height-1; my++) |
if (!MBmask) { fprintf(stderr,"Mem error\n"); return gmc;} |
1947 |
for (mx = 1; mx < pParam->mb_width-1; mx++) { |
|
1948 |
const MACROBLOCK *pMB = &pMBs[mx + my * pParam->mb_width]; |
// filter mask of all blocks |
1949 |
VECTOR mv; |
|
1950 |
|
for (my = 1; my < MBh-1; my++) |
1951 |
|
for (mx = 1; mx < MBw-1; mx++) |
1952 |
|
{ |
1953 |
|
const int mbnum = mx + my * MBw; |
1954 |
|
const MACROBLOCK *pMB = &pMBs[mbnum]; |
1955 |
|
const VECTOR mv = pMB->mvs[0]; |
1956 |
|
|
1957 |
if (pMB->mode == MODE_INTRA || pMB->mode == MODE_NOT_CODED) |
if (pMB->mode == MODE_INTRA || pMB->mode == MODE_NOT_CODED) |
1958 |
continue; |
continue; |
1959 |
|
|
1960 |
mv = pMB->mvs[0]; |
if ( ( (ABS(mv.x - (pMB-1)->mvs[0].x) < deltax) && (ABS(mv.y - (pMB-1)->mvs[0].y) < deltay) ) |
1961 |
if ( ABS(mv.x - x) <= step && ABS(mv.y - y) <= step ) /* GMC translation is always halfpel-res */ |
&& ( (ABS(mv.x - (pMB+1)->mvs[0].x) < deltax) && (ABS(mv.y - (pMB+1)->mvs[0].y) < deltay) ) |
1962 |
count++; |
&& ( (ABS(mv.x - (pMB-MBw)->mvs[0].x) < deltax) && (ABS(mv.y - (pMB-MBw)->mvs[0].y) < deltay) ) |
1963 |
|
&& ( (ABS(mv.x - (pMB+MBw)->mvs[0].x) < deltax) && (ABS(mv.y - (pMB+MBw)->mvs[0].y) < deltay) ) ) |
1964 |
|
MBmask[mbnum]=1; |
1965 |
|
} |
1966 |
|
|
1967 |
|
for (my = 1; my < MBh-1; my++) |
1968 |
|
for (mx = 1; mx < MBw-1; mx++) |
1969 |
|
{ |
1970 |
|
const uint8_t *const pCur = current->image.y + 16*my*pParam->edged_width + 16*mx; |
1971 |
|
|
1972 |
|
const int mbnum = mx + my * MBw; |
1973 |
|
if (!MBmask[mbnum]) |
1974 |
|
continue; |
1975 |
|
|
1976 |
|
if (sad16 ( pCur, pCur+1 , pParam->edged_width, 65536) <= grad ) |
1977 |
|
MBmask[mbnum] = 0; |
1978 |
|
if (sad16 ( pCur, pCur+pParam->edged_width, pParam->edged_width, 65536) <= grad ) |
1979 |
|
MBmask[mbnum] = 0; |
1980 |
|
|
1981 |
} |
} |
1982 |
if (count >= bestcount) { bestcount = count; gmc.x = x; gmc.y = y; } |
|
1983 |
|
emms(); |
1984 |
|
|
1985 |
|
do { /* until convergence */ |
1986 |
|
|
1987 |
|
a = b = c = n = 0; |
1988 |
|
DtimesF[0] = DtimesF[1] = DtimesF[2] = DtimesF[3] = 0.; |
1989 |
|
for (my = 0; my < MBh; my++) |
1990 |
|
for (mx = 0; mx < MBw; mx++) |
1991 |
|
{ |
1992 |
|
const int mbnum = mx + my * MBw; |
1993 |
|
const MACROBLOCK *pMB = &pMBs[mbnum]; |
1994 |
|
const VECTOR mv = pMB->mvs[0]; |
1995 |
|
|
1996 |
|
if (!MBmask[mbnum]) |
1997 |
|
continue; |
1998 |
|
|
1999 |
|
n++; |
2000 |
|
a += 16*mx+8; |
2001 |
|
b += 16*my+8; |
2002 |
|
c += (16*mx+8)*(16*mx+8)+(16*my+8)*(16*my+8); |
2003 |
|
|
2004 |
|
DtimesF[0] += (double)mv.x; |
2005 |
|
DtimesF[1] += (double)mv.x*(16*mx+8) + (double)mv.y*(16*my+8); |
2006 |
|
DtimesF[2] += (double)mv.x*(16*my+8) - (double)mv.y*(16*mx+8); |
2007 |
|
DtimesF[3] += (double)mv.y; |
2008 |
|
} |
2009 |
|
|
2010 |
|
denom = a*a+b*b-c*n; |
2011 |
|
|
2012 |
|
/* Solve the system: sol = (D'*E*D)^{-1} D'*E*F */ |
2013 |
|
/* D'*E*F has been calculated in the same loop as matrix */ |
2014 |
|
|
2015 |
|
sol[0] = -c*DtimesF[0] + a*DtimesF[1] + b*DtimesF[2]; |
2016 |
|
sol[1] = a*DtimesF[0] - n*DtimesF[1] + b*DtimesF[3]; |
2017 |
|
sol[2] = b*DtimesF[0] - n*DtimesF[2] - a*DtimesF[3]; |
2018 |
|
sol[3] = b*DtimesF[1] - a*DtimesF[2] - c*DtimesF[3]; |
2019 |
|
|
2020 |
|
sol[0] /= denom; |
2021 |
|
sol[1] /= denom; |
2022 |
|
sol[2] /= denom; |
2023 |
|
sol[3] /= denom; |
2024 |
|
|
2025 |
|
meanx = meany = 0.; |
2026 |
|
oldnum = 0; |
2027 |
|
for (my = 0; my < MBh; my++) |
2028 |
|
for (mx = 0; mx < MBw; mx++) |
2029 |
|
{ |
2030 |
|
const int mbnum = mx + my * MBw; |
2031 |
|
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2032 |
|
const VECTOR mv = pMB->mvs[0]; |
2033 |
|
|
2034 |
|
if (!MBmask[mbnum]) |
2035 |
|
continue; |
2036 |
|
|
2037 |
|
oldnum++; |
2038 |
|
meanx += ABS(( sol[0] + (16*mx+8)*sol[1] + (16*my+8)*sol[2] ) - mv.x ); |
2039 |
|
meany += ABS(( sol[3] - (16*mx+8)*sol[2] + (16*my+8)*sol[1] ) - mv.y ); |
2040 |
} |
} |
|
min_x = gmc.x - step; |
|
|
max_x = gmc.x + step; |
|
|
min_y = gmc.y - step; |
|
|
max_y = gmc.y + step; |
|
2041 |
|
|
2042 |
|
if (4*meanx > oldnum) /* better fit than 0.25 is useless */ |
2043 |
|
meanx /= oldnum; |
2044 |
|
else |
2045 |
|
meanx = 0.25; |
2046 |
|
|
2047 |
|
if (4*meany > oldnum) |
2048 |
|
meany /= oldnum; |
2049 |
|
else |
2050 |
|
meany = 0.25; |
2051 |
|
|
2052 |
|
/* fprintf(stderr,"sol = (%8.5f, %8.5f, %8.5f, %8.5f)\n",sol[0],sol[1],sol[2],sol[3]); |
2053 |
|
fprintf(stderr,"meanx = %8.5f meany = %8.5f %d\n",meanx,meany, oldnum); |
2054 |
|
*/ |
2055 |
|
num = 0; |
2056 |
|
for (my = 0; my < MBh; my++) |
2057 |
|
for (mx = 0; mx < MBw; mx++) |
2058 |
|
{ |
2059 |
|
const int mbnum = mx + my * MBw; |
2060 |
|
const MACROBLOCK *pMB = &pMBs[mbnum]; |
2061 |
|
const VECTOR mv = pMB->mvs[0]; |
2062 |
|
|
2063 |
|
if (!MBmask[mbnum]) |
2064 |
|
continue; |
2065 |
|
|
2066 |
|
if ( ( ABS(( sol[0] + (16*mx+8)*sol[1] + (16*my+8)*sol[2] ) - mv.x ) > meanx ) |
2067 |
|
|| ( ABS(( sol[3] - (16*mx+8)*sol[2] + (16*my+8)*sol[1] ) - mv.y ) > meany ) ) |
2068 |
|
MBmask[mbnum]=0; |
2069 |
|
else |
2070 |
|
num++; |
2071 |
} |
} |
2072 |
|
|
2073 |
if (bestcount < (pParam->mb_height-2)*(pParam->mb_width-2)/10) |
} while ( (oldnum != num) && (num>=4) ); |
|
gmc.x = gmc.y = 0; //no camara pan, no GMC |
|
2074 |
|
|
2075 |
// step2: let's refine camera panning using gradiend-descent approach. |
if (num < 4) |
2076 |
// TODO: more warping points may be evaluated here (like in interpolate mode search - two vectors in one diamond) |
{ |
2077 |
bestcount = 0; |
gmc.duv[0].x= gmc.duv[0].y= gmc.duv[1].x= gmc.duv[1].y= gmc.duv[2].x= gmc.duv[2].y=0; |
2078 |
CheckGMC(gmc.x, gmc.y, 255, &iDirection, pMBs, &bestcount, &gmc, pParam); |
} else { |
|
do { |
|
|
x = gmc.x; y = gmc.y; |
|
|
bDirection = iDirection; iDirection = 0; |
|
|
if (bDirection & 1) CheckGMC(x - 1, y, 1+4+8, &iDirection, pMBs, &bestcount, &gmc, pParam); |
|
|
if (bDirection & 2) CheckGMC(x + 1, y, 2+4+8, &iDirection, pMBs, &bestcount, &gmc, pParam); |
|
|
if (bDirection & 4) CheckGMC(x, y - 1, 1+2+4, &iDirection, pMBs, &bestcount, &gmc, pParam); |
|
|
if (bDirection & 8) CheckGMC(x, y + 1, 1+2+8, &iDirection, pMBs, &bestcount, &gmc, pParam); |
|
2079 |
|
|
2080 |
} while (iDirection); |
gmc.duv[0].x=(int)(sol[0]+0.5); |
2081 |
|
gmc.duv[0].y=(int)(sol[3]+0.5); |
2082 |
|
|
2083 |
|
gmc.duv[1].x=(int)(sol[1]*pParam->width+0.5); |
2084 |
|
gmc.duv[1].y=(int)(-sol[2]*pParam->width+0.5); |
2085 |
|
|
2086 |
if (pParam->m_quarterpel) { |
gmc.duv[2].x=0; |
2087 |
gmc.x *= 2; |
gmc.duv[2].y=0; |
|
gmc.y *= 2; /* we store the halfpel value as pseudo-qpel to make comparison easier */ |
|
2088 |
} |
} |
2089 |
|
// fprintf(stderr,"wp1 = ( %4d, %4d) wp2 = ( %4d, %4d) \n", gmc.duv[0].x, gmc.duv[0].y, gmc.duv[1].x, gmc.duv[1].y); |
2090 |
|
|
2091 |
|
free(MBmask); |
2092 |
|
|
2093 |
return gmc; |
return gmc; |
2094 |
} |
} |