/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - Motion Compensation related code -
 *
 *  Copyright(C) 2002 Peter Ross <pross@xvid.org>
 *               2003 Christoph Lampert <gruel@web.de>
 *
 *  This program is an implementation of a part of one or more MPEG-4
 *  Video tools as specified in ISO/IEC 14496-2 standard. Those intending
 *  to use this software module in hardware or software products are
 *  advised that its use may infringe existing patents or copyrights, and
 *  any such use would be at such party's own risk. The original
 *  developer of this software module and his/her company, and subsequent
 *  editors and their companies, will have no liability for use of this
 *  software or modifications or derivatives thereof.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *  $Id$
 *
 ****************************************************************************/
#include <stdio.h>

#include "../encoder.h"
#include "../utils/mbfunctions.h"
#include "../image/interpolate8x8.h"
#include "../image/reduced.h"
#include "../utils/timer.h"
#include "motion.h"

#ifndef RSHIFT
#define RSHIFT(a,b) ((a) > 0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b))
#endif

/* assume b>0 */
#ifndef RDIV
#define RDIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
#endif
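
/* A quick sanity check of the rounding behaviour (editor's illustration,
 * derived purely from the two macros above): RSHIFT rounds half away from
 * zero, so RSHIFT(5,1) == 3 and RSHIFT(-5,1) == -3, while RDIV does the
 * same for arbitrary positive divisors: RDIV(5,2) == 3, RDIV(-5,2) == -3. */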

/* This is borrowed from decoder.c */

static __inline int gmc_sanitize(int value, int quarterpel, int fcode)
{
	int length = 1 << (fcode+4);

#if 0
	if (quarterpel) value *= 2;
#endif

	if (value < -length)
		return -length;
	else if (value >= length)
		return length-1;
	else return value;
}
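
/* Example (follows directly from the code above): with fcode==1 the legal
 * range is [-32, 31], so gmc_sanitize(40, 0, 1) == 31 and
 * gmc_sanitize(-40, 0, 1) == -32; in-range values pass through unchanged. */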

/* And this is borrowed from bitstream.c until we find a common solution */

static uint32_t __inline
log2bin(uint32_t value)
{
/* Changed by Chenm001 */
#if !defined(_MSC_VER)
	int n = 0;

	while (value) {
		value >>= 1;
		n++;
	}
	return n;
#else
	__asm {
		bsr eax, value
		inc eax
	}
#endif
}
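
/* log2bin returns the number of bits needed to represent its argument:
 * log2bin(15) == 4, log2bin(16) == 5, log2bin(17) == 5 (the same values
 * quoted in the OLD_GRUEL_GMC comments below). Note that the C branch
 * returns 0 for value==0, while the bsr-based MSVC branch is undefined
 * for 0, so callers should avoid that input. */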

static __inline void
compensate16x16_interpolate(int16_t * const dct_codes,
							uint8_t * const cur,
							const uint8_t * const ref,
							const uint8_t * const refh,
							const uint8_t * const refv,
							const uint8_t * const refhv,
							uint8_t * const tmp,
							uint32_t x,
							uint32_t y,
							const int32_t dx,
							const int32_t dy,
							const int32_t stride,
							const int quarterpel,
							const int reduced_resolution,
							const int32_t rounding)
{
	const uint8_t * ptr;

	if (!reduced_resolution) {

		if (quarterpel) {
			if ((dx&3) | (dy&3)) {
				interpolate16x16_quarterpel(tmp - y * stride - x,
											(uint8_t *) ref, tmp + 32,
											tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding);
				ptr = tmp;
			} else ptr = ref + (y + dy/4)*stride + x + dx/4; /* fullpixel position */
		} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);

		transfer_8to16sub(dct_codes, cur + y * stride + x,
						  ptr, stride);
		transfer_8to16sub(dct_codes+64, cur + y * stride + x + 8,
						  ptr + 8, stride);
		transfer_8to16sub(dct_codes+128, cur + y * stride + x + 8*stride,
						  ptr + 8*stride, stride);
		transfer_8to16sub(dct_codes+192, cur + y * stride + x + 8*stride+8,
						  ptr + 8*stride + 8, stride);

	} else { /* reduced_resolution */

		x *= 2; y *= 2;

		ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);

		filter_18x18_to_8x8(dct_codes, cur+y*stride + x, stride);
		filter_diff_18x18_to_8x8(dct_codes, ptr, stride);

		filter_18x18_to_8x8(dct_codes+64, cur+y*stride + x + 16, stride);
		filter_diff_18x18_to_8x8(dct_codes+64, ptr + 16, stride);

		filter_18x18_to_8x8(dct_codes+128, cur+(y+16)*stride + x, stride);
		filter_diff_18x18_to_8x8(dct_codes+128, ptr + 16*stride, stride);

		filter_18x18_to_8x8(dct_codes+192, cur+(y+16)*stride + x + 16, stride);
		filter_diff_18x18_to_8x8(dct_codes+192, ptr + 16*stride + 16, stride);

		transfer32x32_copy(cur + y*stride + x, ptr, stride);
	}
}
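
/* Note on the quarterpel path above: dx/dy are in 1/4-pel units, so
 * (dx&3)|(dy&3) is non-zero exactly when at least one component has a
 * fractional part. E.g. dx==6 (1.5 pel) needs the interpolated block,
 * while dx==8, dy==-4 (2.0 and -1.0 pel) can be fetched directly at the
 * fullpel position ref + (y + dy/4)*stride + x + dx/4. */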

static __inline void
compensate8x8_interpolate(	int16_t * const dct_codes,
							uint8_t * const cur,
							const uint8_t * const ref,
							const uint8_t * const refh,
							const uint8_t * const refv,
							const uint8_t * const refhv,
							uint8_t * const tmp,
							uint32_t x,
							uint32_t y,
							const int32_t dx,
							const int32_t dy,
							const int32_t stride,
							const int32_t quarterpel,
							const int reduced_resolution,
							const int32_t rounding)
{
	const uint8_t * ptr;

	if (!reduced_resolution) {

		if (quarterpel) {
			if ((dx&3) | (dy&3)) {
				interpolate8x8_quarterpel(tmp - y*stride - x,
										  (uint8_t *) ref, tmp + 32,
										  tmp + 64, tmp + 96, x, y, dx, dy, stride, rounding);
				ptr = tmp;
			} else ptr = ref + (y + dy/4)*stride + x + dx/4; /* fullpixel position */
		} else ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);

		transfer_8to16sub(dct_codes, cur + y * stride + x, ptr, stride);

	} else { /* reduced_resolution */

		x *= 2; y *= 2;

		ptr = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);

		filter_18x18_to_8x8(dct_codes, cur+y*stride + x, stride);
		filter_diff_18x18_to_8x8(dct_codes, ptr, stride);

		transfer16x16_copy(cur + y*stride + x, ptr, stride);
	}
}

/* XXX: slow, inelegant... */
static void
interpolate18x18_switch(uint8_t * const cur,
						const uint8_t * const refn,
						const uint32_t x,
						const uint32_t y,
						const int32_t dx,
						const int dy,
						const int32_t stride,
						const int32_t rounding)
{
	interpolate8x8_switch(cur, refn, x-1, y-1, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+7, y-1, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+9, y-1, dx, dy, stride, rounding);

	interpolate8x8_switch(cur, refn, x-1, y+7, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+7, y+7, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+9, y+7, dx, dy, stride, rounding);

	interpolate8x8_switch(cur, refn, x-1, y+9, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+7, y+9, dx, dy, stride, rounding);
	interpolate8x8_switch(cur, refn, x+9, y+9, dx, dy, stride, rounding);
}
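
/* The nine 8x8 interpolations above tile the 18x18 window whose top-left
 * corner is (x-1,y-1): the x+7 and x+9 columns (likewise y+7/y+9 rows)
 * overlap by six pixels, so every pixel needed by the 18x18-to-8x8
 * downsampling filters gets written at least once. Redundant work, hence
 * the "slow, inelegant" remark. */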

static void
CompensateChroma(	int dx, int dy,
					const int i, const int j,
					IMAGE * const Cur,
					const IMAGE * const Ref,
					uint8_t * const temp,
					int16_t * const coeff,
					const int32_t stride,
					const int rounding,
					const int rrv)
{ /* uv-block-based compensation */

	if (!rrv) {
		transfer_8to16sub(coeff, Cur->u + 8 * j * stride + 8 * i,
						  interpolate8x8_switch2(temp, Ref->u, 8 * i, 8 * j,
												 dx, dy, stride, rounding),
						  stride);
		transfer_8to16sub(coeff + 64, Cur->v + 8 * j * stride + 8 * i,
						  interpolate8x8_switch2(temp, Ref->v, 8 * i, 8 * j,
												 dx, dy, stride, rounding),
						  stride);
	} else {
		uint8_t * current, * reference;

		current = Cur->u + 16*j*stride + 16*i;
		reference = temp - 16*j*stride - 16*i;
		interpolate18x18_switch(reference, Ref->u, 16*i, 16*j, dx, dy, stride, rounding);
		filter_18x18_to_8x8(coeff, current, stride);
		filter_diff_18x18_to_8x8(coeff, temp, stride);
		transfer16x16_copy(current, temp, stride);

		current = Cur->v + 16*j*stride + 16*i;
		interpolate18x18_switch(reference, Ref->v, 16*i, 16*j, dx, dy, stride, rounding);
		filter_18x18_to_8x8(coeff + 64, current, stride);
		filter_diff_18x18_to_8x8(coeff + 64, temp, stride);
		transfer16x16_copy(current, temp, stride);
	}
}

void
MBMotionCompensation(MACROBLOCK * const mb,
					 const uint32_t i,
					 const uint32_t j,
					 const IMAGE * const ref,
					 const IMAGE * const refh,
					 const IMAGE * const refv,
					 const IMAGE * const refhv,
					 const IMAGE * const refGMC,
					 IMAGE * const cur,
					 int16_t * dct_codes,
					 const uint32_t width,
					 const uint32_t height,
					 const uint32_t edged_width,
					 const int32_t quarterpel,
					 const int reduced_resolution,
					 const int32_t rounding)
{
	int32_t dx;
	int32_t dy;

	uint8_t * const tmp = refv->u;

	if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) {	/* quick copy for early SKIP */
	/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */

		transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
						   ref->y + 16 * (i + j * edged_width),
						   edged_width);

		transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
						 ref->u + 8 * (i + j * edged_width/2),
						 edged_width / 2);
		transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
						 ref->v + 8 * (i + j * edged_width/2),
						 edged_width / 2);
		return;
	}

	if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER
				|| mb->mode == MODE_INTER_Q)) {

		/* reduced resolution + GMC: not possible */

		if (mb->mcsel) {

			/* call the normal routine once; easier than "if (mcsel)"ing all the time */

			transfer_8to16sub(&dct_codes[0*64], cur->y + 16*j*edged_width + 16*i,
							  refGMC->y + 16*j*edged_width + 16*i, edged_width);
			transfer_8to16sub(&dct_codes[1*64], cur->y + 16*j*edged_width + 16*i+8,
							  refGMC->y + 16*j*edged_width + 16*i+8, edged_width);
			transfer_8to16sub(&dct_codes[2*64], cur->y + (16*j+8)*edged_width + 16*i,
							  refGMC->y + (16*j+8)*edged_width + 16*i, edged_width);
			transfer_8to16sub(&dct_codes[3*64], cur->y + (16*j+8)*edged_width + 16*i+8,
							  refGMC->y + (16*j+8)*edged_width + 16*i+8, edged_width);

			/* lumi is needed earlier for mode decision; chroma should be done block-based, but it isn't, yet. */

			transfer_8to16sub(&dct_codes[4 * 64], cur->u + 8*j*edged_width/2 + 8*i,
							  refGMC->u + 8*j*edged_width/2 + 8*i, edged_width/2);

			transfer_8to16sub(&dct_codes[5 * 64], cur->v + 8*j*edged_width/2 + 8*i,
							  refGMC->v + 8*j*edged_width/2 + 8*i, edged_width/2);

			return;
		}

		/* ordinary compensation */

		dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
		dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);

		if (reduced_resolution) {
			dx = RRV_MV_SCALEUP(dx);
			dy = RRV_MV_SCALEUP(dy);
		}

		compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
									refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy,
									edged_width, quarterpel, reduced_resolution, rounding);

		if (quarterpel) { dx /= 2; dy /= 2; }

		dx = (dx >> 1) + roundtab_79[dx & 0x3];
		dy = (dy >> 1) + roundtab_79[dy & 0x3];

	} else {	/* mode == MODE_INTER4V */
		int k, sumx = 0, sumy = 0;
		const VECTOR * const mvs = (quarterpel ? mb->qmvs : mb->mvs);

		for (k = 0; k < 4; k++) {
			dx = mvs[k].x;
			dy = mvs[k].y;
			sumx += quarterpel ? dx/2 : dx;
			sumy += quarterpel ? dy/2 : dy;

			if (reduced_resolution) {
				dx = RRV_MV_SCALEUP(dx);
				dy = RRV_MV_SCALEUP(dy);
			}

			compensate8x8_interpolate(&dct_codes[k * 64], cur->y, ref->y, refh->y,
									  refv->y, refhv->y, tmp, 16 * i + 8*(k&1), 16 * j + 8*(k>>1), dx,
									  dy, edged_width, quarterpel, reduced_resolution, rounding);
		}
		dx = (sumx >> 3) + roundtab_76[sumx & 0xf];
		dy = (sumy >> 3) + roundtab_76[sumy & 0xf];
	}

	CompensateChroma(dx, dy, i, j, cur, ref, tmp,
					 &dct_codes[4 * 64], edged_width / 2, rounding, reduced_resolution);
}
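
/* Chroma vector derivation, illustrated (values follow from the older
 * in-place formula (dx & 3) ? (dx >> 1) | 1 : dx / 2 that the tables
 * replace): luma vectors are in half-pel units at this point. For 1MV,
 * dx==3 (1.5 luma pel) gives (3>>1) + roundtab_79[3] == 1, i.e. 0.5
 * chroma pel -- the standard's "//2" rounding that forces odd results
 * onto the half-pel grid. For 4MV, roundtab_76 applies the matching
 * rounding to sum/8: four vectors summing to 3 likewise yield 1. */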

void
MBMotionCompensationBVOP(MBParam * pParam,
						 MACROBLOCK * const mb,
						 const uint32_t i,
						 const uint32_t j,
						 IMAGE * const cur,
						 const IMAGE * const f_ref,
						 const IMAGE * const f_refh,
						 const IMAGE * const f_refv,
						 const IMAGE * const f_refhv,
						 const IMAGE * const b_ref,
						 const IMAGE * const b_refh,
						 const IMAGE * const b_refv,
						 const IMAGE * const b_refhv,
						 int16_t * dct_codes)
{
	const uint32_t edged_width = pParam->edged_width;
	int32_t dx, dy, b_dx, b_dy, sumx, sumy, b_sumx, b_sumy;
	int k;
	const int quarterpel = pParam->vol_flags & XVID_VOL_QUARTERPEL;
	const uint8_t * ptr1, * ptr2;
	uint8_t * const tmp = f_refv->u;
	const VECTOR * const fmvs = (quarterpel ? mb->qmvs : mb->mvs);
	const VECTOR * const bmvs = (quarterpel ? mb->b_qmvs : mb->b_mvs);

	switch (mb->mode) {
	case MODE_FORWARD:
		dx = fmvs->x; dy = fmvs->y;

		compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, f_ref->y, f_refh->y,
									f_refv->y, f_refhv->y, tmp, 16 * i, 16 * j, dx,
									dy, edged_width, quarterpel, 0, 0);

		if (quarterpel) { dx /= 2; dy /= 2; }

		CompensateChroma(	(dx >> 1) + roundtab_79[dx & 0x3],
							(dy >> 1) + roundtab_79[dy & 0x3],
							i, j, cur, f_ref, tmp,
							&dct_codes[4 * 64], edged_width / 2, 0, 0);

		return;

	case MODE_BACKWARD:
		b_dx = bmvs->x; b_dy = bmvs->y;

		compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, b_ref->y, b_refh->y,
									b_refv->y, b_refhv->y, tmp, 16 * i, 16 * j, b_dx,
									b_dy, edged_width, quarterpel, 0, 0);

		if (quarterpel) { b_dx /= 2; b_dy /= 2; }

		CompensateChroma(	(b_dx >> 1) + roundtab_79[b_dx & 0x3],
							(b_dy >> 1) + roundtab_79[b_dy & 0x3],
							i, j, cur, b_ref, tmp,
							&dct_codes[4 * 64], edged_width / 2, 0, 0);

		return;

	case MODE_INTERPOLATE:	/* _could_ use DIRECT, but would be overkill (no 4MV there) */
	case MODE_DIRECT_NO4V:
		dx = fmvs->x; dy = fmvs->y;
		b_dx = bmvs->x; b_dy = bmvs->y;

		if (quarterpel) {

			if ((dx&3) | (dy&3)) {
				interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width,
											(uint8_t *) f_ref->y, tmp + 32,
											tmp + 64, tmp + 96, 16*i, 16*j, dx, dy, edged_width, 0);
				ptr1 = tmp;
			} else ptr1 = f_ref->y + (16*j + dy/4)*edged_width + 16*i + dx/4; /* fullpixel position */

			if ((b_dx&3) | (b_dy&3)) {
				interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width + 16,
											(uint8_t *) b_ref->y, tmp + 32,
											tmp + 64, tmp + 96, 16*i, 16*j, b_dx, b_dy, edged_width, 0);
				ptr2 = tmp + 16;
			} else ptr2 = b_ref->y + (16*j + b_dy/4)*edged_width + 16*i + b_dx/4; /* fullpixel position */

			b_dx /= 2;
			b_dy /= 2;
			dx /= 2;
			dy /= 2;

		} else {
			ptr1 = get_ref(f_ref->y, f_refh->y, f_refv->y, f_refhv->y,
						   i, j, 16, dx, dy, edged_width);

			ptr2 = get_ref(b_ref->y, b_refh->y, b_refv->y, b_refhv->y,
						   i, j, 16, b_dx, b_dy, edged_width);
		}
		for (k = 0; k < 4; k++)
			transfer_8to16sub2(&dct_codes[k * 64],
							   cur->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
							   ptr1 + (k&1)*8 + (k>>1)*8*edged_width,
							   ptr2 + (k&1)*8 + (k>>1)*8*edged_width, edged_width);

		dx = (dx >> 1) + roundtab_79[dx & 0x3];
		dy = (dy >> 1) + roundtab_79[dy & 0x3];

		b_dx = (b_dx >> 1) + roundtab_79[b_dx & 0x3];
		b_dy = (b_dy >> 1) + roundtab_79[b_dy & 0x3];

		break;

	default:	/* MODE_DIRECT (or MODE_DIRECT_NONE_MV in case of bframes decoding) */
		sumx = sumy = b_sumx = b_sumy = 0;

		for (k = 0; k < 4; k++) {

			dx = fmvs[k].x; dy = fmvs[k].y;
			b_dx = bmvs[k].x; b_dy = bmvs[k].y;

			if (quarterpel) {
				sumx += dx/2; sumy += dy/2;
				b_sumx += b_dx/2; b_sumy += b_dy/2;

				if ((dx&3) | (dy&3)) {
					interpolate8x8_quarterpel(tmp - (i * 16+(k&1)*8) - (j * 16+((k>>1)*8)) * edged_width,
											  (uint8_t *) f_ref->y,
											  tmp + 32, tmp + 64, tmp + 96,
											  16*i + (k&1)*8, 16*j + (k>>1)*8, dx, dy, edged_width, 0);
					ptr1 = tmp;
				} else ptr1 = f_ref->y + (16*j + (k>>1)*8 + dy/4)*edged_width + 16*i + (k&1)*8 + dx/4;

				if ((b_dx&3) | (b_dy&3)) {
					interpolate8x8_quarterpel(tmp - (i * 16+(k&1)*8) - (j * 16+((k>>1)*8)) * edged_width + 16,
											  (uint8_t *) b_ref->y,
											  tmp + 16, tmp + 32, tmp + 48,
											  16*i + (k&1)*8, 16*j + (k>>1)*8, b_dx, b_dy, edged_width, 0);
					ptr2 = tmp + 16;
				} else ptr2 = b_ref->y + (16*j + (k>>1)*8 + b_dy/4)*edged_width + 16*i + (k&1)*8 + b_dx/4;
			} else {
				sumx += dx; sumy += dy;
				b_sumx += b_dx; b_sumy += b_dy;

				ptr1 = get_ref(f_ref->y, f_refh->y, f_refv->y, f_refhv->y,
							   2*i + (k&1), 2*j + (k>>1), 8, dx, dy, edged_width);
				ptr2 = get_ref(b_ref->y, b_refh->y, b_refv->y, b_refhv->y,
							   2*i + (k&1), 2*j + (k>>1), 8, b_dx, b_dy, edged_width);
			}
			transfer_8to16sub2(&dct_codes[k * 64],
							   cur->y + (i * 16+(k&1)*8) + (j * 16+((k>>1)*8)) * edged_width,
							   ptr1, ptr2, edged_width);
		}

		dx = (sumx >> 3) + roundtab_76[sumx & 0xf];
		dy = (sumy >> 3) + roundtab_76[sumy & 0xf];
		b_dx = (b_sumx >> 3) + roundtab_76[b_sumx & 0xf];
		b_dy = (b_sumy >> 3) + roundtab_76[b_sumy & 0xf];

		break;
	}

	/* uv block-based chroma interpolation for direct and interpolate modes */
	transfer_8to16sub2(&dct_codes[4 * 64],
					   cur->u + (j * 8) * edged_width / 2 + (i * 8),
					   interpolate8x8_switch2(tmp, b_ref->u, 8 * i, 8 * j,
											  b_dx, b_dy, edged_width / 2, 0),
					   interpolate8x8_switch2(tmp + 8, f_ref->u, 8 * i, 8 * j,
											  dx, dy, edged_width / 2, 0),
					   edged_width / 2);

	transfer_8to16sub2(&dct_codes[5 * 64],
					   cur->v + (j * 8) * edged_width / 2 + (i * 8),
					   interpolate8x8_switch2(tmp, b_ref->v, 8 * i, 8 * j,
											  b_dx, b_dy, edged_width / 2, 0),
					   interpolate8x8_switch2(tmp + 8, f_ref->v, 8 * i, 8 * j,
											  dx, dy, edged_width / 2, 0),
					   edged_width / 2);
}
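
/* Editor's note on transfer_8to16sub2 as used above: it is the
 * bidirectional counterpart of transfer_8to16sub -- it subtracts the
 * rounded average of the two predictions (per pixel, (p1 + p2 + 1) >> 1
 * in the reference C version, if memory serves) from the current block,
 * which is why MODE_INTERPOLATE and the DIRECT modes hand it both a
 * forward and a backward pointer. */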

void generate_GMCparameters(	const int num_wp, const int res,
								const WARPPOINTS *const warp,
								const int width, const int height,
								GMC_DATA *const gmc)
{
	const int du0 = warp->duv[0].x;
	const int dv0 = warp->duv[0].y;
	const int du1 = warp->duv[1].x;
	const int dv1 = warp->duv[1].y;
	const int du2 = warp->duv[2].x;
	const int dv2 = warp->duv[2].y;

	gmc->W = width;
	gmc->H = height;

	gmc->rho = 4 - log2bin(res-1);	/* = {3,2,1,0} for res={2,4,8,16} */

	gmc->alpha = log2bin(gmc->W-1);
	gmc->Ws = (1 << gmc->alpha);

	gmc->dxF = 16*gmc->Ws + RDIV( 8*gmc->Ws*du1, gmc->W );
	gmc->dxG = RDIV( 8*gmc->Ws*dv1, gmc->W );
	gmc->Fo = (res*du0 + 1) << (gmc->alpha+gmc->rho-1);
	gmc->Go = (res*dv0 + 1) << (gmc->alpha+gmc->rho-1);

	if (num_wp==2) {
		gmc->dyF = -gmc->dxG;
		gmc->dyG = gmc->dxF;
	} else if (num_wp==3) {
		gmc->beta = log2bin(gmc->H-1);
		gmc->Hs = (1 << gmc->beta);
		gmc->dyF = RDIV( 8*gmc->Hs*du2, gmc->H );
		gmc->dyG = 16*gmc->Hs + RDIV( 8*gmc->Hs*dv2, gmc->H );
		if (gmc->beta > gmc->alpha) {
			gmc->dxF <<= (gmc->beta - gmc->alpha);
			gmc->dxG <<= (gmc->beta - gmc->alpha);
			gmc->alpha = gmc->beta;
			gmc->Ws = 1 << gmc->beta;
		} else {
			gmc->dyF <<= gmc->alpha - gmc->beta;
			gmc->dyG <<= gmc->alpha - gmc->beta;
		}
	}

	gmc->cFo = gmc->dxF + gmc->dyF + (1 << (gmc->alpha+gmc->rho+1));
	gmc->cFo += 16*gmc->Ws*(du0-1);

	gmc->cGo = gmc->dxG + gmc->dyG + (1 << (gmc->alpha+gmc->rho+1));
	gmc->cGo += 16*gmc->Ws*(dv0-1);
}
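
/* Worked example of the precalculation above (values follow directly
 * from the formulas, nothing codec-specific assumed): for width W==720
 * and res==16, rho = 4 - log2bin(15) = 0, alpha = log2bin(719) = 10 and
 * Ws = 1024. dxF/dxG (and dyF/dyG) then carry the per-pixel increments
 * of the warp in this 2^(alpha+rho)-scaled fixed-point grid, with Fo/Go
 * (and cFo/cGo for chroma) as the matching start offsets. */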

void
generate_GMCimage(	const GMC_DATA *const gmc_data,	/* [input] precalculated data */
					const IMAGE *const pRef,		/* [input] */
					const int mb_width,
					const int mb_height,
					const int stride,
					const int stride2,
					const int fcode,				/* [input] some parameters... */
					const int32_t quarterpel,		/* [input] for rounding avgMV */
					const int reduced_resolution,	/* [input] ignored */
					const int32_t rounding,			/* [input] for rounding image data */
					MACROBLOCK *const pMBs,			/* [output] average motion vectors */
					IMAGE *const pGMC)				/* [output] full warped image */
{
	unsigned int mj, mi;
	VECTOR avgMV;

	for (mj = 0; mj < (unsigned int)mb_height; mj++)
		for (mi = 0; mi < (unsigned int)mb_width; mi++) {

			avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj,
										stride, stride2, quarterpel, rounding, pGMC);

			pMBs[mj*mb_width+mi].amv.x = gmc_sanitize(avgMV.x, quarterpel, fcode);
			pMBs[mj*mb_width+mi].amv.y = gmc_sanitize(avgMV.y, quarterpel, fcode);
			pMBs[mj*mb_width+mi].mcsel = 0;	/* until mode decision */
		}
}


#define MLT(i) (((16-(i))<<16) + (i))
static const uint32_t MTab[16] = {
	MLT( 0), MLT( 1), MLT( 2), MLT( 3), MLT( 4), MLT( 5), MLT( 6), MLT( 7),
	MLT( 8), MLT( 9), MLT(10), MLT(11), MLT(12), MLT(13), MLT(14), MLT(15)
};
#undef MLT
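
/* MLT packs the two bilinear weights (16-i) and i into one 32-bit word:
 * e.g. MTab[4] == (12<<16) | 4 == 0x000C0004. Multiplying a word that
 * holds two neighbouring pixels in its low and high 16-bit halves by such
 * a constant blends both taps with a single multiply -- the "MMX-like
 * bilinear" trick used in generate_GMCimageMB below. */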

VECTOR generate_GMCimageMB(	const GMC_DATA *const gmc_data,
							const IMAGE *const pRef,
							const int mi, const int mj,
							const int stride,
							const int stride2,
							const int quarterpel,
							const int rounding,
							IMAGE *const pGMC)
{
	const int W = gmc_data->W;
	const int H = gmc_data->H;

	const int rho = gmc_data->rho;
	const int alpha = gmc_data->alpha;

	const int rounder = ( 128 - (rounding<<(rho+rho)) ) << 16;

	const int dxF = gmc_data->dxF;
	const int dyF = gmc_data->dyF;
	const int dxG = gmc_data->dxG;
	const int dyG = gmc_data->dyG;

	uint8_t *dstY, *dstU, *dstV;

	int I, J;
	VECTOR avgMV = {0, 0};

	int32_t Fj, Gj;

	dstY = &pGMC->y[(mj*16)*stride+mi*16] + 16;

	Fj = gmc_data->Fo + dyF*mj*16 + dxF*mi*16;
	Gj = gmc_data->Go + dyG*mj*16 + dxG*mi*16;

	for (J = 16; J > 0; --J) {
		int32_t Fi, Gi;

		Fi = Fj; Fj += dyF;
		Gi = Gj; Gj += dyG;
		for (I = -16; I < 0; ++I) {
			int32_t F, G;
			uint32_t ri, rj;

			F = ( Fi >> (alpha+rho) ) << rho; Fi += dxF;
			G = ( Gi >> (alpha+rho) ) << rho; Gi += dxG;

			avgMV.x += F;
			avgMV.y += G;

			ri = MTab[F&15];
			rj = MTab[G&15];

			F >>= 4;
			G >>= 4;

			if (F < -1) F = -1;
			else if (F > W) F = W;
			if (G < -1) G = -1;
			else if (G > H) G = H;

			{	/* MMX-like bilinear... */
				const int offset = G*stride + F;
				uint32_t f0, f1;
				f0  = pRef->y[ offset +0 ];
				f0 |= pRef->y[ offset +1 ] << 16;
				f1  = pRef->y[ offset+stride +0 ];
				f1 |= pRef->y[ offset+stride +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstY[I] = (uint8_t)f0;
			}
		}
		dstY += stride;
	}

	dstU = &pGMC->u[(mj*8)*stride2+mi*8] + 8;
	dstV = &pGMC->v[(mj*8)*stride2+mi*8] + 8;

	Fj = gmc_data->cFo + dyF*4 *mj*8 + dxF*4 *mi*8;
	Gj = gmc_data->cGo + dyG*4 *mj*8 + dxG*4 *mi*8;

	for (J = 8; J > 0; --J) {
		int32_t Fi, Gi;
		Fi = Fj; Fj += 4*dyF;
		Gi = Gj; Gj += 4*dyG;

		for (I = -8; I < 0; ++I) {
			int32_t F, G;
			uint32_t ri, rj;

			F = ( Fi >> (alpha+rho+2) ) << rho; Fi += 4*dxF;
			G = ( Gi >> (alpha+rho+2) ) << rho; Gi += 4*dxG;

			ri = MTab[F&15];
			rj = MTab[G&15];

			F >>= 4;
			G >>= 4;

			if (F < -1) F = -1;
			else if (F >= W/2) F = W/2;
			if (G < -1) G = -1;
			else if (G >= H/2) G = H/2;

			{
				const int offset = G*stride2 + F;
				uint32_t f0, f1;

				f0  = pRef->u[ offset +0 ];
				f0 |= pRef->u[ offset +1 ] << 16;
				f1  = pRef->u[ offset+stride2 +0 ];
				f1 |= pRef->u[ offset+stride2 +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstU[I] = (uint8_t)f0;

				f0  = pRef->v[ offset +0 ];
				f0 |= pRef->v[ offset +1 ] << 16;
				f1  = pRef->v[ offset+stride2 +0 ];
				f1 |= pRef->v[ offset+stride2 +1 ] << 16;
				f0 = (ri*f0)>>16;
				f1 = (ri*f1) & 0x0fff0000;
				f0 |= f1;
				f0 = ( rj*f0 + rounder ) >> 24;

				dstV[I] = (uint8_t)f0;
			}
		}
		dstU += stride2;
		dstV += stride2;
	}

	avgMV.x -= 16*((256*mi+120)<<4);	/* 120 = 15*16/2 */
	avgMV.y -= 16*((256*mj+120)<<4);

	avgMV.x = RSHIFT( avgMV.x, (4+7-quarterpel) );
	avgMV.y = RSHIFT( avgMV.y, (4+7-quarterpel) );

	return avgMV;
}
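
/* About the avgMV bookkeeping at the end of generate_GMCimageMB: the loop
 * accumulates absolute source positions F,G in 1/16 pel, so each of the
 * 256 luma pixels also contributes its own coordinate. Those coordinates
 * sum to 16*(256*mi + 120) pels per component (120 = 15*16/2, as the
 * comment says), so the 16*((256*mi+120)<<4) subtraction converts the
 * accumulator into a sum of displacements before RSHIFT averages it down
 * to a half- or quarter-pel vector. */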

#ifdef OLD_GRUEL_GMC
void
generate_GMCparameters(	const int num_wp,				/* [input]: number of warp points */
						const int res,					/* [input]: resolution */
						const WARPPOINTS *const warp,	/* [input]: warp points */
						const int width, const int height,
						GMC_DATA *const gmc)			/* [output] precalculated parameters */
{

/* We follow mainly two sources: the original standard, which is ugly, and the
   thesis by Andreas Dehnhardt, which is much nicer.

	Notation is: indices are written next to the variable,
	primes in the standard are denoted by a suffix 'p'.
	types are "c"=constant, "i"=input parameter, "f"=calculated, then fixed,
	"o"=output data, " "=other, "u"=unused, "p"=calc for every pixel

	type | variable name | ISO name (TeX-style) | value or range  | usage
	-------------------------------------------------------------------------------------
	c    | W             | W                    | [16, ?]         | image width (w/o edges)
	c    | H             | H                    | [16, ?]         | image height (w/o edges)

	c    | i0            | i_0                  | 0               | ref. point #1, X
	c    | j0            | j_0                  | 0               | ref. point #1, Y
	c    | i1            | i_1                  | W               | ref. point #2, X
	c    | j1            | j_1                  | 0               | ref. point #2, Y
	cu   | i2            | i_2                  | 0               | ref. point #3, X
	cu   | j2            | j_2                  | H               | ref. point #3, Y

	i    | du0           | du[0]                | [-16863,16863]  | warp vector #1, X
	i    | dv0           | dv[0]                | [-16863,16863]  | warp vector #1, Y
	i    | du1           | du[1]                | [-16863,16863]  | warp vector #2, X
	i    | dv1           | dv[1]                | [-16863,16863]  | warp vector #2, Y
	iu   | du2           | du[2]                | [-16863,16863]  | warp vector #3, X
	iu   | dv2           | dv[2]                | [-16863,16863]  | warp vector #3, Y

	i    | s             | s                    | {2,4,8,16}      | interpol. resolution
	f    | sigma         | -                    | log2(s)         | X / s == X >> sigma
	f    | r             | r                    | = 16/s          | complementary res.
	f    | rho           | \rho                 | log2(r)         | X / r == X >> rho

	f    | i0s           | i'_0                 |                 |
	f    | j0s           | j'_0                 |                 |
	f    | i1s           | i'_1                 |                 |
	f    | j1s           | j'_1                 |                 |
	f    | i2s           | i'_2                 |                 |
	f    | j2s           | j'_2                 |                 |

	f    | alpha         | \alpha               |                 | 2^{alpha-1} < W <= 2^alpha
	f    | beta          | \beta                |                 | 2^{beta-1} < H <= 2^beta

	f    | Ws            | W'                   | Ws = 2^{alpha}  | scaled width
	f    | Hs            | H'                   | Hs = 2^{beta}   | scaled height

	f    | i1ss          | i''_1                | "virtual sprite stuff"
	f    | j1ss          | j''_1                | "virtual sprite stuff"
	f    | i2ss          | i''_2                | "virtual sprite stuff"
	f    | j2ss          | j''_2                | "virtual sprite stuff"
*/

	/* Some calculations are disabled because we only use 2 warp points at the moment */

	int du0 = warp->duv[0].x;
	int dv0 = warp->duv[0].y;
	int du1 = warp->duv[1].x;
	int dv1 = warp->duv[1].y;
#if 0
	int du2 = warp->duv[2].x;
	int dv2 = warp->duv[2].y;
#endif

	gmc->num_wp = num_wp;

	gmc->s = res;					/* scaling parameters 2,4,8 or 16 */
	gmc->sigma = log2bin(res-1);	/* log2bin(15)=4, log2bin(16)=5, log2bin(17)=5 */
	gmc->r = 16/res;
	gmc->rho = 4 - gmc->sigma;		/* = log2bin(r-1) */

	gmc->W = width;
	gmc->H = height;				/* fixed reference coordinates */

	gmc->alpha = log2bin(gmc->W-1);
	gmc->Ws = 1 << gmc->alpha;

#if 0
	gmc->beta = log2bin(gmc->H-1);
	gmc->Hs = 1 << gmc->beta;
#endif

#if 0
	printf("du0=%d dv0=%d du1=%d dv1=%d s=%d sigma=%d W=%d alpha=%d, Ws=%d, rho=%d\n",
		   du0, dv0, du1, dv1, gmc->s, gmc->sigma, gmc->W, gmc->alpha, gmc->Ws, gmc->rho);
#endif

	/*
	 * i2s is only needed for num_wp >= 3, etc.
	 * the 's' values are in 1/s pel resolution
	 */
	gmc->i0s = res/2 * ( du0 );
	gmc->j0s = res/2 * ( dv0 );
	gmc->i1s = res/2 * (2*width + du1 + du0 );
	gmc->j1s = res/2 * ( dv1 + dv0 );
#if 0
	gmc->i2s = res/2 * ( du2 + du0 );
	gmc->j2s = res/2 * (2*height + dv2 + dv0 );
#endif

	/* i2s and i2ss are only needed for num_wp == 3, etc. */

	/* the 'ss' values are in 1/16 pel resolution */
	gmc->i1ss = 16*gmc->Ws + ROUNDED_DIV(((gmc->W-gmc->Ws)*(gmc->r*gmc->i0s) + gmc->Ws*(gmc->r*gmc->i1s - 16*gmc->W)), gmc->W);
	gmc->j1ss = ROUNDED_DIV( ((gmc->W - gmc->Ws)*(gmc->r*gmc->j0s) + gmc->Ws*gmc->r*gmc->j1s), gmc->W );

#if 0
	gmc->i2ss = ROUNDED_DIV( ((gmc->H - gmc->Hs)*(gmc->r*gmc->i0s) + gmc->Hs*(gmc->r*gmc->i2s)), gmc->H);
	gmc->j2ss = 16*gmc->Hs + ROUNDED_DIV( ((gmc->H-gmc->Hs)*(gmc->r*gmc->j0s) + gmc->Ws*(gmc->r*gmc->j2s - 16*gmc->H)), gmc->H);
#endif

	return;
}

void
generate_GMCimage(	const GMC_DATA *const gmc_data,	/* [input] precalculated data */
					const IMAGE *const pRef,		/* [input] */
					const int mb_width,
					const int mb_height,
					const int stride,
					const int stride2,
					const int fcode,				/* [input] some parameters... */
					const int32_t quarterpel,		/* [input] for rounding avgMV */
					const int reduced_resolution,	/* [input] ignored */
					const int32_t rounding,			/* [input] for rounding image data */
					MACROBLOCK *const pMBs,			/* [output] average motion vectors */
					IMAGE *const pGMC)				/* [output] full warped image */
{
	unsigned int mj, mi;
	VECTOR avgMV;

	for (mj = 0; mj < mb_height; mj++)
		for (mi = 0; mi < mb_width; mi++) {

			avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj,
										stride, stride2, quarterpel, rounding, pGMC);

			pMBs[mj*mb_width+mi].amv.x = gmc_sanitize(avgMV.x, quarterpel, fcode);
			pMBs[mj*mb_width+mi].amv.y = gmc_sanitize(avgMV.y, quarterpel, fcode);
			pMBs[mj*mb_width+mi].mcsel = 0;	/* until mode decision */
		}
}

VECTOR generate_GMCimageMB(	const GMC_DATA *const gmc_data,	/* [input] all precalc data */
							const IMAGE *const pRef,		/* [input] */
							const int mi, const int mj,		/* [input] MB position */
							const int stride,				/* [input] lumi stride */
							const int stride2,				/* [input] chroma stride */
							const int quarterpel,			/* [input] for rounding of avgMV */
							const int rounding,				/* [input] for rounding of image data */
							IMAGE *const pGMC)				/* [output] generated image */

/*
	type | variable name | ISO name (TeX-style) | value or range | usage
	-------------------------------------------------------------------------------------
	p    | F             | F(i,j)               |                | pelwise motion vector X in s-th pel
	p    | G             | G(i,j)               |                | pelwise motion vector Y in s-th pel
	p    | Fc            | F_c(i,j)             |                |
	p    | Gc            | G_c(i,j)             |                | same for chroma

	p    | Y00           | Y_{00}               | [0,255*s*s]    | first: 4 neighbouring Y-values
	p    | Y01           | Y_{01}               | [0,255]        | at fullpel position, around the
	p    | Y10           | Y_{10}               | [0,255*s]      | position where the pelwise MV points to
	p    | Y11           | Y_{11}               | [0,255]        | later: bilinearly interpolated Y-value in Y00

	p    | C00           | C_{00}               | [0,255*s*s]    | same for chroma Cb and Cr
	p    | C01           | C_{01}               | [0,255]        |
	p    | C10           | C_{10}               | [0,255*s]      |
	p    | C11           | C_{11}               | [0,255]        |
*/
{
	const int W = gmc_data->W;
	const int H = gmc_data->H;

	const int s = gmc_data->s;
	const int sigma = gmc_data->sigma;

	const int r = gmc_data->r;
	const int rho = gmc_data->rho;

	const int i0s = gmc_data->i0s;
	const int j0s = gmc_data->j0s;

	const int i1ss = gmc_data->i1ss;
	const int j1ss = gmc_data->j1ss;
#if 0
	const int i2ss = gmc_data->i2ss;
	const int j2ss = gmc_data->j2ss;
#endif

	const int alpha = gmc_data->alpha;
	const int Ws = gmc_data->Ws;

#if 0
	const int beta = gmc_data->beta;
	const int Hs = gmc_data->Hs;
#endif

	int I, J;
	VECTOR avgMV = {0, 0};

	for (J = 16*mj; J < 16*(mj+1); J++)
		for (I = 16*mi; I < 16*(mi+1); I++)
		{
			int F = i0s + ( ((-r*i0s+i1ss)*I + ( r*j0s-j1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho) );
			int G = j0s + ( ((-r*j0s+j1ss)*I + (-r*i0s+i1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho) );

			/* this naive implementation (with lots of multiplications) isn't slower (rather faster) than
			   working incrementally. Don't ask me why... maybe the whole thing is memory bound? */

			const int ri = F & (s-1);	/* fractional part of pelwise MV X */
			const int rj = G & (s-1);	/* fractional part of pelwise MV Y */

			int Y00, Y01, Y10, Y11;

			/* unclipped values are used for avgMV */
			avgMV.x += F - (I<<sigma);	/* shift position to 1/s-pel, as the MV is */
			avgMV.y += G - (J<<sigma);	/* TODO: don't do this (of course) */

			F >>= sigma;
			G >>= sigma;

			/* clip values to be in range. Since we have edges, clip to 1 less than the lower boundary;
			   this way positions F+1/G+1 are still right */

			if (F < -1)
				F = -1;
			else if (F > W)
				F = W;	/* W or W-1 doesn't matter, so save 1 subtract ;-) */
			if (G < -1)
				G = -1;
			else if (G > H)
				G = H;	/* ditto */

			Y00 = pRef->y[ G*stride + F ];	/* lumi values */
			Y01 = pRef->y[ G*stride + F+1 ];
			Y10 = pRef->y[ G*stride + F+stride ];
			Y11 = pRef->y[ G*stride + F+stride+1 ];

			/* bilinear interpolation */
			Y00 = ((s-ri)*Y00 + ri*Y01);
			Y10 = ((s-ri)*Y10 + ri*Y11);
			Y00 = ((s-rj)*Y00 + rj*Y10 + s*s/2 - rounding ) >> (sigma+sigma);

			pGMC->y[J*stride+I] = (uint8_t)Y00;	/* output 1 Y-pixel */
		}
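
	/* Editor's worked example of the bilinear step above (pure arithmetic,
	   s==16, rounding==0): with neighbours Y00..Y11 = 100,110,120,130 and
	   fractions ri==4, rj==8 (i.e. 0.25/0.5 pel), the two horizontal blends
	   give 12*100+4*110 = 1640 and 12*120+4*130 = 1960; the vertical blend
	   gives (8*1640 + 8*1960 + 128) >> 8 == 113, matching the exact value
	   112.5 rounded up. */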

	/* doing chroma _here_ is even more stupid and slow, because it won't be used until compensation
	   and most likely not even then (only if the block really _is_ GMC) */

	for (J = 8*mj; J < 8*(mj+1); J++)		/* this plays the role of j_c, i_c in the standard */
		for (I = 8*mi; I < 8*(mi+1); I++)	/* for I_c we have to use I_c = 4*i_c+1 ! */
		{
			/* same positions for both chroma components, U=Cb and V=Cr */
			int Fc = ((-r*i0s+i1ss)*(4*I+1) + ( r*j0s-j1ss)*(4*J+1) + 2*Ws*r*i0s
					  - 16*Ws + (1<<(alpha+rho+1))) >> (alpha+rho+2);
			int Gc = ((-r*j0s+j1ss)*(4*I+1) + (-r*i0s+i1ss)*(4*J+1) + 2*Ws*r*j0s
					  - 16*Ws + (1<<(alpha+rho+1))) >> (alpha+rho+2);

			const int ri = Fc & (s-1);	/* fractional part of pelwise MV X */
			const int rj = Gc & (s-1);	/* fractional part of pelwise MV Y */

			int C00, C01, C10, C11;

			Fc >>= sigma;
			Gc >>= sigma;

			if (Fc < -1)
				Fc = -1;
			else if (Fc >= W/2)
				Fc = W/2;	/* W or W-1 doesn't matter, so save 1 subtraction ;-) */
			if (Gc < -1)
				Gc = -1;
			else if (Gc >= H/2)
				Gc = H/2;	/* ditto */

			/* now calculate U data */
			C00 = pRef->u[ Gc*stride2 + Fc ];	/* chroma-value Cb */
			C01 = pRef->u[ Gc*stride2 + Fc+1 ];
			C10 = pRef->u[ (Gc+1)*stride2 + Fc ];
			C11 = pRef->u[ (Gc+1)*stride2 + Fc+1 ];

			/* bilinear interpolation */
			C00 = ((s-ri)*C00 + ri*C01);
			C10 = ((s-ri)*C10 + ri*C11);
			C00 = ((s-rj)*C00 + rj*C10 + s*s/2 - rounding ) >> (sigma+sigma);

			pGMC->u[J*stride2+I] = (uint8_t)C00;	/* output 1 U-pixel */

			/* now calculate V data */
			C00 = pRef->v[ Gc*stride2 + Fc ];	/* chroma-value Cr */
			C01 = pRef->v[ Gc*stride2 + Fc+1 ];
			C10 = pRef->v[ (Gc+1)*stride2 + Fc ];
			C11 = pRef->v[ (Gc+1)*stride2 + Fc+1 ];

			/* bilinear interpolation */
			C00 = ((s-ri)*C00 + ri*C01);
			C10 = ((s-ri)*C10 + ri*C11);
			C00 = ((s-rj)*C00 + rj*C10 + s*s/2 - rounding ) >> (sigma+sigma);

			pGMC->v[J*stride2+I] = (uint8_t)C00;	/* output 1 V-pixel */
		}

	/* The average vector is rounded from 1/s-pel to 1/2- or 1/4-pel using the '//' operator */

	avgMV.x = RSHIFT( avgMV.x, (sigma+7-quarterpel) );
	avgMV.y = RSHIFT( avgMV.y, (sigma+7-quarterpel) );

	/* ^^^^ this is the way the MS reference software does it */

	return avgMV;	/* clipping to the fcode range is done outside! */
}
#endif