 * XVID MPEG-4 VIDEO CODEC
 * - QPel interpolation -
 *
 * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 * This program is free software ; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation ; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY ; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program ; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id$
 *
 ****************************************************************************/

#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "interpolate8x8.h"
#include "../utils/mem_transfer.h"

/*****************************************************************************
 * Signatures
 ****************************************************************************/

typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);

/* We put everything in a single struct so it can easily be passed
 * to prediction functions as a whole... */

typedef struct _XVID_QP_FUNCS {

	/* filter for QPel 16x? prediction */

	XVID_QP_PASS *H_Pass;
	XVID_QP_PASS *H_Pass_Avrg;
	XVID_QP_PASS *H_Pass_Avrg_Up;
	XVID_QP_PASS *V_Pass;
	XVID_QP_PASS *V_Pass_Avrg;
	XVID_QP_PASS *V_Pass_Avrg_Up;

	/* filter for QPel 8x? prediction */

	XVID_QP_PASS *H_Pass_8;
	XVID_QP_PASS *H_Pass_Avrg_8;
	XVID_QP_PASS *H_Pass_Avrg_Up_8;
	XVID_QP_PASS *V_Pass_8;
	XVID_QP_PASS *V_Pass_Avrg_8;
	XVID_QP_PASS *V_Pass_Avrg_Up_8;
} XVID_QP_FUNCS;
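
/* Illustrative sketch (not part of the original header): each XVID_QP_PASS
 * entry performs one horizontal or vertical filter pass over a block of
 * rows. A full quarter-pel interpolation chains a horizontal pass into a
 * scratch block and a vertical pass on top of it, roughly (funcs, tmp, src,
 * dst, stride and rounding being caller-provided):
 *
 *     const XVID_QP_FUNCS *funcs = xvid_QP_Funcs;
 *     funcs->H_Pass(tmp, src, 17, stride, rounding);   17 rows: one extra for the vertical filter
 *     funcs->V_Pass(dst, tmp, 16, stride, rounding);   16 output rows
 */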

/*****************************************************************************
 * fwd dcl
 ****************************************************************************/
extern void xvid_Init_QP();

extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref;       /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref;   /* for B-frames */

extern XVID_QP_FUNCS xvid_QP_Funcs_C;           /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;       /* for B-frames */

#ifdef ARCH_IS_IA32
extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
#endif

#ifdef ARCH_IS_PPC
extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
#endif

extern XVID_QP_FUNCS *xvid_QP_Funcs;       /* <- main pointer for enc/dec structure */
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;   /* <- main pointer for enc/dec structure */
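
/* Usage sketch (an assumption; the init sequence is not spelled out in this
 * header): xvid_Init_QP() is expected to be called once at startup so that
 * xvid_QP_Funcs and xvid_QP_Add_Funcs point at a valid table (the C
 * reference tables or, on matching builds, the MMX/Altivec ones):
 *
 *     xvid_Init_QP();
 *     xvid_QP_Funcs->H_Pass_8(dst, src, 8, stride, 0);   dst/src being caller buffers
 */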

/*****************************************************************************
 * macros

 ****************************************************************************/

static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
                            uint8_t * const refn,
                            uint8_t * const refh,
                            uint8_t * const refv,
                            uint8_t * const refhv,
                            const uint32_t x, const uint32_t y,
                            const int32_t dx, const int dy,
                            const uint32_t stride,
                            const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);
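	/* quads encodes the fractional part of the motion vector: bits 0-1 hold
	 * the horizontal quarter-pel phase (dx&3), bits 2-3 the vertical one
	 * (dy&3). Phase 0 needs no filtering, a pure horizontal or vertical
	 * phase takes a single pass, and mixed phases run a horizontal pass
	 * into the scratch block followed by a vertical pass. */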

	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
	}
}

static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
                                uint8_t * const refn,
                                uint8_t * const refh,
                                uint8_t * const refv,
                                uint8_t * const refhv,
                                const uint32_t x, const uint32_t y,
                                const int32_t dx, const int dy,
                                const uint32_t stride,
                                const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;
	const XVID_QP_FUNCS *Ops_Copy;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Add_Funcs;
	Ops_Copy = xvid_QP_Funcs;
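	/* The Add table combines its filtered output with the prediction already
	 * present in dst (the B-frame use case noted above), so only the final
	 * pass goes through Ops; the intermediate horizontal pass into the
	 * scratch block uses the plain table (Ops_Copy). */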
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* NB: there is no halfpel involved here; the function's name can be
		 * misleading */
		interpolate8x8_halfpel_add(dst, src, stride, rounding);
		interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 16, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 5:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	}
}

static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
                           uint8_t * const refn,
                           uint8_t * const refh,
                           uint8_t * const refv,
                           uint8_t * const refhv,
                           const uint32_t x, const uint32_t y,
                           const int32_t dx, const int dy,
                           const uint32_t stride,
                           const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;
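	/* xRef/yRef are in quarter-pel units and can be negative; the two
	 * adjustments above emulate a floor division, so x_int/y_int are the
	 * integer-pel coordinates of the reference block even for negative
	 * positions. */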

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
	}
}

static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
                          uint8_t * const refn,
                          uint8_t * const refh,
                          uint8_t * const refv,
                          uint8_t * const refhv,
                          const uint32_t x, const uint32_t y,
                          const int32_t dx, const int dy,
                          const uint32_t stride,
                          const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		break;
	}
}

static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
                              uint8_t * const refn,
                              uint8_t * const refh,
                              uint8_t * const refv,
                              uint8_t * const refhv,
                              const uint32_t x, const uint32_t y,
                              const int32_t dx, const int dy,
                              const uint32_t stride,
                              const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;
	const XVID_QP_FUNCS *Ops_Copy;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Add_Funcs;
	Ops_Copy = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* Misleading function name, there is no halfpel involved
		 * just dst and src averaging with rounding=0 */
		interpolate8x8_halfpel_add(dst, src, stride, rounding);
		break;
	case 1:
		Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 2:
		Ops->H_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 5:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 6:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 7:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 8:
		Ops->V_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 9:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 10:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 11:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 13:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 14:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 15:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	}
}

#endif /* _XVID_QPEL_H_ */