#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "interpolate8x8.h"
#include "../utils/mem_transfer.h"

32 |
/***************************************************************************** |
/***************************************************************************** |
67 |
****************************************************************************/ |
****************************************************************************/ |
extern void xvid_Init_QP();

extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref;      /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref;  /* for B-frames */

extern XVID_QP_FUNCS xvid_QP_Funcs_C;          /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;      /* for B-frames */

#ifdef ARCH_IS_IA32
extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
#endif

#ifdef ARCH_IS_PPC
extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
#endif

#ifdef ARCH_IS_X86_64
extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
#endif

extern XVID_QP_FUNCS *xvid_QP_Funcs;      /* <- main pointer for enc/dec structure */
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;  /* <- main pointer for enc/dec structure */
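
/* Each XVID_QP_FUNCS table bundles the filtering passes used by the inline
 * functions below (H_Pass, V_Pass, their Avrg/Avrg_Up averaging variants and
 * the *_8 narrow-block versions). The arch-specific tables above are
 * alternative implementations of the same passes; xvid_QP_Funcs and
 * xvid_QP_Add_Funcs are presumably pointed at the best table for the host
 * CPU at init time (xvid_Init_QP() / CPU feature detection). */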

/*****************************************************************************
 * ...
 ****************************************************************************/
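
/* The inline wrappers below build a quarter-pel prediction for one block of
 * the current image: (x,y) is the block's integer pel position in cur,
 * (dx,dy) the quarter-pel displacement, and stride/rounding are passed
 * straight through to the filtering passes. refh doubles as scratch space
 * for the two-pass cases. */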

static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
        uint8_t * const refn,
        uint8_t * const refh,
        uint8_t * const refv,
        uint8_t * const refhv,
        const uint32_t x, const uint32_t y,
        const int32_t dx, const int dy,
        const uint32_t stride,
        const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;
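
    /* quads packs the fractional (quarter-pel) part of the displacement:
     * bits 0-1 hold dx&3 (horizontal phase), bits 2-3 hold dy&3 (vertical
     * phase). x_int/y_int are the integer-pel reference coordinates; the
     * extra tests turn C's truncate-toward-zero division into a floor for
     * negative positions (e.g. xRef = -3: -3/4 == 0 in C, corrected to
     * -1 == floor(-3/4)). */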

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */
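
    /* For the mixed horizontal+vertical cases below, the horizontal pass
     * first writes a 16+1-row intermediate into this scratch area and the
     * vertical pass then reads it back (see the switch in
     * interpolate16x16_add_quarterpel); the extra row apparently provides
     * the one row of look-ahead the vertical filter needs. */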

    /* ... */
}
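
/* Illustrative example (values chosen here, not taken from the original
 * source): a 16x16 block at pel position (x,y) = (32,16), compensated with
 * the quarter-pel motion vector (dx,dy) = (-3,5):
 *
 *   interpolate16x16_quarterpel(cur, refn, refh, refv, refhv,
 *                               32, 16, -3, 5, stride, 0);
 *
 * gives xRef = 32*4-3 = 125, x_int = 31, yRef = 16*4+5 = 69, y_int = 17 and
 * quads = (dx&3) | ((dy&3)<<2) = 1 | (1<<2) = 5, i.e. a horizontal averaging
 * pass into the scratch block followed by a vertical averaging pass into the
 * destination block (see the switch in interpolate16x16_add_quarterpel for
 * the per-case layout). */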

static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
        uint8_t * const refn,
        uint8_t * const refh,
        uint8_t * const refv,
        uint8_t * const refhv,
        const uint32_t x, const uint32_t y,
        const int32_t dx, const int dy,
        const uint32_t stride,
        const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
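
    /* Two tables are used: the final pass comes from the "add" table, whose
     * passes presumably blend their output into dst (B-frame compensation,
     * cf. the "for B-frames" comments above), while the intermediate
     * horizontal pass into the scratch block uses the plain table so the
     * intermediate rows are written out as-is before the vertical pass
     * reads them. */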
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */
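
    /* Dispatch on the two quarter-pel phases. From the case layout: phase 0
     * is the integer position, phase 2 the halfpel one (H_Pass / V_Pass),
     * and phases 1 and 3 the quarter positions (the Avrg / Avrg_Up variants,
     * which presumably average the filtered halfpel value with the nearer
     * integer sample). Purely horizontal or vertical cases need a single
     * pass; mixed cases run a horizontal pass into tmp (16+1 rows) followed
     * by a vertical pass from tmp into dst. */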

    switch(quads) {
    case 0:
        /* NB: despite the called function's name, no halfpel filtering is
         * involved here */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 2:
        Ops->H_Pass(dst, src, 16, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 8:
        Ops->V_Pass(dst, src, 16, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    }
}

static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
        uint8_t * const refn,
        uint8_t * const refh,
        uint8_t * const refv,
        uint8_t * const refhv,
        const uint32_t x, const uint32_t y,
        const int32_t dx, const int dy,
        const uint32_t stride,
        const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    /* ... */
}

static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
        uint8_t * const refn,
        uint8_t * const refh,
        uint8_t * const refv,
        uint8_t * const refhv,
        const uint32_t x, const uint32_t y,
        const int32_t dx, const int dy,
        const uint32_t stride,
        const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    switch(quads) {
        /* ... */
    }
}

static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
        uint8_t * const refn,
        uint8_t * const refh,
        uint8_t * const refv,
        uint8_t * const refhv,
        const uint32_t x, const uint32_t y,
        const int32_t dx, const int dy,
        const uint32_t stride,
        const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */
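
    /* Same dispatch as the 16-wide version above, but through the *_8 pass
     * variants working on 8-pixel-wide rows; the intermediate written into
     * tmp is 8+1 = 9 rows tall for the mixed cases. */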

    switch(quads) {
    case 0:
        /* NB: despite the called function's name, no halfpel filtering is
         * involved here; this is just dst/src averaging with rounding=0 */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 2:
        Ops->H_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 8:
        Ops->V_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    }
}

#endif /* _XVID_QPEL_H_ */