#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "interpolate8x8.h"
#include "../utils/mem_transfer.h"

/*****************************************************************************
 * ...
 ****************************************************************************/
extern void xvid_Init_QP();

extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref; /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */

extern XVID_QP_FUNCS xvid_QP_Funcs_C; /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C; /* for B-frames */

#ifdef ARCH_IS_IA32
extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
#endif

#ifdef ARCH_IS_PPC
extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
#endif

#ifdef ARCH_IS_X86_64
extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
#endif

extern XVID_QP_FUNCS *xvid_QP_Funcs; /* <- main pointer for enc/dec structure */
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */
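
/* xvid_QP_Funcs / xvid_QP_Add_Funcs are runtime dispatch pointers: the idea
 * is that initialization selects the best table for the host CPU and points
 * them at it. A minimal sketch of the expected setup (the selection logic is
 * an assumption, not part of this header):
 *
 *   xvid_Init_QP();
 *   xvid_QP_Funcs = &xvid_QP_Funcs_C;           // or _mmx / _Altivec_C / _x86_64
 *   xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;
 */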
/*****************************************************************************
 * ...
 ****************************************************************************/

static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
                            uint8_t * const refn,
                            uint8_t * const refh,
                            uint8_t * const refv,
                            uint8_t * const refhv,
                            const uint32_t x, const uint32_t y,
                            const int32_t dx, const int dy,
                            const uint32_t stride,
                            const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;
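    /* x and y are unsigned; the (int) casts keep the products in the signed
     * domain before the signed dx/dy offsets are added */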
    Ops = xvid_QP_Funcs;
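    /* quads packs the two quarter-pel phases into one 4-bit selector:
     * bits 0-1 = horizontal phase (dx&3), bits 2-3 = vertical phase (dy&3) */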
    quads = (dx&3) | ((dy&3)<<2);
    x_int = xRef >> 2;
    y_int = yRef >> 2;
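    /* the arithmetic right shift floors toward minus infinity, which is the
     * behaviour wanted for negative xRef/yRef (xRef/4 would truncate toward
     * zero and land on the wrong full-pel sample) */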
|
    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    /* ... the 16-way switch on quads (cases 0-15) is elided here; it is
     * analogous to the dispatch in interpolate16x16_add_quarterpel below,
     * but writes the result instead of blending it in ... */
}
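
/* The _add variants below blend the filtered prediction into what is already
 * in `cur` (they dispatch through xvid_QP_Add_Funcs); as the declarations
 * above note, that path serves B-frames, where two predictions are averaged. */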

static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
                                uint8_t * const refn,
                                uint8_t * const refh,
                                uint8_t * const refv,
                                uint8_t * const refhv,
                                const uint32_t x, const uint32_t y,
                                const int32_t dx, const int dy,
                                const uint32_t stride,
                                const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);
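    /* two function tables are needed here: Ops_Copy (the plain variants)
     * runs the horizontal pre-pass into the scratch block, while the final
     * vertical pass goes through the Add variants, so the blend with dst
     * happens exactly once */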
    x_int = xRef >> 2;
    y_int = yRef >> 2;
|
    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */
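    /* the intermediate passes below produce one extra row (17 rather than 16)
     * because the vertical filter that follows reads one row beyond the block */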
    switch(quads) {
    case 0:
        /* NB: there is no halfpel involved here; the function name can be
         * misleading */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 2:
        Ops->H_Pass(dst, src, 16, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 8:
        Ops->V_Pass(dst, src, 16, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    }
}

static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
                           uint8_t * const refn,
                           uint8_t * const refh,
                           uint8_t * const refv,
                           uint8_t * const refhv,
                           const uint32_t x, const uint32_t y,
                           const int32_t dx, const int dy,
                           const uint32_t stride,
                           const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    /* ... switch on quads elided (16x8 variant of the dispatch above) ... */
}

static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
                          uint8_t * const refn,
                          uint8_t * const refh,
                          uint8_t * const refv,
                          uint8_t * const refhv,
                          const uint32_t x, const uint32_t y,
                          const int32_t dx, const int dy,
                          const uint32_t stride,
                          const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;
|
    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    /* ... switch on quads elided (8x8 variants of the dispatch: H_Pass_8,
     * V_Pass_8, etc.) ... */
}

static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
                              uint8_t * const refn,
                              uint8_t * const refh,
                              uint8_t * const refv,
                              uint8_t * const refhv,
                              const uint32_t x, const uint32_t y,
                              const int32_t dx, const int dy,
                              const uint32_t stride,
                              const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    switch(quads) {
    case 0:
        /* Misleading function name: there is no halfpel involved,
         * just dst and src averaging with rounding=0 */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 2:
        Ops->H_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 8:
        Ops->V_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    }
}
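
/* Usage sketch (caller-side names are hypothetical, not part of this header):
 *
 *   // cur: destination frame plane, refn: reference plane,
 *   // refh/refv/refhv: scratch space (>= 16 x stride bytes),
 *   // (mvx, mvy): quarter-pel motion vector for the 16x16 block at (mb_x, mb_y)
 *   interpolate16x16_quarterpel(cur, refn, refh, refv, refhv,
 *                               16*mb_x, 16*mb_y, mvx, mvy, stride, 0);
 */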

#endif /* _XVID_QPEL_H_ */