@@ -78,6 +78,16 @@
 extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
 #endif
 
+#ifdef ARCH_IS_PPC
+extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
+extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
+#endif
+
+#ifdef ARCH_IS_X86_64
+extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
+extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
+#endif
+
 extern XVID_QP_FUNCS *xvid_QP_Funcs;	/* <- main pointer for enc/dec structure */
 extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;	/* <- main pointer for enc/dec structure */
 
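The xvid_QP_Funcs / xvid_QP_Add_Funcs pointers declared above are bound to one
of the per-architecture tables during global initialization. A minimal sketch
of that dispatch, assuming the XVID_CPU_MMX capability bit from xvid.h and the
plain-C tables as fallback; the qpel_init() wrapper itself is hypothetical (in
xvidcore the equivalent assignments live in the library's global init path):

/* Sketch: bind the quarter-pel tables once CPU capabilities are known. */
static void qpel_init(unsigned int cpu_flags)
{
	xvid_QP_Funcs = &xvid_QP_Funcs_C;	/* portable C fallback */
	xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;

#if defined(ARCH_IS_IA32)
	if (cpu_flags & XVID_CPU_MMX) {
		xvid_QP_Funcs = &xvid_QP_Funcs_mmx;
		xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
	}
#elif defined(ARCH_IS_X86_64)
	/* the tables added by this patch */
	xvid_QP_Funcs = &xvid_QP_Funcs_x86_64;
	xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_x86_64;
#endif
}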
@@ -120,8 +130,8 @@
 
 	int32_t x_int, y_int;
 
-	const int32_t xRef = x*4 + dx;
-	const int32_t yRef = y*4 + dy;
+	const int32_t xRef = (int)x*4 + dx;
+	const int32_t yRef = (int)y*4 + dy;
 
 	Ops = xvid_QP_Funcs;
 	quads = (dx&3) | ((dy&3)<<2);
@@ -135,7 +145,7 @@
 	y_int--;
 
 	dst = cur + y * stride + x;
-	src = refn + y_int * stride + x_int;
+	src = refn + y_int * (int)stride + x_int;
 
 	tmp = refh; /* we need at least a 16 x stride scratch block */
 
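Every cast added below follows the same pattern as this hunk, so one note for
all of them: x, y and stride are unsigned (uint32_t) parameters, and C's usual
arithmetic conversions turn a mixed expression like y_int * stride into an
unsigned multiply. The wrapped 32-bit product happens to cancel out in 32-bit
pointer arithmetic, but on a 64-bit target it zero-extends into a huge
positive offset instead of a small negative one, which is why the x86_64 port
needs these casts. The same reasoning applies to (int)x*4 + dx, since dx may
be negative. A self-contained demonstration, independent of xvidcore:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t stride = 32;	/* unsigned, like the stride parameter */
	const int32_t y_int = -1;	/* one row up, i.e. an offset of -32 */

	/* int32_t * uint32_t: y_int is converted to uint32_t first,
	 * so -1 * 32 wraps to 0xFFFFFFE0 (4294967264). */
	printf("without cast: %lld\n", (long long)(y_int * stride));

	/* With the cast the multiply stays signed and yields -32,
	 * which is what refn + y_int * (int)stride + x_int expects. */
	printf("with cast:    %lld\n", (long long)(y_int * (int)stride));
	return 0;
}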
@@ -223,8 +233,8 @@
 
 	int32_t x_int, y_int;
 
-	const int32_t xRef = x*4 + dx;
-	const int32_t yRef = y*4 + dy;
+	const int32_t xRef = (int)x*4 + dx;
+	const int32_t yRef = (int)y*4 + dy;
 
 	Ops = xvid_QP_Add_Funcs;
 	Ops_Copy = xvid_QP_Funcs;
@@ -239,7 +249,7 @@
 	y_int--;
 
 	dst = cur + y * stride + x;
-	src = refn + y_int * stride + x_int;
+	src = refn + y_int * (int)stride + x_int;
 
 	tmp = refh; /* we need at least a 16 x stride scratch block */
 
@@ -328,8 +338,8 @@
 
 	int32_t x_int, y_int;
 
-	const int32_t xRef = x*4 + dx;
-	const int32_t yRef = y*4 + dy;
+	const int32_t xRef = (int)x*4 + dx;
+	const int32_t yRef = (int)y*4 + dy;
 
 	Ops = xvid_QP_Funcs;
 	quads = (dx&3) | ((dy&3)<<2);
@@ -343,7 +353,7 @@
 	y_int--;
 
 	dst = cur + y * stride + x;
-	src = refn + y_int * stride + x_int;
+	src = refn + y_int * (int)stride + x_int;
 
 	tmp = refh; /* we need at least a 16 x stride scratch block */
 
@@ -428,8 +438,8 @@
 
 	int32_t x_int, y_int;
 
-	const int32_t xRef = x*4 + dx;
-	const int32_t yRef = y*4 + dy;
+	const int32_t xRef = (int)x*4 + dx;
+	const int32_t yRef = (int)y*4 + dy;
 
 	Ops = xvid_QP_Funcs;
 	quads = (dx&3) | ((dy&3)<<2);
@@ -443,7 +453,7 @@
 	y_int--;
 
 	dst = cur + y * stride + x;
-	src = refn + y_int * stride + x_int;
+	src = refn + y_int * (int)stride + x_int;
 
 	tmp = refh; /* we need at least a 16 x stride scratch block */
 
@@ -528,8 +538,8 @@
 
 	int32_t x_int, y_int;
 
-	const int32_t xRef = x*4 + dx;
-	const int32_t yRef = y*4 + dy;
+	const int32_t xRef = (int)x*4 + dx;
+	const int32_t yRef = (int)y*4 + dy;
 
 	Ops = xvid_QP_Add_Funcs;
 	Ops_Copy = xvid_QP_Funcs;
@@ -544,7 +554,7 @@
 	y_int--;
 
 	dst = cur + y * stride + x;
-	src = refn + y_int * stride + x_int;
+	src = refn + y_int * (int)stride + x_int;
 
 	tmp = refh; /* we need at least a 16 x stride scratch block */
 
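One more note, unrelated to the casts: the recurring tmp = refh line documents
a caller contract, since the routines apparently run an intermediate filter
pass through refh before the final pass reads it back. A hypothetical helper
(not part of the xvidcore API) that makes the "at least a 16 x stride scratch
block" requirement explicit:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical: allocate scratch for the quarter-pel routines;
 * the in-code comment asks for at least 16 rows of stride bytes. */
static uint8_t *alloc_qpel_scratch(uint32_t stride)
{
	return (uint8_t *) malloc(16 * (size_t) stride);
}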