[cvs] / xvidcore / src / image / qpel.h Repository:
ViewVC logotype

Annotation of /xvidcore/src/image/qpel.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.6 - (view) (download)

1 : edgomez 1.2 /*****************************************************************************
2 :     *
3 :     * XVID MPEG-4 VIDEO CODEC
4 :     * - QPel interpolation -
5 :     *
6 :     * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
7 :     *
8 :     * This program is free software ; you can redistribute it and/or modify
9 :     * it under the terms of the GNU General Public License as published by
10 :     * the Free Software Foundation ; either version 2 of the License, or
11 :     * (at your option) any later version.
12 :     *
13 :     * This program is distributed in the hope that it will be useful,
14 :     * but WITHOUT ANY WARRANTY ; without even the implied warranty of
15 :     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 :     * GNU General Public License for more details.
17 :     *
18 :     * You should have received a copy of the GNU General Public License
19 :     * along with this program ; if not, write to the Free Software
20 :     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 :     *
22 :     * $Id$
23 :     *
24 :     ****************************************************************************/
25 :    
26 :     #ifndef _XVID_QPEL_H_
27 :     #define _XVID_QPEL_H_
28 :    
29 : edgomez 1.3 #include "interpolate8x8.h"
30 : edgomez 1.2 #include "../utils/mem_transfer.h"
31 :    
32 :     /*****************************************************************************
33 :     * Signatures
34 :     ****************************************************************************/
35 :    
/* Common signature of every QPel filter pass.
 *   dst/src  - destination / source pixels
 *   length   - number of lines to process (call sites below pass 8/9 or 16/17)
 *   BpS      - stride, presumably bytes per scanline
 *   rounding - MPEG-4 rounding control flag
 * NOTE(review): length/BpS meanings inferred from the call sites in this
 * header; confirm against the pass implementations. */
#define XVID_QP_PASS_SIGNATURE(NAME) \
void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding)

/* XVID_QP_PASS is the function type itself; table entries below are pointers to it */
typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);
40 :    
/* We put everything in a single struct so it can easily be passed
 * to prediction functions as a whole... */

typedef struct _XVID_QP_FUNCS {

	/* filter passes for QPel 16x? prediction (see the pass table further down:
	 * "Avrg" = pass + averaging, "Avrg_Up" = averaging with the 'up' neighbour) */

	XVID_QP_PASS *H_Pass;          /* plain horizontal pass */
	XVID_QP_PASS *H_Pass_Avrg;     /* h-pass + h-avrg */
	XVID_QP_PASS *H_Pass_Avrg_Up;  /* h-pass + h-avrg, 'up' variant */
	XVID_QP_PASS *V_Pass;          /* plain vertical pass */
	XVID_QP_PASS *V_Pass_Avrg;     /* v-pass + v-avrg */
	XVID_QP_PASS *V_Pass_Avrg_Up;  /* v-pass + v-avrg, 'up' variant */

	/* same passes, 8-pixel-wide variants, for QPel 8x? prediction */

	XVID_QP_PASS *H_Pass_8;
	XVID_QP_PASS *H_Pass_Avrg_8;
	XVID_QP_PASS *H_Pass_Avrg_Up_8;
	XVID_QP_PASS *V_Pass_8;
	XVID_QP_PASS *V_Pass_Avrg_8;
	XVID_QP_PASS *V_Pass_Avrg_Up_8;
} XVID_QP_FUNCS;
64 :    
65 :     /*****************************************************************************
66 :     * fwd dcl
67 :     ****************************************************************************/
68 :     extern void xvid_Init_QP();
69 :    
70 : edgomez 1.3 extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref; /* for P-frames */
71 :     extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */
72 :    
73 : edgomez 1.2 extern XVID_QP_FUNCS xvid_QP_Funcs_C; /* for P-frames */
74 :     extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C; /* for B-frames */
75 :    
76 :     #ifdef ARCH_IS_IA32
77 :     extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
78 :     extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
79 :     #endif
80 :    
81 : edgomez 1.5 #ifdef ARCH_IS_PPC
82 :     extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
83 :     extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
84 :     #endif
85 :    
86 : edgomez 1.6 #ifdef ARCH_IS_X86_64
87 :     extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
88 :     extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
89 :     #endif
90 :    
91 : edgomez 1.2 extern XVID_QP_FUNCS *xvid_QP_Funcs; /* <- main pointer for enc/dec structure */
92 :     extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */
93 :    
94 :     /*****************************************************************************
95 :     * macros
96 :     ****************************************************************************/
97 :    
98 :     /*****************************************************************************
99 :    
100 :     Passes to be performed
101 :    
102 :     case 0: copy
103 :     case 2: h-pass
104 :     case 1/3: h-pass + h-avrg
105 :     case 8: v-pass
106 :     case 10: h-pass + v-pass
107 :     case 9/11: h-pass + h-avrg + v-pass
108 :     case 4/12: v-pass + v-avrg
109 :     case 6/14: h-pass + v-pass + v-avrg
110 :     case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg
111 :    
112 :     ****************************************************************************/
113 :    
/* 16x16 quarterpel interpolation (P-frames, plain store into cur).
 *
 *   cur      - destination frame; predicted block written at (x,y)
 *   refn     - reference frame the prediction is read from
 *   refh     - scratch area, needs at least 16 x stride bytes
 *   refv     - unused by this function
 *   refhv    - unused by this function
 *   x, y     - block position in pixels
 *   dx, dy   - motion vector in quarter-pel units
 *   stride   - line stride of both frames
 *   rounding - MPEG-4 rounding flag, forwarded to the filter passes
 *
 * The 2-bit fractional parts of (dx,dy) select copy, a horizontal pass,
 * a vertical pass, or both (see the pass table above). */
static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
							uint8_t * const refn,
							uint8_t * const refh,
							uint8_t * const refv,
							uint8_t * const refhv,
							const uint32_t x, const uint32_t y,
							const int32_t dx, const int dy,
							const uint32_t stride,
							const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	/* absolute reference position in quarter-pel units */
	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	/* bits 0-1: horizontal fraction, bits 2-3: vertical fraction */
	quads = (dx&3) | ((dy&3)<<2);

	/* integer part of the reference position, rounded towards minus
	 * infinity ('/' truncates towards zero, so fix up negatives) */
	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* integer position: copy the four 8x8 quadrants verbatim */
		transfer8x8_copy(dst, src, stride);
		transfer8x8_copy(dst+8, src+8, stride);
		transfer8x8_copy(dst+8*stride, src+8*stride, stride);
		transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 16, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 5:
		/* two-pass cases: horizontal pass into the scratch buffer over
		 * 17 lines (16+1 -- the extra line presumably feeds the vertical
		 * filter; confirm in the pass implementations), then the
		 * vertical pass into dst */
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	}
}
215 :    
/* 16x16 quarterpel interpolation, blending variant (B-frames).
 *
 * Same parameters and dispatch as interpolate16x16_quarterpel, but the
 * final pass comes from xvid_QP_Add_Funcs (the B-frame table per the
 * declarations above). In two-pass cases the intermediate horizontal
 * pass uses the plain table (xvid_QP_Funcs) so that only the final
 * vertical pass touches dst. */
static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
								uint8_t * const refn,
								uint8_t * const refh,
								uint8_t * const refv,
								uint8_t * const refhv,
								const uint32_t x, const uint32_t y,
								const int32_t dx, const int dy,
								const uint32_t stride,
								const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;       /* blending passes, write into dst */
	const XVID_QP_FUNCS *Ops_Copy;  /* plain passes, for the scratch buffer */

	int32_t x_int, y_int;

	/* absolute reference position in quarter-pel units */
	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Add_Funcs;
	Ops_Copy = xvid_QP_Funcs;
	/* bits 0-1: horizontal fraction, bits 2-3: vertical fraction */
	quads = (dx&3) | ((dy&3)<<2);

	/* integer reference position, rounded towards minus infinity */
	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* NB: no halfpel is involved despite the helper's name -- it
		 * just averages src into dst (see the matching note in the
		 * 8x8 variant below) */
		interpolate8x8_halfpel_add(dst, src, stride, rounding);
		interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 16, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 5:
		/* two-pass cases: plain horizontal pass (17 lines) into scratch,
		 * blending vertical pass into dst */
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	}
}
321 :    
/* 16x8 quarterpel interpolation (P-frames).
 *
 * Same parameters and dispatch as interpolate16x16_quarterpel, for a
 * 16-wide by 8-tall block. Horizontal cases reuse the 16-wide H passes
 * over 8 (or 9) lines; vertical cases use the 8-wide V_*_8 passes with
 * length 16. NOTE(review): that '16' mirrors the existing call pattern
 * -- confirm the length semantics against the V_*_8 implementations. */
static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
						   uint8_t * const refn,
						   uint8_t * const refh,
						   uint8_t * const refv,
						   uint8_t * const refhv,
						   const uint32_t x, const uint32_t y,
						   const int32_t dx, const int dy,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	/* absolute reference position in quarter-pel units */
	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	/* bits 0-1: horizontal fraction, bits 2-3: vertical fraction */
	quads = (dx&3) | ((dy&3)<<2);

	/* integer reference position, rounded towards minus infinity */
	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* integer position: copy the two side-by-side 8x8 halves */
		transfer8x8_copy( dst, src, stride);
		transfer8x8_copy( dst+8, src+8, stride);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 8, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 8, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding);
		break;
	case 5:
		/* two-pass cases: horizontal pass into scratch over 9 lines
		 * (8+1 extra line for the vertical filter), then vertical */
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass_8(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	}
}
421 :    
422 :     static void __inline
423 : edgomez 1.3 interpolate8x8_quarterpel(uint8_t * const cur,
424 : edgomez 1.2 uint8_t * const refn,
425 :     uint8_t * const refh,
426 :     uint8_t * const refv,
427 :     uint8_t * const refhv,
428 :     const uint32_t x, const uint32_t y,
429 :     const int32_t dx, const int dy,
430 :     const uint32_t stride,
431 :     const uint32_t rounding)
432 :     {
433 :     const uint8_t *src;
434 :     uint8_t *dst;
435 :     uint8_t *tmp;
436 :     int32_t quads;
437 :     const XVID_QP_FUNCS *Ops;
438 :    
439 :     int32_t x_int, y_int;
440 :    
441 : edgomez 1.4 const int32_t xRef = (int)x*4 + dx;
442 :     const int32_t yRef = (int)y*4 + dy;
443 : edgomez 1.2
444 : edgomez 1.3 Ops = xvid_QP_Funcs;
445 : edgomez 1.2 quads = (dx&3) | ((dy&3)<<2);
446 :    
447 :     x_int = xRef/4;
448 :     if (xRef < 0 && xRef % 4)
449 :     x_int--;
450 :    
451 :     y_int = yRef/4;
452 :     if (yRef < 0 && yRef % 4)
453 :     y_int--;
454 :    
455 :     dst = cur + y * stride + x;
456 : edgomez 1.4 src = refn + y_int * (int)stride + x_int;
457 : edgomez 1.2
458 :     tmp = refh; /* we need at least a 16 x stride scratch block */
459 :    
460 :     switch(quads) {
461 :     case 0:
462 :     transfer8x8_copy( dst, src, stride);
463 :     break;
464 :     case 1:
465 :     Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
466 :     break;
467 :     case 2:
468 :     Ops->H_Pass_8(dst, src, 8, stride, rounding);
469 :     break;
470 :     case 3:
471 :     Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
472 :     break;
473 :     case 4:
474 :     Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
475 :     break;
476 :     case 5:
477 :     Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
478 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
479 :     break;
480 :     case 6:
481 :     Ops->H_Pass_8(tmp, src, 9, stride, rounding);
482 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
483 :     break;
484 :     case 7:
485 :     Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
486 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
487 :     break;
488 :     case 8:
489 :     Ops->V_Pass_8(dst, src, 8, stride, rounding);
490 :     break;
491 :     case 9:
492 :     Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
493 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
494 :     break;
495 :     case 10:
496 :     Ops->H_Pass_8(tmp, src, 9, stride, rounding);
497 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
498 :     break;
499 :     case 11:
500 :     Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
501 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
502 :     break;
503 :     case 12:
504 :     Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
505 :     break;
506 :     case 13:
507 :     Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
508 :     Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
509 :     break;
510 :     case 14:
511 :     Ops->H_Pass_8(tmp, src, 9, stride, rounding);
512 :     Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding);
513 :     break;
514 :     case 15:
515 :     Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
516 :     Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
517 :     break;
518 :     }
519 :     }
520 :    
521 : edgomez 1.3 static void __inline
522 :     interpolate8x8_add_quarterpel(uint8_t * const cur,
523 :     uint8_t * const refn,
524 :     uint8_t * const refh,
525 :     uint8_t * const refv,
526 :     uint8_t * const refhv,
527 :     const uint32_t x, const uint32_t y,
528 :     const int32_t dx, const int dy,
529 :     const uint32_t stride,
530 :     const uint32_t rounding)
531 :     {
532 :     const uint8_t *src;
533 :     uint8_t *dst;
534 :     uint8_t *tmp;
535 :     int32_t quads;
536 :     const XVID_QP_FUNCS *Ops;
537 :     const XVID_QP_FUNCS *Ops_Copy;
538 :    
539 :     int32_t x_int, y_int;
540 :    
541 : edgomez 1.4 const int32_t xRef = (int)x*4 + dx;
542 :     const int32_t yRef = (int)y*4 + dy;
543 : edgomez 1.3
544 :     Ops = xvid_QP_Add_Funcs;
545 :     Ops_Copy = xvid_QP_Funcs;
546 :     quads = (dx&3) | ((dy&3)<<2);
547 :    
548 :     x_int = xRef/4;
549 :     if (xRef < 0 && xRef % 4)
550 :     x_int--;
551 :    
552 :     y_int = yRef/4;
553 :     if (yRef < 0 && yRef % 4)
554 :     y_int--;
555 :    
556 :     dst = cur + y * stride + x;
557 : edgomez 1.4 src = refn + y_int * (int)stride + x_int;
558 : edgomez 1.3
559 :     tmp = refh; /* we need at least a 16 x stride scratch block */
560 :    
561 :     switch(quads) {
562 :     case 0:
563 :     /* Misleading function name, there is no halfpel involved
564 :     * just dst and src averaging with rounding=0 */
565 :     interpolate8x8_halfpel_add(dst, src, stride, rounding);
566 :     break;
567 :     case 1:
568 :     Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
569 :     break;
570 :     case 2:
571 :     Ops->H_Pass_8(dst, src, 8, stride, rounding);
572 :     break;
573 :     case 3:
574 :     Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
575 :     break;
576 :     case 4:
577 :     Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
578 :     break;
579 :     case 5:
580 :     Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
581 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
582 :     break;
583 :     case 6:
584 :     Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
585 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
586 :     break;
587 :     case 7:
588 :     Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
589 :     Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
590 :     break;
591 :     case 8:
592 :     Ops->V_Pass_8(dst, src, 8, stride, rounding);
593 :     break;
594 :     case 9:
595 :     Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
596 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
597 :     break;
598 :     case 10:
599 :     Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
600 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
601 :     break;
602 :     case 11:
603 :     Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
604 :     Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
605 :     break;
606 :     case 12:
607 :     Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
608 :     break;
609 :     case 13:
610 :     Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
611 :     Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
612 :     break;
613 :     case 14:
614 :     Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
615 :     Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding);
616 :     break;
617 :     case 15:
618 :     Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
619 :     Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
620 :     break;
621 :     }
622 :     }
623 :    
624 : edgomez 1.2 #endif /* _XVID_QPEL_H_ */

No admin address has been configured
ViewVC Help
Powered by ViewVC 1.0.4