/* qpel.h -- recovered from a ViewVC CVS export (revision 1.5). */
/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - QPel interpolation -
 *
 *  Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 *  This program is free software ; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation ; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program ; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id$
 *
 ****************************************************************************/
25 : | |||
26 : | #ifndef _XVID_QPEL_H_ | ||
27 : | #define _XVID_QPEL_H_ | ||
28 : | |||
29 : | edgomez | 1.3 | #include "interpolate8x8.h" |
30 : | edgomez | 1.2 | #include "../utils/mem_transfer.h" |
31 : | |||
32 : | /***************************************************************************** | ||
33 : | * Signatures | ||
34 : | ****************************************************************************/ | ||
35 : | |||
36 : | #define XVID_QP_PASS_SIGNATURE(NAME) \ | ||
37 : | void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding) | ||
38 : | |||
39 : | typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS); | ||
40 : | |||
41 : | /* We put everything in a single struct so it can easily be passed | ||
42 : | * to prediction functions as a whole... */ | ||
43 : | |||
44 : | typedef struct _XVID_QP_FUNCS { | ||
45 : | |||
46 : | /* filter for QPel 16x? prediction */ | ||
47 : | |||
48 : | XVID_QP_PASS *H_Pass; | ||
49 : | XVID_QP_PASS *H_Pass_Avrg; | ||
50 : | XVID_QP_PASS *H_Pass_Avrg_Up; | ||
51 : | XVID_QP_PASS *V_Pass; | ||
52 : | XVID_QP_PASS *V_Pass_Avrg; | ||
53 : | XVID_QP_PASS *V_Pass_Avrg_Up; | ||
54 : | |||
55 : | /* filter for QPel 8x? prediction */ | ||
56 : | |||
57 : | XVID_QP_PASS *H_Pass_8; | ||
58 : | XVID_QP_PASS *H_Pass_Avrg_8; | ||
59 : | XVID_QP_PASS *H_Pass_Avrg_Up_8; | ||
60 : | XVID_QP_PASS *V_Pass_8; | ||
61 : | XVID_QP_PASS *V_Pass_Avrg_8; | ||
62 : | XVID_QP_PASS *V_Pass_Avrg_Up_8; | ||
63 : | } XVID_QP_FUNCS; | ||
64 : | |||
65 : | /***************************************************************************** | ||
66 : | * fwd dcl | ||
67 : | ****************************************************************************/ | ||
68 : | extern void xvid_Init_QP(); | ||
69 : | |||
70 : | edgomez | 1.3 | extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref; /* for P-frames */ |
71 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */ | ||
72 : | |||
73 : | edgomez | 1.2 | extern XVID_QP_FUNCS xvid_QP_Funcs_C; /* for P-frames */ |
74 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C; /* for B-frames */ | ||
75 : | |||
76 : | #ifdef ARCH_IS_IA32 | ||
77 : | extern XVID_QP_FUNCS xvid_QP_Funcs_mmx; | ||
78 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx; | ||
79 : | #endif | ||
80 : | |||
81 : | edgomez | 1.5 | #ifdef ARCH_IS_PPC |
82 : | extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C; | ||
83 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C; | ||
84 : | #endif | ||
85 : | |||
86 : | edgomez | 1.2 | extern XVID_QP_FUNCS *xvid_QP_Funcs; /* <- main pointer for enc/dec structure */ |
87 : | extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */ | ||
88 : | |||
/*****************************************************************************
 * macros
 ****************************************************************************/

/*****************************************************************************

	Passes to be performed, indexed by quads = (dx&3) | ((dy&3)<<2):

	case 0:         copy
	case 2:         h-pass
	case 1/3:       h-pass + h-avrg
	case 8:         v-pass
	case 10:        h-pass + v-pass
	case 9/11:      h-pass + h-avrg + v-pass
	case 4/12:      v-pass + v-avrg
	case 6/14:      h-pass + v-pass + v-avrg
	case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg

 ****************************************************************************/
108 : | |||
109 : | edgomez | 1.3 | static void __inline |
110 : | interpolate16x16_quarterpel(uint8_t * const cur, | ||
111 : | edgomez | 1.2 | uint8_t * const refn, |
112 : | uint8_t * const refh, | ||
113 : | uint8_t * const refv, | ||
114 : | uint8_t * const refhv, | ||
115 : | const uint32_t x, const uint32_t y, | ||
116 : | const int32_t dx, const int dy, | ||
117 : | const uint32_t stride, | ||
118 : | const uint32_t rounding) | ||
119 : | { | ||
120 : | const uint8_t *src; | ||
121 : | uint8_t *dst; | ||
122 : | uint8_t *tmp; | ||
123 : | int32_t quads; | ||
124 : | const XVID_QP_FUNCS *Ops; | ||
125 : | |||
126 : | int32_t x_int, y_int; | ||
127 : | |||
128 : | edgomez | 1.4 | const int32_t xRef = (int)x*4 + dx; |
129 : | const int32_t yRef = (int)y*4 + dy; | ||
130 : | edgomez | 1.2 | |
131 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
132 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
133 : | |||
134 : | x_int = xRef/4; | ||
135 : | if (xRef < 0 && xRef % 4) | ||
136 : | x_int--; | ||
137 : | |||
138 : | y_int = yRef/4; | ||
139 : | if (yRef < 0 && yRef % 4) | ||
140 : | y_int--; | ||
141 : | |||
142 : | dst = cur + y * stride + x; | ||
143 : | edgomez | 1.4 | src = refn + y_int * (int)stride + x_int; |
144 : | edgomez | 1.2 | |
145 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
146 : | |||
147 : | switch(quads) { | ||
148 : | case 0: | ||
149 : | edgomez | 1.3 | transfer8x8_copy(dst, src, stride); |
150 : | transfer8x8_copy(dst+8, src+8, stride); | ||
151 : | transfer8x8_copy(dst+8*stride, src+8*stride, stride); | ||
152 : | transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride); | ||
153 : | edgomez | 1.2 | break; |
154 : | case 1: | ||
155 : | Ops->H_Pass_Avrg(dst, src, 16, stride, rounding); | ||
156 : | break; | ||
157 : | case 2: | ||
158 : | Ops->H_Pass(dst, src, 16, stride, rounding); | ||
159 : | break; | ||
160 : | case 3: | ||
161 : | Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
162 : | break; | ||
163 : | case 4: | ||
164 : | Ops->V_Pass_Avrg(dst, src, 16, stride, rounding); | ||
165 : | break; | ||
166 : | case 5: | ||
167 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
168 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
169 : | break; | ||
170 : | case 6: | ||
171 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
172 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
173 : | break; | ||
174 : | case 7: | ||
175 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
176 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
177 : | break; | ||
178 : | case 8: | ||
179 : | Ops->V_Pass(dst, src, 16, stride, rounding); | ||
180 : | break; | ||
181 : | case 9: | ||
182 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
183 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
184 : | break; | ||
185 : | case 10: | ||
186 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
187 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
188 : | break; | ||
189 : | case 11: | ||
190 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
191 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
192 : | break; | ||
193 : | case 12: | ||
194 : | Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
195 : | break; | ||
196 : | case 13: | ||
197 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
198 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
199 : | break; | ||
200 : | case 14: | ||
201 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
202 : | Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding); | ||
203 : | break; | ||
204 : | case 15: | ||
205 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
206 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
207 : | break; | ||
208 : | } | ||
209 : | } | ||
210 : | |||
211 : | edgomez | 1.3 | static void __inline |
212 : | interpolate16x16_add_quarterpel(uint8_t * const cur, | ||
213 : | uint8_t * const refn, | ||
214 : | uint8_t * const refh, | ||
215 : | uint8_t * const refv, | ||
216 : | uint8_t * const refhv, | ||
217 : | const uint32_t x, const uint32_t y, | ||
218 : | const int32_t dx, const int dy, | ||
219 : | const uint32_t stride, | ||
220 : | const uint32_t rounding) | ||
221 : | { | ||
222 : | const uint8_t *src; | ||
223 : | uint8_t *dst; | ||
224 : | uint8_t *tmp; | ||
225 : | int32_t quads; | ||
226 : | const XVID_QP_FUNCS *Ops; | ||
227 : | const XVID_QP_FUNCS *Ops_Copy; | ||
228 : | |||
229 : | int32_t x_int, y_int; | ||
230 : | |||
231 : | edgomez | 1.4 | const int32_t xRef = (int)x*4 + dx; |
232 : | const int32_t yRef = (int)y*4 + dy; | ||
233 : | edgomez | 1.3 | |
234 : | Ops = xvid_QP_Add_Funcs; | ||
235 : | Ops_Copy = xvid_QP_Funcs; | ||
236 : | quads = (dx&3) | ((dy&3)<<2); | ||
237 : | |||
238 : | x_int = xRef/4; | ||
239 : | if (xRef < 0 && xRef % 4) | ||
240 : | x_int--; | ||
241 : | |||
242 : | y_int = yRef/4; | ||
243 : | if (yRef < 0 && yRef % 4) | ||
244 : | y_int--; | ||
245 : | |||
246 : | dst = cur + y * stride + x; | ||
247 : | edgomez | 1.4 | src = refn + y_int * (int)stride + x_int; |
248 : | edgomez | 1.3 | |
249 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
250 : | |||
251 : | switch(quads) { | ||
252 : | case 0: | ||
253 : | /* NB: there is no halfpel involved ! the name's function can be | ||
254 : | * misleading */ | ||
255 : | interpolate8x8_halfpel_add(dst, src, stride, rounding); | ||
256 : | interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding); | ||
257 : | interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding); | ||
258 : | interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding); | ||
259 : | break; | ||
260 : | case 1: | ||
261 : | Ops->H_Pass_Avrg(dst, src, 16, stride, rounding); | ||
262 : | break; | ||
263 : | case 2: | ||
264 : | Ops->H_Pass(dst, src, 16, stride, rounding); | ||
265 : | break; | ||
266 : | case 3: | ||
267 : | Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
268 : | break; | ||
269 : | case 4: | ||
270 : | Ops->V_Pass_Avrg(dst, src, 16, stride, rounding); | ||
271 : | break; | ||
272 : | case 5: | ||
273 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
274 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
275 : | break; | ||
276 : | case 6: | ||
277 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
278 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
279 : | break; | ||
280 : | case 7: | ||
281 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
282 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
283 : | break; | ||
284 : | case 8: | ||
285 : | Ops->V_Pass(dst, src, 16, stride, rounding); | ||
286 : | break; | ||
287 : | case 9: | ||
288 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
289 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
290 : | break; | ||
291 : | case 10: | ||
292 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
293 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
294 : | break; | ||
295 : | case 11: | ||
296 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
297 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
298 : | break; | ||
299 : | case 12: | ||
300 : | Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
301 : | break; | ||
302 : | case 13: | ||
303 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
304 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
305 : | break; | ||
306 : | case 14: | ||
307 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
308 : | Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding); | ||
309 : | break; | ||
310 : | case 15: | ||
311 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
312 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
313 : | break; | ||
314 : | } | ||
315 : | } | ||
316 : | |||
317 : | edgomez | 1.2 | static void __inline |
318 : | edgomez | 1.3 | interpolate16x8_quarterpel(uint8_t * const cur, |
319 : | edgomez | 1.2 | uint8_t * const refn, |
320 : | uint8_t * const refh, | ||
321 : | uint8_t * const refv, | ||
322 : | uint8_t * const refhv, | ||
323 : | const uint32_t x, const uint32_t y, | ||
324 : | const int32_t dx, const int dy, | ||
325 : | const uint32_t stride, | ||
326 : | const uint32_t rounding) | ||
327 : | { | ||
328 : | const uint8_t *src; | ||
329 : | uint8_t *dst; | ||
330 : | uint8_t *tmp; | ||
331 : | int32_t quads; | ||
332 : | const XVID_QP_FUNCS *Ops; | ||
333 : | |||
334 : | int32_t x_int, y_int; | ||
335 : | |||
336 : | edgomez | 1.4 | const int32_t xRef = (int)x*4 + dx; |
337 : | const int32_t yRef = (int)y*4 + dy; | ||
338 : | edgomez | 1.2 | |
339 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
340 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
341 : | |||
342 : | x_int = xRef/4; | ||
343 : | if (xRef < 0 && xRef % 4) | ||
344 : | x_int--; | ||
345 : | |||
346 : | y_int = yRef/4; | ||
347 : | if (yRef < 0 && yRef % 4) | ||
348 : | y_int--; | ||
349 : | |||
350 : | dst = cur + y * stride + x; | ||
351 : | edgomez | 1.4 | src = refn + y_int * (int)stride + x_int; |
352 : | edgomez | 1.2 | |
353 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
354 : | |||
355 : | switch(quads) { | ||
356 : | case 0: | ||
357 : | transfer8x8_copy( dst, src, stride); | ||
358 : | transfer8x8_copy( dst+8, src+8, stride); | ||
359 : | break; | ||
360 : | case 1: | ||
361 : | Ops->H_Pass_Avrg(dst, src, 8, stride, rounding); | ||
362 : | break; | ||
363 : | case 2: | ||
364 : | Ops->H_Pass(dst, src, 8, stride, rounding); | ||
365 : | break; | ||
366 : | case 3: | ||
367 : | Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding); | ||
368 : | break; | ||
369 : | case 4: | ||
370 : | Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding); | ||
371 : | break; | ||
372 : | case 5: | ||
373 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
374 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
375 : | break; | ||
376 : | case 6: | ||
377 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
378 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
379 : | break; | ||
380 : | case 7: | ||
381 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
382 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
383 : | break; | ||
384 : | case 8: | ||
385 : | Ops->V_Pass_8(dst, src, 16, stride, rounding); | ||
386 : | break; | ||
387 : | case 9: | ||
388 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
389 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
390 : | break; | ||
391 : | case 10: | ||
392 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
393 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
394 : | break; | ||
395 : | case 11: | ||
396 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
397 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
398 : | break; | ||
399 : | case 12: | ||
400 : | Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding); | ||
401 : | break; | ||
402 : | case 13: | ||
403 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
404 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding); | ||
405 : | break; | ||
406 : | case 14: | ||
407 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
408 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 16, stride, rounding); | ||
409 : | break; | ||
410 : | case 15: | ||
411 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
412 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding); | ||
413 : | break; | ||
414 : | } | ||
415 : | } | ||
416 : | |||
417 : | static void __inline | ||
418 : | edgomez | 1.3 | interpolate8x8_quarterpel(uint8_t * const cur, |
419 : | edgomez | 1.2 | uint8_t * const refn, |
420 : | uint8_t * const refh, | ||
421 : | uint8_t * const refv, | ||
422 : | uint8_t * const refhv, | ||
423 : | const uint32_t x, const uint32_t y, | ||
424 : | const int32_t dx, const int dy, | ||
425 : | const uint32_t stride, | ||
426 : | const uint32_t rounding) | ||
427 : | { | ||
428 : | const uint8_t *src; | ||
429 : | uint8_t *dst; | ||
430 : | uint8_t *tmp; | ||
431 : | int32_t quads; | ||
432 : | const XVID_QP_FUNCS *Ops; | ||
433 : | |||
434 : | int32_t x_int, y_int; | ||
435 : | |||
436 : | edgomez | 1.4 | const int32_t xRef = (int)x*4 + dx; |
437 : | const int32_t yRef = (int)y*4 + dy; | ||
438 : | edgomez | 1.2 | |
439 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
440 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
441 : | |||
442 : | x_int = xRef/4; | ||
443 : | if (xRef < 0 && xRef % 4) | ||
444 : | x_int--; | ||
445 : | |||
446 : | y_int = yRef/4; | ||
447 : | if (yRef < 0 && yRef % 4) | ||
448 : | y_int--; | ||
449 : | |||
450 : | dst = cur + y * stride + x; | ||
451 : | edgomez | 1.4 | src = refn + y_int * (int)stride + x_int; |
452 : | edgomez | 1.2 | |
453 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
454 : | |||
455 : | switch(quads) { | ||
456 : | case 0: | ||
457 : | transfer8x8_copy( dst, src, stride); | ||
458 : | break; | ||
459 : | case 1: | ||
460 : | Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
461 : | break; | ||
462 : | case 2: | ||
463 : | Ops->H_Pass_8(dst, src, 8, stride, rounding); | ||
464 : | break; | ||
465 : | case 3: | ||
466 : | Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
467 : | break; | ||
468 : | case 4: | ||
469 : | Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
470 : | break; | ||
471 : | case 5: | ||
472 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
473 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
474 : | break; | ||
475 : | case 6: | ||
476 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
477 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
478 : | break; | ||
479 : | case 7: | ||
480 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
481 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
482 : | break; | ||
483 : | case 8: | ||
484 : | Ops->V_Pass_8(dst, src, 8, stride, rounding); | ||
485 : | break; | ||
486 : | case 9: | ||
487 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
488 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
489 : | break; | ||
490 : | case 10: | ||
491 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
492 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
493 : | break; | ||
494 : | case 11: | ||
495 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
496 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
497 : | break; | ||
498 : | case 12: | ||
499 : | Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
500 : | break; | ||
501 : | case 13: | ||
502 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
503 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
504 : | break; | ||
505 : | case 14: | ||
506 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
507 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding); | ||
508 : | break; | ||
509 : | case 15: | ||
510 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
511 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
512 : | break; | ||
513 : | } | ||
514 : | } | ||
515 : | |||
516 : | edgomez | 1.3 | static void __inline |
517 : | interpolate8x8_add_quarterpel(uint8_t * const cur, | ||
518 : | uint8_t * const refn, | ||
519 : | uint8_t * const refh, | ||
520 : | uint8_t * const refv, | ||
521 : | uint8_t * const refhv, | ||
522 : | const uint32_t x, const uint32_t y, | ||
523 : | const int32_t dx, const int dy, | ||
524 : | const uint32_t stride, | ||
525 : | const uint32_t rounding) | ||
526 : | { | ||
527 : | const uint8_t *src; | ||
528 : | uint8_t *dst; | ||
529 : | uint8_t *tmp; | ||
530 : | int32_t quads; | ||
531 : | const XVID_QP_FUNCS *Ops; | ||
532 : | const XVID_QP_FUNCS *Ops_Copy; | ||
533 : | |||
534 : | int32_t x_int, y_int; | ||
535 : | |||
536 : | edgomez | 1.4 | const int32_t xRef = (int)x*4 + dx; |
537 : | const int32_t yRef = (int)y*4 + dy; | ||
538 : | edgomez | 1.3 | |
539 : | Ops = xvid_QP_Add_Funcs; | ||
540 : | Ops_Copy = xvid_QP_Funcs; | ||
541 : | quads = (dx&3) | ((dy&3)<<2); | ||
542 : | |||
543 : | x_int = xRef/4; | ||
544 : | if (xRef < 0 && xRef % 4) | ||
545 : | x_int--; | ||
546 : | |||
547 : | y_int = yRef/4; | ||
548 : | if (yRef < 0 && yRef % 4) | ||
549 : | y_int--; | ||
550 : | |||
551 : | dst = cur + y * stride + x; | ||
552 : | edgomez | 1.4 | src = refn + y_int * (int)stride + x_int; |
553 : | edgomez | 1.3 | |
554 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
555 : | |||
556 : | switch(quads) { | ||
557 : | case 0: | ||
558 : | /* Misleading function name, there is no halfpel involved | ||
559 : | * just dst and src averaging with rounding=0 */ | ||
560 : | interpolate8x8_halfpel_add(dst, src, stride, rounding); | ||
561 : | break; | ||
562 : | case 1: | ||
563 : | Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
564 : | break; | ||
565 : | case 2: | ||
566 : | Ops->H_Pass_8(dst, src, 8, stride, rounding); | ||
567 : | break; | ||
568 : | case 3: | ||
569 : | Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
570 : | break; | ||
571 : | case 4: | ||
572 : | Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
573 : | break; | ||
574 : | case 5: | ||
575 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
576 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
577 : | break; | ||
578 : | case 6: | ||
579 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
580 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
581 : | break; | ||
582 : | case 7: | ||
583 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
584 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
585 : | break; | ||
586 : | case 8: | ||
587 : | Ops->V_Pass_8(dst, src, 8, stride, rounding); | ||
588 : | break; | ||
589 : | case 9: | ||
590 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
591 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
592 : | break; | ||
593 : | case 10: | ||
594 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
595 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
596 : | break; | ||
597 : | case 11: | ||
598 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
599 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
600 : | break; | ||
601 : | case 12: | ||
602 : | Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
603 : | break; | ||
604 : | case 13: | ||
605 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
606 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
607 : | break; | ||
608 : | case 14: | ||
609 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
610 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding); | ||
611 : | break; | ||
612 : | case 15: | ||
613 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
614 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
615 : | break; | ||
616 : | } | ||
617 : | } | ||
618 : | |||
619 : | edgomez | 1.2 | #endif /* _XVID_QPEL_H_ */ |
/* End of file (ViewVC export footer removed). */