Parent Directory
|
Revision Log
Revision 1.3 - (view) (download)
1 : | edgomez | 1.2 | /***************************************************************************** |
2 : | * | ||
3 : | * XVID MPEG-4 VIDEO CODEC | ||
4 : | * - QPel interpolation - | ||
5 : | * | ||
6 : | * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net> | ||
7 : | * | ||
8 : | * This program is free software ; you can redistribute it and/or modify | ||
9 : | * it under the terms of the GNU General Public License as published by | ||
10 : | * the Free Software Foundation ; either version 2 of the License, or | ||
11 : | * (at your option) any later version. | ||
12 : | * | ||
13 : | * This program is distributed in the hope that it will be useful, | ||
14 : | * but WITHOUT ANY WARRANTY ; without even the implied warranty of | ||
15 : | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 : | * GNU General Public License for more details. | ||
17 : | * | ||
18 : | * You should have received a copy of the GNU General Public License | ||
19 : | * along with this program ; if not, write to the Free Software | ||
20 : | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 : | * | ||
22 : | * $Id$ | ||
23 : | * | ||
24 : | ****************************************************************************/ | ||
25 : | |||
26 : | #ifndef _XVID_QPEL_H_ | ||
27 : | #define _XVID_QPEL_H_ | ||
28 : | |||
29 : | edgomez | 1.3 | #include "interpolate8x8.h" |
30 : | edgomez | 1.2 | #include "../utils/mem_transfer.h" |
31 : | |||
32 : | /***************************************************************************** | ||
33 : | * Signatures | ||
34 : | ****************************************************************************/ | ||
35 : | |||
/* Expands to the prototype shared by every QPel filter pass:
 *   dst      - output pixel block
 *   src      - input pixel block
 *   length   - pass-specific line count (callers below use 16/17 or 8/9)
 *   BpS      - bytes per stride of src/dst
 *   rounding - MPEG-4 rounding control (0 or 1 at the call sites below)
 */
#define XVID_QP_PASS_SIGNATURE(NAME) \
void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding)

/* Function type of a single QPel filter pass (H or V direction). */
typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);
40 : | |||
/* We put everything in a single struct so it can easily be passed
 * to prediction functions as a whole...
 *
 * Naming follows the quarter-pel fractional offset used by the
 * dispatchers below: offset 1 -> *_Avrg, offset 2 -> plain pass,
 * offset 3 -> *_Avrg_Up (see the "Passes to be performed" table). */

typedef struct _XVID_QP_FUNCS {

	/* filter for QPel 16x? prediction (16-wide blocks) */

	XVID_QP_PASS *H_Pass;
	XVID_QP_PASS *H_Pass_Avrg;
	XVID_QP_PASS *H_Pass_Avrg_Up;
	XVID_QP_PASS *V_Pass;
	XVID_QP_PASS *V_Pass_Avrg;
	XVID_QP_PASS *V_Pass_Avrg_Up;

	/* filter for QPel 8x? prediction (8-wide blocks) */

	XVID_QP_PASS *H_Pass_8;
	XVID_QP_PASS *H_Pass_Avrg_8;
	XVID_QP_PASS *H_Pass_Avrg_Up_8;
	XVID_QP_PASS *V_Pass_8;
	XVID_QP_PASS *V_Pass_Avrg_8;
	XVID_QP_PASS *V_Pass_Avrg_Up_8;
} XVID_QP_FUNCS;
64 : | |||
65 : | /***************************************************************************** | ||
66 : | * fwd dcl | ||
67 : | ****************************************************************************/ | ||
68 : | extern void xvid_Init_QP(); | ||
69 : | |||
70 : | edgomez | 1.3 | extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref; /* for P-frames */ |
71 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */ | ||
72 : | |||
73 : | edgomez | 1.2 | extern XVID_QP_FUNCS xvid_QP_Funcs_C; /* for P-frames */ |
74 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C; /* for B-frames */ | ||
75 : | |||
76 : | #ifdef ARCH_IS_IA32 | ||
77 : | extern XVID_QP_FUNCS xvid_QP_Funcs_mmx; | ||
78 : | extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx; | ||
79 : | #endif | ||
80 : | |||
81 : | extern XVID_QP_FUNCS *xvid_QP_Funcs; /* <- main pointer for enc/dec structure */ | ||
82 : | extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */ | ||
83 : | |||
84 : | /***************************************************************************** | ||
85 : | * macros | ||
86 : | ****************************************************************************/ | ||
87 : | |||
88 : | /***************************************************************************** | ||
89 : | |||
90 : | Passes to be performed | ||
91 : | |||
92 : | case 0: copy | ||
93 : | case 2: h-pass | ||
94 : | case 1/3: h-pass + h-avrg | ||
95 : | case 8: v-pass | ||
96 : | case 10: h-pass + v-pass | ||
97 : | case 9/11: h-pass + h-avrg + v-pass | ||
98 : | case 4/12: v-pass + v-avrg | ||
99 : | case 6/14: h-pass + v-pass + v-avrg | ||
100 : | case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg | ||
101 : | |||
102 : | ****************************************************************************/ | ||
103 : | |||
104 : | edgomez | 1.3 | static void __inline |
105 : | interpolate16x16_quarterpel(uint8_t * const cur, | ||
106 : | edgomez | 1.2 | uint8_t * const refn, |
107 : | uint8_t * const refh, | ||
108 : | uint8_t * const refv, | ||
109 : | uint8_t * const refhv, | ||
110 : | const uint32_t x, const uint32_t y, | ||
111 : | const int32_t dx, const int dy, | ||
112 : | const uint32_t stride, | ||
113 : | const uint32_t rounding) | ||
114 : | { | ||
115 : | const uint8_t *src; | ||
116 : | uint8_t *dst; | ||
117 : | uint8_t *tmp; | ||
118 : | int32_t quads; | ||
119 : | const XVID_QP_FUNCS *Ops; | ||
120 : | |||
121 : | int32_t x_int, y_int; | ||
122 : | |||
123 : | const int32_t xRef = x*4 + dx; | ||
124 : | const int32_t yRef = y*4 + dy; | ||
125 : | |||
126 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
127 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
128 : | |||
129 : | x_int = xRef/4; | ||
130 : | if (xRef < 0 && xRef % 4) | ||
131 : | x_int--; | ||
132 : | |||
133 : | y_int = yRef/4; | ||
134 : | if (yRef < 0 && yRef % 4) | ||
135 : | y_int--; | ||
136 : | |||
137 : | dst = cur + y * stride + x; | ||
138 : | src = refn + y_int * stride + x_int; | ||
139 : | |||
140 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
141 : | |||
142 : | switch(quads) { | ||
143 : | case 0: | ||
144 : | edgomez | 1.3 | transfer8x8_copy(dst, src, stride); |
145 : | transfer8x8_copy(dst+8, src+8, stride); | ||
146 : | transfer8x8_copy(dst+8*stride, src+8*stride, stride); | ||
147 : | transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride); | ||
148 : | edgomez | 1.2 | break; |
149 : | case 1: | ||
150 : | Ops->H_Pass_Avrg(dst, src, 16, stride, rounding); | ||
151 : | break; | ||
152 : | case 2: | ||
153 : | Ops->H_Pass(dst, src, 16, stride, rounding); | ||
154 : | break; | ||
155 : | case 3: | ||
156 : | Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
157 : | break; | ||
158 : | case 4: | ||
159 : | Ops->V_Pass_Avrg(dst, src, 16, stride, rounding); | ||
160 : | break; | ||
161 : | case 5: | ||
162 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
163 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
164 : | break; | ||
165 : | case 6: | ||
166 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
167 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
168 : | break; | ||
169 : | case 7: | ||
170 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
171 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
172 : | break; | ||
173 : | case 8: | ||
174 : | Ops->V_Pass(dst, src, 16, stride, rounding); | ||
175 : | break; | ||
176 : | case 9: | ||
177 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
178 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
179 : | break; | ||
180 : | case 10: | ||
181 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
182 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
183 : | break; | ||
184 : | case 11: | ||
185 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
186 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
187 : | break; | ||
188 : | case 12: | ||
189 : | Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
190 : | break; | ||
191 : | case 13: | ||
192 : | Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
193 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
194 : | break; | ||
195 : | case 14: | ||
196 : | Ops->H_Pass(tmp, src, 17, stride, rounding); | ||
197 : | Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding); | ||
198 : | break; | ||
199 : | case 15: | ||
200 : | Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
201 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
202 : | break; | ||
203 : | } | ||
204 : | } | ||
205 : | |||
206 : | edgomez | 1.3 | static void __inline |
207 : | interpolate16x16_add_quarterpel(uint8_t * const cur, | ||
208 : | uint8_t * const refn, | ||
209 : | uint8_t * const refh, | ||
210 : | uint8_t * const refv, | ||
211 : | uint8_t * const refhv, | ||
212 : | const uint32_t x, const uint32_t y, | ||
213 : | const int32_t dx, const int dy, | ||
214 : | const uint32_t stride, | ||
215 : | const uint32_t rounding) | ||
216 : | { | ||
217 : | const uint8_t *src; | ||
218 : | uint8_t *dst; | ||
219 : | uint8_t *tmp; | ||
220 : | int32_t quads; | ||
221 : | const XVID_QP_FUNCS *Ops; | ||
222 : | const XVID_QP_FUNCS *Ops_Copy; | ||
223 : | |||
224 : | int32_t x_int, y_int; | ||
225 : | |||
226 : | const int32_t xRef = x*4 + dx; | ||
227 : | const int32_t yRef = y*4 + dy; | ||
228 : | |||
229 : | Ops = xvid_QP_Add_Funcs; | ||
230 : | Ops_Copy = xvid_QP_Funcs; | ||
231 : | quads = (dx&3) | ((dy&3)<<2); | ||
232 : | |||
233 : | x_int = xRef/4; | ||
234 : | if (xRef < 0 && xRef % 4) | ||
235 : | x_int--; | ||
236 : | |||
237 : | y_int = yRef/4; | ||
238 : | if (yRef < 0 && yRef % 4) | ||
239 : | y_int--; | ||
240 : | |||
241 : | dst = cur + y * stride + x; | ||
242 : | src = refn + y_int * stride + x_int; | ||
243 : | |||
244 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
245 : | |||
246 : | switch(quads) { | ||
247 : | case 0: | ||
248 : | /* NB: there is no halfpel involved ! the name's function can be | ||
249 : | * misleading */ | ||
250 : | interpolate8x8_halfpel_add(dst, src, stride, rounding); | ||
251 : | interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding); | ||
252 : | interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding); | ||
253 : | interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding); | ||
254 : | break; | ||
255 : | case 1: | ||
256 : | Ops->H_Pass_Avrg(dst, src, 16, stride, rounding); | ||
257 : | break; | ||
258 : | case 2: | ||
259 : | Ops->H_Pass(dst, src, 16, stride, rounding); | ||
260 : | break; | ||
261 : | case 3: | ||
262 : | Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
263 : | break; | ||
264 : | case 4: | ||
265 : | Ops->V_Pass_Avrg(dst, src, 16, stride, rounding); | ||
266 : | break; | ||
267 : | case 5: | ||
268 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
269 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
270 : | break; | ||
271 : | case 6: | ||
272 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
273 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
274 : | break; | ||
275 : | case 7: | ||
276 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
277 : | Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding); | ||
278 : | break; | ||
279 : | case 8: | ||
280 : | Ops->V_Pass(dst, src, 16, stride, rounding); | ||
281 : | break; | ||
282 : | case 9: | ||
283 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
284 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
285 : | break; | ||
286 : | case 10: | ||
287 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
288 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
289 : | break; | ||
290 : | case 11: | ||
291 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
292 : | Ops->V_Pass(dst, tmp, 16, stride, rounding); | ||
293 : | break; | ||
294 : | case 12: | ||
295 : | Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding); | ||
296 : | break; | ||
297 : | case 13: | ||
298 : | Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding); | ||
299 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
300 : | break; | ||
301 : | case 14: | ||
302 : | Ops_Copy->H_Pass(tmp, src, 17, stride, rounding); | ||
303 : | Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding); | ||
304 : | break; | ||
305 : | case 15: | ||
306 : | Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding); | ||
307 : | Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding); | ||
308 : | break; | ||
309 : | } | ||
310 : | } | ||
311 : | |||
312 : | edgomez | 1.2 | static void __inline |
313 : | edgomez | 1.3 | interpolate16x8_quarterpel(uint8_t * const cur, |
314 : | edgomez | 1.2 | uint8_t * const refn, |
315 : | uint8_t * const refh, | ||
316 : | uint8_t * const refv, | ||
317 : | uint8_t * const refhv, | ||
318 : | const uint32_t x, const uint32_t y, | ||
319 : | const int32_t dx, const int dy, | ||
320 : | const uint32_t stride, | ||
321 : | const uint32_t rounding) | ||
322 : | { | ||
323 : | const uint8_t *src; | ||
324 : | uint8_t *dst; | ||
325 : | uint8_t *tmp; | ||
326 : | int32_t quads; | ||
327 : | const XVID_QP_FUNCS *Ops; | ||
328 : | |||
329 : | int32_t x_int, y_int; | ||
330 : | |||
331 : | const int32_t xRef = x*4 + dx; | ||
332 : | const int32_t yRef = y*4 + dy; | ||
333 : | |||
334 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
335 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
336 : | |||
337 : | x_int = xRef/4; | ||
338 : | if (xRef < 0 && xRef % 4) | ||
339 : | x_int--; | ||
340 : | |||
341 : | y_int = yRef/4; | ||
342 : | if (yRef < 0 && yRef % 4) | ||
343 : | y_int--; | ||
344 : | |||
345 : | dst = cur + y * stride + x; | ||
346 : | src = refn + y_int * stride + x_int; | ||
347 : | |||
348 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
349 : | |||
350 : | switch(quads) { | ||
351 : | case 0: | ||
352 : | transfer8x8_copy( dst, src, stride); | ||
353 : | transfer8x8_copy( dst+8, src+8, stride); | ||
354 : | break; | ||
355 : | case 1: | ||
356 : | Ops->H_Pass_Avrg(dst, src, 8, stride, rounding); | ||
357 : | break; | ||
358 : | case 2: | ||
359 : | Ops->H_Pass(dst, src, 8, stride, rounding); | ||
360 : | break; | ||
361 : | case 3: | ||
362 : | Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding); | ||
363 : | break; | ||
364 : | case 4: | ||
365 : | Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding); | ||
366 : | break; | ||
367 : | case 5: | ||
368 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
369 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
370 : | break; | ||
371 : | case 6: | ||
372 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
373 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
374 : | break; | ||
375 : | case 7: | ||
376 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
377 : | Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding); | ||
378 : | break; | ||
379 : | case 8: | ||
380 : | Ops->V_Pass_8(dst, src, 16, stride, rounding); | ||
381 : | break; | ||
382 : | case 9: | ||
383 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
384 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
385 : | break; | ||
386 : | case 10: | ||
387 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
388 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
389 : | break; | ||
390 : | case 11: | ||
391 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
392 : | Ops->V_Pass_8(dst, tmp, 16, stride, rounding); | ||
393 : | break; | ||
394 : | case 12: | ||
395 : | Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding); | ||
396 : | break; | ||
397 : | case 13: | ||
398 : | Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding); | ||
399 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding); | ||
400 : | break; | ||
401 : | case 14: | ||
402 : | Ops->H_Pass(tmp, src, 9, stride, rounding); | ||
403 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 16, stride, rounding); | ||
404 : | break; | ||
405 : | case 15: | ||
406 : | Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding); | ||
407 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding); | ||
408 : | break; | ||
409 : | } | ||
410 : | } | ||
411 : | |||
412 : | static void __inline | ||
413 : | edgomez | 1.3 | interpolate8x8_quarterpel(uint8_t * const cur, |
414 : | edgomez | 1.2 | uint8_t * const refn, |
415 : | uint8_t * const refh, | ||
416 : | uint8_t * const refv, | ||
417 : | uint8_t * const refhv, | ||
418 : | const uint32_t x, const uint32_t y, | ||
419 : | const int32_t dx, const int dy, | ||
420 : | const uint32_t stride, | ||
421 : | const uint32_t rounding) | ||
422 : | { | ||
423 : | const uint8_t *src; | ||
424 : | uint8_t *dst; | ||
425 : | uint8_t *tmp; | ||
426 : | int32_t quads; | ||
427 : | const XVID_QP_FUNCS *Ops; | ||
428 : | |||
429 : | int32_t x_int, y_int; | ||
430 : | |||
431 : | const int32_t xRef = x*4 + dx; | ||
432 : | const int32_t yRef = y*4 + dy; | ||
433 : | |||
434 : | edgomez | 1.3 | Ops = xvid_QP_Funcs; |
435 : | edgomez | 1.2 | quads = (dx&3) | ((dy&3)<<2); |
436 : | |||
437 : | x_int = xRef/4; | ||
438 : | if (xRef < 0 && xRef % 4) | ||
439 : | x_int--; | ||
440 : | |||
441 : | y_int = yRef/4; | ||
442 : | if (yRef < 0 && yRef % 4) | ||
443 : | y_int--; | ||
444 : | |||
445 : | dst = cur + y * stride + x; | ||
446 : | src = refn + y_int * stride + x_int; | ||
447 : | |||
448 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
449 : | |||
450 : | switch(quads) { | ||
451 : | case 0: | ||
452 : | transfer8x8_copy( dst, src, stride); | ||
453 : | break; | ||
454 : | case 1: | ||
455 : | Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
456 : | break; | ||
457 : | case 2: | ||
458 : | Ops->H_Pass_8(dst, src, 8, stride, rounding); | ||
459 : | break; | ||
460 : | case 3: | ||
461 : | Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
462 : | break; | ||
463 : | case 4: | ||
464 : | Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
465 : | break; | ||
466 : | case 5: | ||
467 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
468 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
469 : | break; | ||
470 : | case 6: | ||
471 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
472 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
473 : | break; | ||
474 : | case 7: | ||
475 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
476 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
477 : | break; | ||
478 : | case 8: | ||
479 : | Ops->V_Pass_8(dst, src, 8, stride, rounding); | ||
480 : | break; | ||
481 : | case 9: | ||
482 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
483 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
484 : | break; | ||
485 : | case 10: | ||
486 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
487 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
488 : | break; | ||
489 : | case 11: | ||
490 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
491 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
492 : | break; | ||
493 : | case 12: | ||
494 : | Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
495 : | break; | ||
496 : | case 13: | ||
497 : | Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
498 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
499 : | break; | ||
500 : | case 14: | ||
501 : | Ops->H_Pass_8(tmp, src, 9, stride, rounding); | ||
502 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding); | ||
503 : | break; | ||
504 : | case 15: | ||
505 : | Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
506 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
507 : | break; | ||
508 : | } | ||
509 : | } | ||
510 : | |||
511 : | edgomez | 1.3 | static void __inline |
512 : | interpolate8x8_add_quarterpel(uint8_t * const cur, | ||
513 : | uint8_t * const refn, | ||
514 : | uint8_t * const refh, | ||
515 : | uint8_t * const refv, | ||
516 : | uint8_t * const refhv, | ||
517 : | const uint32_t x, const uint32_t y, | ||
518 : | const int32_t dx, const int dy, | ||
519 : | const uint32_t stride, | ||
520 : | const uint32_t rounding) | ||
521 : | { | ||
522 : | const uint8_t *src; | ||
523 : | uint8_t *dst; | ||
524 : | uint8_t *tmp; | ||
525 : | int32_t quads; | ||
526 : | const XVID_QP_FUNCS *Ops; | ||
527 : | const XVID_QP_FUNCS *Ops_Copy; | ||
528 : | |||
529 : | int32_t x_int, y_int; | ||
530 : | |||
531 : | const int32_t xRef = x*4 + dx; | ||
532 : | const int32_t yRef = y*4 + dy; | ||
533 : | |||
534 : | Ops = xvid_QP_Add_Funcs; | ||
535 : | Ops_Copy = xvid_QP_Funcs; | ||
536 : | quads = (dx&3) | ((dy&3)<<2); | ||
537 : | |||
538 : | x_int = xRef/4; | ||
539 : | if (xRef < 0 && xRef % 4) | ||
540 : | x_int--; | ||
541 : | |||
542 : | y_int = yRef/4; | ||
543 : | if (yRef < 0 && yRef % 4) | ||
544 : | y_int--; | ||
545 : | |||
546 : | dst = cur + y * stride + x; | ||
547 : | src = refn + y_int * stride + x_int; | ||
548 : | |||
549 : | tmp = refh; /* we need at least a 16 x stride scratch block */ | ||
550 : | |||
551 : | switch(quads) { | ||
552 : | case 0: | ||
553 : | /* Misleading function name, there is no halfpel involved | ||
554 : | * just dst and src averaging with rounding=0 */ | ||
555 : | interpolate8x8_halfpel_add(dst, src, stride, rounding); | ||
556 : | break; | ||
557 : | case 1: | ||
558 : | Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
559 : | break; | ||
560 : | case 2: | ||
561 : | Ops->H_Pass_8(dst, src, 8, stride, rounding); | ||
562 : | break; | ||
563 : | case 3: | ||
564 : | Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
565 : | break; | ||
566 : | case 4: | ||
567 : | Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding); | ||
568 : | break; | ||
569 : | case 5: | ||
570 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
571 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
572 : | break; | ||
573 : | case 6: | ||
574 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
575 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
576 : | break; | ||
577 : | case 7: | ||
578 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
579 : | Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding); | ||
580 : | break; | ||
581 : | case 8: | ||
582 : | Ops->V_Pass_8(dst, src, 8, stride, rounding); | ||
583 : | break; | ||
584 : | case 9: | ||
585 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
586 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
587 : | break; | ||
588 : | case 10: | ||
589 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
590 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
591 : | break; | ||
592 : | case 11: | ||
593 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
594 : | Ops->V_Pass_8(dst, tmp, 8, stride, rounding); | ||
595 : | break; | ||
596 : | case 12: | ||
597 : | Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding); | ||
598 : | break; | ||
599 : | case 13: | ||
600 : | Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding); | ||
601 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
602 : | break; | ||
603 : | case 14: | ||
604 : | Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding); | ||
605 : | Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding); | ||
606 : | break; | ||
607 : | case 15: | ||
608 : | Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding); | ||
609 : | Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding); | ||
610 : | break; | ||
611 : | } | ||
612 : | } | ||
613 : | |||
614 : | edgomez | 1.2 | #endif /* _XVID_QPEL_H_ */ |
No admin address has been configured | ViewVC Help |
Powered by ViewVC 1.0.4 |