/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - QPel interpolation -
 *
 *  Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 *  This program is free software ; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation ; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program ; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id: qpel.h,v 1.6 2005/01/05 23:02:15 edgomez Exp $
 *
 ****************************************************************************/

#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "interpolate8x8.h"
#include "../utils/mem_transfer.h"

/*****************************************************************************
 * Signatures
 ****************************************************************************/

#define XVID_QP_PASS_SIGNATURE(NAME) \
	void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding)

typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);

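/* Added illustrative sketch (not part of the original header): a do-nothing
 * "copy" pass written against XVID_QP_PASS_SIGNATURE, to show how the
 * (dst, src, length, BpS, rounding) convention is used. Judging from the
 * call sites below, `length` is the number of lines processed and `BpS`
 * is the stride in bytes -- that reading is an assumption, as is the
 * function name. Real passes apply the MPEG-4 quarter-pel FIR filter
 * horizontally or vertically. Kept inside #if 0 so it is never compiled. */
#if 0
static XVID_QP_PASS_SIGNATURE(example_copy_pass)
{
	int32_t i, j;
	(void)rounding;                 /* a plain copy ignores rounding     */
	for (j = 0; j < length; j++) {  /* one 16-pixel line per iteration   */
		for (i = 0; i < 16; i++)
			dst[i] = src[i];
		dst += BpS;                 /* advance both pointers by one line */
		src += BpS;
	}
}
#endif
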
/* We put everything in a single struct so it can easily be passed
 * to prediction functions as a whole... */

typedef struct _XVID_QP_FUNCS {

	/* filter for QPel 16x? prediction */

	XVID_QP_PASS *H_Pass;
	XVID_QP_PASS *H_Pass_Avrg;
	XVID_QP_PASS *H_Pass_Avrg_Up;
	XVID_QP_PASS *V_Pass;
	XVID_QP_PASS *V_Pass_Avrg;
	XVID_QP_PASS *V_Pass_Avrg_Up;

	/* filter for QPel 8x? prediction */

	XVID_QP_PASS *H_Pass_8;
	XVID_QP_PASS *H_Pass_Avrg_8;
	XVID_QP_PASS *H_Pass_Avrg_Up_8;
	XVID_QP_PASS *V_Pass_8;
	XVID_QP_PASS *V_Pass_Avrg_8;
	XVID_QP_PASS *V_Pass_Avrg_Up_8;
} XVID_QP_FUNCS;

/*****************************************************************************
 * fwd dcl
 ****************************************************************************/
extern void xvid_Init_QP();

extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref;     /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */

extern XVID_QP_FUNCS xvid_QP_Funcs_C;         /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;     /* for B-frames */

#ifdef ARCH_IS_IA32
extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
#endif

#ifdef ARCH_IS_PPC
extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
#endif

#ifdef ARCH_IS_X86_64
extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
#endif

extern XVID_QP_FUNCS *xvid_QP_Funcs;     /* <- main pointer for enc/dec structure */
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */

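/* Added illustrative sketch (not part of the original header): how the two
 * dispatch pointers above are expected to be wired up. xvid_Init_QP() is the
 * routine that performs the real selection; the helper name and the
 * `use_mmx` flag below are placeholders for this example only.
 * Kept inside #if 0 so it is never compiled. */
#if 0
static void example_select_qpel_funcs(int use_mmx)
{
	/* default to the plain C tables */
	xvid_QP_Funcs     = &xvid_QP_Funcs_C;
	xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;

#ifdef ARCH_IS_IA32
	if (use_mmx) {
		/* switch both P-frame and B-frame tables to the MMX versions */
		xvid_QP_Funcs     = &xvid_QP_Funcs_mmx;
		xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
	}
#endif
}
#endif
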
/*****************************************************************************
 * macros
 ****************************************************************************/

/*****************************************************************************

	Passes to be performed

	case 0:         copy
	case 2:         h-pass
	case 1/3:       h-pass + h-avrg
	case 8:         v-pass
	case 10:        h-pass + v-pass
	case 9/11:      h-pass + h-avrg + v-pass
	case 4/12:      v-pass + v-avrg
	case 6/14:      h-pass + v-pass + v-avrg
	case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg

 ****************************************************************************/

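/* Added worked example (not part of the original header): for a quarter-pel
 * motion vector with fractional parts dx&3 == 3 and dy&3 == 2,
 *
 *     quads = (dx&3) | ((dy&3)<<2) = 3 | (2<<2) = 11
 *
 * which falls under "case 9/11: h-pass + h-avrg + v-pass" above: the
 * horizontal filter (with averaging, the *_Avrg_Up variant for fraction 3)
 * is run into the scratch block first, then the vertical filter writes the
 * final prediction. */
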
static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
							uint8_t * const refn,
							uint8_t * const refh,
							uint8_t * const refv,
							uint8_t * const refhv,
							const uint32_t x, const uint32_t y,
							const int32_t dx, const int dy,
							const uint32_t stride,
							const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef >> 2;
	y_int = yRef >> 2;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		transfer8x8_copy(dst, src, stride);
		transfer8x8_copy(dst+8, src+8, stride);
		transfer8x8_copy(dst+8*stride, src+8*stride, stride);
		transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 16, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 5:
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	}
}

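/* Added illustrative call (not part of the original header): predicting one
 * 16x16 macroblock at block position (mb_x, mb_y) from a quarter-pel motion
 * vector (mv_x, mv_y) given in quarter-pel units. All names below are
 * placeholders for this sketch; refv/refhv are passed as NULL because this
 * particular function never reads them, and `scratch` stands for the
 * "at least 16 x stride" temporary block mentioned in the code above.
 * Kept inside #if 0 so it is never compiled. */
#if 0
static void example_predict_mb(uint8_t *dec_frame, uint8_t *ref_frame,
							   uint8_t *scratch,
							   uint32_t mb_x, uint32_t mb_y,
							   int32_t mv_x, int32_t mv_y,
							   uint32_t stride, uint32_t rounding)
{
	interpolate16x16_quarterpel(dec_frame, ref_frame,
								scratch, NULL, NULL,
								mb_x * 16, mb_y * 16, /* pixel position */
								mv_x, mv_y,           /* quarter-pel MV */
								stride, rounding);
}
#endif
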
static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
								uint8_t * const refn,
								uint8_t * const refh,
								uint8_t * const refv,
								uint8_t * const refhv,
								const uint32_t x, const uint32_t y,
								const int32_t dx, const int dy,
								const uint32_t stride,
								const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;
	const XVID_QP_FUNCS *Ops_Copy;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Add_Funcs;
	Ops_Copy = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef >> 2;
	y_int = yRef >> 2;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* NB: there is no halfpel involved! The function's name can be
		 * misleading */
		interpolate8x8_halfpel_add(dst, src, stride, rounding);
		interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
		interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 16, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
		break;
	case 5:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
		Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
		break;
	}
}

static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
						   uint8_t * const refn,
						   uint8_t * const refh,
						   uint8_t * const refv,
						   uint8_t * const refhv,
						   const uint32_t x, const uint32_t y,
						   const int32_t dx, const int dy,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef >> 2;
	y_int = yRef >> 2;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		transfer8x8_copy(dst, src, stride);
		transfer8x8_copy(dst+8, src+8, stride);
		break;
	case 1:
		Ops->H_Pass_Avrg(dst, src, 8, stride, rounding);
		break;
	case 2:
		Ops->H_Pass(dst, src, 8, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding);
		break;
	case 5:
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 6:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 7:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
		break;
	case 8:
		Ops->V_Pass_8(dst, src, 16, stride, rounding);
		break;
	case 9:
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 10:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 11:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding);
		break;
	case 13:
		Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	case 14:
		Ops->H_Pass(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	case 15:
		Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
		break;
	}
}

static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
						  uint8_t * const refn,
						  uint8_t * const refh,
						  uint8_t * const refv,
						  uint8_t * const refhv,
						  const uint32_t x, const uint32_t y,
						  const int32_t dx, const int dy,
						  const uint32_t stride,
						  const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef >> 2;
	y_int = yRef >> 2;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		transfer8x8_copy(dst, src, stride);
		break;
	case 1:
		Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 2:
		Ops->H_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 5:
		Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 6:
		Ops->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 7:
		Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 8:
		Ops->V_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 9:
		Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 10:
		Ops->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 11:
		Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 13:
		Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 14:
		Ops->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 15:
		Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	}
}

static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
							  uint8_t * const refn,
							  uint8_t * const refh,
							  uint8_t * const refv,
							  uint8_t * const refhv,
							  const uint32_t x, const uint32_t y,
							  const int32_t dx, const int dy,
							  const uint32_t stride,
							  const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
	int32_t quads;
	const XVID_QP_FUNCS *Ops;
	const XVID_QP_FUNCS *Ops_Copy;

	int32_t x_int, y_int;

	const int32_t xRef = (int)x*4 + dx;
	const int32_t yRef = (int)y*4 + dy;

	Ops = xvid_QP_Add_Funcs;
	Ops_Copy = xvid_QP_Funcs;
	quads = (dx&3) | ((dy&3)<<2);

	x_int = xRef >> 2;
	y_int = yRef >> 2;

	dst = cur + y * stride + x;
	src = refn + y_int * (int)stride + x_int;

	tmp = refh; /* we need at least a 16 x stride scratch block */

	switch(quads) {
	case 0:
		/* NB: misleading function name -- there is no halfpel involved,
		 * just dst and src averaging with rounding=0 */
		interpolate8x8_halfpel_add(dst, src, stride, rounding);
		break;
	case 1:
		Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 2:
		Ops->H_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 3:
		Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 4:
		Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
		break;
	case 5:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 6:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 7:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
		break;
	case 8:
		Ops->V_Pass_8(dst, src, 8, stride, rounding);
		break;
	case 9:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 10:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 11:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
		break;
	case 12:
		Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
		break;
	case 13:
		Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 14:
		Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	case 15:
		Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
		Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
		break;
	}
}

#endif /* _XVID_QPEL_H_ */