#ifndef _INTERPOLATE8X8_H_
#define _INTERPOLATE8X8_H_

#include "../utils/mem_transfer.h"
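
/*
 * Each interpolation routine comes in several implementations (the plain
 * C reference plus the platform-specific variants declared further down);
 * callers go through the function pointers below, which the init/CPU
 * detection code is expected to bind to the best available implementation.
 */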

typedef void (INTERPOLATE8X8)(uint8_t * const dst,
                              const uint8_t * const src,
                              const uint32_t stride,
                              const uint32_t rounding);
typedef INTERPOLATE8X8 *INTERPOLATE8X8_PTR;

typedef void (INTERPOLATE8X8_AVG2) (uint8_t *dst,
                                    const uint8_t *src1,
                                    const uint8_t *src2,
                                    const uint32_t stride,
                                    const uint32_t rounding);
typedef INTERPOLATE8X8_AVG2 *INTERPOLATE8X8_AVG2_PTR;

typedef void (INTERPOLATE8X8_AVG4) (uint8_t *dst,
                                    const uint8_t *src1,
                                    const uint8_t *src2,
                                    const uint8_t *src3,
                                    const uint8_t *src4,
                                    const uint32_t stride,
                                    const uint32_t rounding);
typedef INTERPOLATE8X8_AVG4 *INTERPOLATE8X8_AVG4_PTR;
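
/*
 * AVG2/AVG4 blend two resp. four 8x8 prediction blocks into dst;
 * `rounding` picks the rounding direction of the average (presumably
 * the usual (a + b + 1 - rounding) >> 1 scheme of the C reference).
 */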

typedef void (INTERPOLATE_LOWPASS) (uint8_t *dst,
                                    uint8_t *src,
                                    int32_t stride,
                                    int32_t rounding);

typedef INTERPOLATE_LOWPASS *INTERPOLATE_LOWPASS_PTR;

typedef void (INTERPOLATE_LOWPASS_HV) (uint8_t *dst1,
                                       uint8_t *dst2,
                                       uint8_t *src,
                                       int32_t stride,
                                       int32_t rounding);

typedef INTERPOLATE_LOWPASS_HV *INTERPOLATE_LOWPASS_HV_PTR;

typedef void (INTERPOLATE8X8_6TAP_LOWPASS) (uint8_t *dst,
                                            uint8_t *src,
                                            int32_t stride,
                                            int32_t rounding);

typedef INTERPOLATE8X8_6TAP_LOWPASS *INTERPOLATE8X8_6TAP_LOWPASS_PTR;

extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_h;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_v;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv;

extern INTERPOLATE8X8_AVG2_PTR interpolate8x8_avg2;
extern INTERPOLATE8X8_AVG4_PTR interpolate8x8_avg4;

extern INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_h;
extern INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_v;

extern INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_h;
extern INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_v;

extern INTERPOLATE_LOWPASS_HV_PTR interpolate8x8_lowpass_hv;
extern INTERPOLATE_LOWPASS_HV_PTR interpolate16x16_lowpass_hv;

extern INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_h;
extern INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_v;

INTERPOLATE8X8 interpolate8x8_halfpel_h_c;
INTERPOLATE8X8 interpolate8x8_halfpel_v_c;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_c;

INTERPOLATE8X8 interpolate8x8_halfpel_v_3dn;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dn;

INTERPOLATE8X8 interpolate8x8_halfpel_h_ia64;
INTERPOLATE8X8 interpolate8x8_halfpel_v_ia64;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_ia64;

INTERPOLATE8X8_AVG2 interpolate8x8_avg2_c;
INTERPOLATE8X8_AVG4 interpolate8x8_avg4_c;

INTERPOLATE8X8_AVG2 interpolate8x8_avg2_mmx;
INTERPOLATE8X8_AVG4 interpolate8x8_avg4_mmx;

INTERPOLATE_LOWPASS interpolate8x8_lowpass_h_c;
INTERPOLATE_LOWPASS interpolate8x8_lowpass_v_c;

INTERPOLATE_LOWPASS interpolate16x16_lowpass_h_c;
INTERPOLATE_LOWPASS interpolate16x16_lowpass_v_c;

INTERPOLATE_LOWPASS_HV interpolate8x8_lowpass_hv_c;
INTERPOLATE_LOWPASS_HV interpolate16x16_lowpass_hv_c;

INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_h_c;
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_v_c;

INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_h_mmx;
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_v_mmx;
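
/*
 * Usage sketch (illustrative only, not part of this header): the init
 * code is expected to bind each pointer to one of the implementations
 * declared above, e.g.
 *
 *     interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_c;
 *     interpolate8x8_avg2 = interpolate8x8_avg2_c;
 *
 * swapping in the _mmx/_3dn/_ia64 variants where the CPU supports them.
 */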

static __inline void
interpolate8x8_switch(uint8_t * const cur,
                      const uint8_t * const refn,
                      const uint32_t x,
                      const uint32_t y,
                      const int32_t dx,
                      const int32_t dy,
                      const uint32_t stride,
                      const uint32_t rounding)
{
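    /*
     * dx/dy are the motion-vector components in half-pel units, so bit 0
     * of each tells whether horizontal/vertical half-pel interpolation is
     * needed: ((dx & 1) << 1) + (dy & 1) gives 0 = copy, 1 = v, 2 = h,
     * 3 = hv. For example, dx = 3, dy = 2 selects case 2 with the integer
     * offsets ddx = (3 - 1) / 2 = 1 and ddy = 2 / 2 = 1.
     */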
    int32_t ddx, ddy;

    switch (((dx & 1) << 1) + (dy & 1)) // ((dx%2)?2:0)+((dy%2)?1:0)
    {
    case 0:
        ddx = dx / 2;
        ddy = dy / 2;
        transfer8x8_copy(cur + y * stride + x,
                         refn + (int)((y + ddy) * stride + x + ddx), stride);
        break;

    case 1:
        ddx = dx / 2;
        ddy = (dy - 1) / 2;
        interpolate8x8_halfpel_v(cur + y * stride + x,
                                 refn + (int)((y + ddy) * stride + x + ddx),
                                 stride, rounding);
        break;

    case 2:
        ddx = (dx - 1) / 2;
        ddy = dy / 2;
        interpolate8x8_halfpel_h(cur + y * stride + x,
                                 refn + (int)((y + ddy) * stride + x + ddx),
                                 stride, rounding);
        break;

    default:
        ddx = (dx - 1) / 2;
        ddy = (dy - 1) / 2;
        interpolate8x8_halfpel_hv(cur + y * stride + x,
                                  refn + (int)((y + ddy) * stride + x + ddx),
                                  stride, rounding);
        break;
    }
}
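
/*
 * Same half-pel selection as interpolate8x8_switch(), but for reads:
 * in case 0 no interpolation is needed, so the reference block is
 * returned directly instead of being rendered into the work buffer.
 */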

static __inline uint8_t *
interpolate8x8_switch2(uint8_t * const buffer,
                       const uint8_t * const refn,
                       const uint32_t x,
                       const uint32_t y,
                       const int32_t dx,
                       const int32_t dy,
                       const uint32_t stride,
                       const uint32_t rounding)
{
    int32_t ddx, ddy;

    switch (((dx & 1) << 1) + (dy & 1)) // ((dx%2)?2:0)+((dy%2)?1:0)
    {
    case 0:
        return (uint8_t *)refn + (int)((y + dy/2) * stride + x + dx/2);

    case 1:
        ddx = dx / 2;
        ddy = (dy - 1) / 2;
        interpolate8x8_halfpel_v(buffer,
                                 refn + (int)((y + ddy) * stride + x + ddx),
                                 stride, rounding);
        break;

    case 2:
        ddx = (dx - 1) / 2;
        ddy = dy / 2;
        interpolate8x8_halfpel_h(buffer,
                                 refn + (int)((y + ddy) * stride + x + ddx),
                                 stride, rounding);
        break;

    default:
        ddx = (dx - 1) / 2;
        ddy = (dy - 1) / 2;
        interpolate8x8_halfpel_hv(buffer,
                                  refn + (int)((y + ddy) * stride + x + ddx),
                                  stride, rounding);
        break;
    }
    return buffer;
}
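
/*
 * Quarter-pel interpolation: dx/dy are in quarter-pel units. The 2-bit
 * fractional parts are combined as (y_frac << 2) | x_frac into 16 cases,
 * each built from the 6-tap lowpass half-pel planes (refh, refv, refhv)
 * and pixel averaging; e.g. case 1 (x_frac = 1) averages the full-pel
 * block with the horizontal half-pel block.
 */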

static __inline void interpolate8x8_quarterpel(uint8_t * const cur,
                                               uint8_t * const refn,
                                               uint8_t * const refh,
                                               uint8_t * const refv,
                                               uint8_t * const refhv,
                                               const uint32_t x, const uint32_t y,
                                               const int32_t dx, const int32_t dy,
                                               const uint32_t stride,
                                               const uint32_t rounding)
{
    const int32_t xRef = x*4 + dx;
    const int32_t yRef = y*4 + dy;

    uint8_t *src, *dst;
    uint8_t *halfpel_h, *halfpel_v, *halfpel_hv;
    int32_t x_int, y_int, x_frac, y_frac;

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    x_frac = xRef - (4*x_int);

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    y_frac = yRef - (4*y_int);

    src = refn + y_int * stride + x_int;
    halfpel_h = refh;
    halfpel_v = refv;
    halfpel_hv = refhv;

    dst = cur + y * stride + x;

    switch((y_frac << 2) | (x_frac)) {

    case 0:
        transfer8x8_copy(dst, src, stride);
        break;

    case 1:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding);
        break;

    case 2:
        interpolate8x8_lowpass_h(dst, src, stride, rounding);
        break;

    case 3:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding);
        break;

    case 4:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding);
        break;

    case 5:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
        break;

    case 6:
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding);
        break;

    case 7:
        interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+1, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
        break;

    case 8:
        interpolate8x8_lowpass_v(dst, src, stride, rounding);
        break;

    case 9:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
        break;

    case 10:
        interpolate8x8_lowpass_hv(dst, halfpel_h, src, stride, rounding);
        break;

    case 11:
        interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
        break;

    case 12:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding);
        break;

    case 13:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+stride, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
        break;

    case 14:
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding);
        break;

    case 15:
        interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+stride+1, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
        break;
    }
}
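
/*
 * 16x16 version of the above: the same 16-case scheme using the 16x16
 * lowpass helpers, with the averaging done as four 8x8 calls per case;
 * the default branch falls back to four interpolate8x8_quarterpel()
 * calls, one per 8x8 quadrant.
 */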

static __inline void interpolate16x16_quarterpel(uint8_t * const cur,
                                                 uint8_t * const refn,
                                                 uint8_t * const refh,
                                                 uint8_t * const refv,
                                                 uint8_t * const refhv,
                                                 const uint32_t x, const uint32_t y,
                                                 const int32_t dx, const int32_t dy,
                                                 const uint32_t stride,
                                                 const uint32_t rounding)
{
    const int32_t xRef = x*4 + dx;
    const int32_t yRef = y*4 + dy;

    uint8_t *src, *dst;
    uint8_t *halfpel_h, *halfpel_v, *halfpel_hv;
    int32_t x_int, y_int, x_frac, y_frac;

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    x_frac = xRef - (4*x_int);

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    y_frac = yRef - (4*y_int);

    src = refn + y_int * stride + x_int;
    halfpel_h = refh;
    halfpel_v = refv;
    halfpel_hv = refhv;

    dst = cur + y * stride + x;

    switch((y_frac << 2) | (x_frac)) {

    case 0:
        transfer8x8_copy(dst, src, stride);
        transfer8x8_copy(dst+8, src+8, stride);
        transfer8x8_copy(dst+8*stride, src+8*stride, stride);
        transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
        break;

    case 1:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding);
        interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding);
        break;

    case 2:
        interpolate16x16_lowpass_h(dst, src, stride, rounding);
        break;

    case 3:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding);
        interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding);
        break;

    case 4:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding);
        break;

    case 5:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg4(dst+8, src+8, halfpel_h+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg4(dst+8*stride, src+8*stride, halfpel_h+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg4(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 6:
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding);
        interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 7:
        interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+1, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg4(dst+8, src+8+1, halfpel_h+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg4(dst+8*stride, src+1+8*stride, halfpel_h+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg4(dst+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 8:
        interpolate16x16_lowpass_v(dst, src, stride, rounding);
        break;

    case 9:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg2(dst+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 10:
        interpolate16x16_lowpass_hv(dst, halfpel_h, src, stride, rounding);
        break;

    case 11:
        interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg2(dst+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 12:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding);
        break;

    case 13:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+stride, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg4(dst+8, src+stride+8, halfpel_h+stride+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg4(dst+8*stride, src+stride+8*stride, halfpel_h+stride+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg4(dst+8*stride+8, src+stride+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 14:
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding);
        interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    case 15:
        interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg4(dst, src+stride+1, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
        interpolate8x8_avg4(dst+8, src+stride+1+8, halfpel_h+stride+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
        interpolate8x8_avg4(dst+8*stride, src+stride+1+8*stride, halfpel_h+stride+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
        interpolate8x8_avg4(dst+8*stride+8, src+stride+1+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
        break;

    default:
        interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x, y, dx, dy, stride, rounding);
        interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y, dx, dy, stride, rounding);
        interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x, y+8, dx, dy, stride, rounding);
        interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y+8, dx, dy, stride, rounding);
        return;
    }
}

#endif