typedef INTERPOLATE8X8_6TAP_LOWPASS *INTERPOLATE8X8_6TAP_LOWPASS_PTR;

/* These functions do: dst = interpolate(src) */
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_h;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_v;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv;

/* These functions do: dst = (dst + interpolate(src) + 1) / 2
 * Suitable for direct/interpolated bvop prediction block
 * building w/o the need for intermediate interpolated result
 * storing/reading.
 * NB: the rounding applies to the interpolation, but not to
 *     the averaging step, which always uses rounding=0 */
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_add;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_h_add;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_v_add;
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv_add;
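
/* Illustrative sketch only (not part of the real API; the helper name is
 * hypothetical): the per-pixel behaviour described above for a purely
 * horizontal or vertical halfpel "add" case, assuming the usual MPEG-4
 * halfpel formula interp = (a + b + 1 - rounding) >> 1. The final averaging
 * with the existing dst value always behaves as if rounding were 0. */
static __inline uint8_t
example_halfpel_add_pixel(uint8_t dst, uint8_t a, uint8_t b,
                          const uint32_t rounding)
{
    const uint8_t interp = (uint8_t)((a + b + 1 - rounding) >> 1); /* interpolation: rounding applies */
    return (uint8_t)((dst + interp + 1) >> 1);                     /* averaging: rounding forced to 0 */
}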

extern INTERPOLATE8X8_AVG2_PTR interpolate8x8_avg2;
extern INTERPOLATE8X8_AVG4_PTR interpolate8x8_avg4;

INTERPOLATE8X8 interpolate8x8_halfpel_h_c;
INTERPOLATE8X8 interpolate8x8_halfpel_v_c;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_c;

INTERPOLATE8X8 interpolate8x8_halfpel_add_c;
INTERPOLATE8X8 interpolate8x8_halfpel_h_add_c;
INTERPOLATE8X8 interpolate8x8_halfpel_v_add_c;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_add_c;

#ifdef ARCH_IS_IA32
INTERPOLATE8X8 interpolate8x8_halfpel_h_mmx;
INTERPOLATE8X8 interpolate8x8_halfpel_v_mmx;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_mmx;

INTERPOLATE8X8 interpolate8x8_halfpel_add_mmx;
INTERPOLATE8X8 interpolate8x8_halfpel_h_add_mmx;
INTERPOLATE8X8 interpolate8x8_halfpel_v_add_mmx;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_add_mmx;

INTERPOLATE8X8 interpolate8x8_halfpel_h_xmm;
INTERPOLATE8X8 interpolate8x8_halfpel_v_xmm;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_xmm;

INTERPOLATE8X8 interpolate8x8_halfpel_add_xmm;
INTERPOLATE8X8 interpolate8x8_halfpel_h_add_xmm;
INTERPOLATE8X8 interpolate8x8_halfpel_v_add_xmm;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_add_xmm;

INTERPOLATE8X8 interpolate8x8_halfpel_h_3dn;
INTERPOLATE8X8 interpolate8x8_halfpel_v_3dn;
INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dn;
    }
}

static __inline void
interpolate8x8_add_switch(uint8_t * const cur,
                          const uint8_t * const refn,
                          const uint32_t x,
                          const uint32_t y,
                          const int32_t dx,
                          const int dy,
                          const uint32_t stride,
                          const uint32_t rounding)
{
    const uint8_t * const src = refn + (int)((y + (dy>>1)) * stride + x + (dx>>1));
    uint8_t * const dst = cur + (int)(y * stride + x);

    switch (((dx & 1) << 1) + (dy & 1)) { /* ((dx%2)?2:0)+((dy%2)?1:0) */
    case 0:
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        break;
    case 1:
        interpolate8x8_halfpel_v_add(dst, src, stride, rounding);
        break;
    case 2:
        interpolate8x8_halfpel_h_add(dst, src, stride, rounding);
        break;
    default:
        interpolate8x8_halfpel_hv_add(dst, src, stride, rounding);
        break;
    }
}
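
/* Worked example (illustrative only): a halfpel motion vector
 * (dx, dy) = (3, 2) gives an integer offset of (dx>>1, dy>>1) = (1, 1) and
 * ((dx & 1) << 1) + (dy & 1) = 2, so interpolate8x8_halfpel_h_add() is
 * selected: the reference block one pixel right and one down is interpolated
 * horizontally and averaged into the existing prediction in cur. */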

static __inline void
interpolate16x16_switch(uint8_t * const cur,
                        const uint8_t * const refn,
                        const uint32_t x,
                        const uint32_t y,
                        const int32_t dx,
                        const int dy,
                        const uint32_t stride,
                        const uint32_t rounding)
{
    interpolate8x8_switch(cur, refn, x, y, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+8, y, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x, y+8, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+8, y+8, dx, dy, stride, rounding);
}

static __inline void
interpolate16x16_add_switch(uint8_t * const cur,
                            const uint8_t * const refn,
                            const uint32_t x,
                            const uint32_t y,
                            const int32_t dx,
                            const int dy,
                            const uint32_t stride,
                            const uint32_t rounding)
{
    interpolate8x8_add_switch(cur, refn, x, y, dx, dy, stride, rounding);
    interpolate8x8_add_switch(cur, refn, x+8, y, dx, dy, stride, rounding);
    interpolate8x8_add_switch(cur, refn, x, y+8, dx, dy, stride, rounding);
    interpolate8x8_add_switch(cur, refn, x+8, y+8, dx, dy, stride, rounding);
}

static __inline void
interpolate32x32_switch(uint8_t * const cur,
                        const uint8_t * const refn,
                        const uint32_t x,
                        const uint32_t y,
                        const int32_t dx,
                        const int dy,
                        const uint32_t stride,
                        const uint32_t rounding)
{
    interpolate16x16_switch(cur, refn, x, y, dx, dy, stride, rounding);
    interpolate16x16_switch(cur, refn, x+16, y, dx, dy, stride, rounding);
    interpolate16x16_switch(cur, refn, x, y+16, dx, dy, stride, rounding);
    interpolate16x16_switch(cur, refn, x+16, y+16, dx, dy, stride, rounding);
}

static __inline void
interpolate32x32_add_switch(uint8_t * const cur,
                            const uint8_t * const refn,
                            const uint32_t x,
                            const uint32_t y,
                            const int32_t dx,
                            const int dy,
                            const uint32_t stride,
                            const uint32_t rounding)
{
    interpolate16x16_add_switch(cur, refn, x, y, dx, dy, stride, rounding);
    interpolate16x16_add_switch(cur, refn, x+16, y, dx, dy, stride, rounding);
    interpolate16x16_add_switch(cur, refn, x, y+16, dx, dy, stride, rounding);
    interpolate16x16_add_switch(cur, refn, x+16, y+16, dx, dy, stride, rounding);
}

static __inline uint8_t *
interpolate8x8_switch2(uint8_t * const buffer,
    return buffer;
}

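/* Quarter-pel addressing as implemented below: dx/dy are in quarter-pel
 * units, so xRef = x*4 + dx. x_int is floor(xRef/4) (the explicit correction
 * handles C's truncation toward zero for negative values) and x_frac is the
 * remaining 0..3 quarter-pel offset. For example, x = 0 and dx = -3 give
 * xRef = -3, hence x_int = -1 and x_frac = 1. The switch index
 * (y_frac << 2) | x_frac then selects one of the 16 quarter-pel positions:
 * case 0 is a plain copy, cases 2, 8 and 10 are pure lowpass (half-pel)
 * filters, and the remaining cases blend a lowpass result with a
 * neighbouring full- or half-pel block. */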
static __inline void interpolate8x8_quarterpel(uint8_t * const cur,
                                               uint8_t * const refn,
                                               uint8_t * const refh,
                                               uint8_t * const refv,
                                               uint8_t * const refhv,
                                               const uint32_t x, const uint32_t y,
                                               const int32_t dx, const int dy,
                                               const uint32_t stride,
                                               const uint32_t rounding)
{
    const int32_t xRef = x*4 + dx;
    const int32_t yRef = y*4 + dy;

    uint8_t *src, *dst;
    uint8_t *halfpel_h, *halfpel_v, *halfpel_hv;
    int32_t x_int, y_int, x_frac, y_frac;

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    x_frac = xRef - (4*x_int);

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    y_frac = yRef - (4*y_int);

    src = refn + y_int * stride + x_int;
    halfpel_h = refh;
    halfpel_v = refv;
    halfpel_hv = refhv;

    dst = cur + y * stride + x;

    switch((y_frac << 2) | (x_frac)) {

    case 0:
        transfer8x8_copy(dst, src, stride);
        break;

    case 1:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
        break;

    case 2:
        interpolate8x8_lowpass_h(dst, src, stride, rounding);
        break;

    case 3:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
        break;

    case 4:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
        break;

    case 5:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
        break;

    case 6:
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
        break;

    case 7:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
        break;

    case 8:
        interpolate8x8_lowpass_v(dst, src, stride, rounding);
        break;

    case 9:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(dst, halfpel_v, stride, rounding);
        break;

    case 10:
        interpolate8x8_lowpass_hv(dst, halfpel_h, src, stride, rounding);
        break;

    case 11:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(dst, halfpel_v, stride, rounding);
        break;

    case 12:
        interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
        break;

    case 13:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_v+stride, halfpel_hv, stride, rounding, 8);
        break;

    case 14:
        interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
        break;

    case 15:
        interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
        interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_hv, halfpel_v + stride, stride, rounding, 8);
        break;
    }
}

static __inline void interpolate16x16_quarterpel(uint8_t * const cur,
                                                 uint8_t * const refn,
                                                 uint8_t * const refh,
                                                 uint8_t * const refv,
                                                 uint8_t * const refhv,
                                                 const uint32_t x, const uint32_t y,
                                                 const int32_t dx, const int dy,
                                                 const uint32_t stride,
                                                 const uint32_t rounding)
{
    const int32_t xRef = x*4 + dx;
    const int32_t yRef = y*4 + dy;

    uint8_t *src, *dst;
    uint8_t *halfpel_h, *halfpel_v, *halfpel_hv;
    int32_t x_int, y_int, x_frac, y_frac;

    x_int = xRef/4;
    if (xRef < 0 && xRef % 4)
        x_int--;

    x_frac = xRef - (4*x_int);

    y_int = yRef/4;
    if (yRef < 0 && yRef % 4)
        y_int--;

    y_frac = yRef - (4*y_int);

    src = refn + y_int * stride + x_int;
    halfpel_h = refh;
    halfpel_v = refv;
    halfpel_hv = refhv;

    dst = cur + y * stride + x;

    switch((y_frac << 2) | (x_frac)) {

    case 0:
        transfer16x16_copy(dst, src, stride);
        break;

    case 1:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 8);
        break;

    case 2:
        interpolate16x16_lowpass_h(dst, src, stride, rounding);
        break;

    case 3:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding, 8);
        break;

    case 4:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
        break;

    case 5:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);

        interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
        break;

    case 6:
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
        break;

    case 7:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);

        interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
        break;

    case 8:
        interpolate16x16_lowpass_v(dst, src, stride, rounding);
        break;

    case 9:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
        interpolate16x16_lowpass_v(dst, halfpel_v, stride, rounding);
        break;

    case 10:
        interpolate16x16_lowpass_hv(dst, halfpel_h, src, stride, rounding);
        break;

    case 11:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
        interpolate16x16_lowpass_v(dst, halfpel_v, stride, rounding);
        break;

    case 12:
        interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
        interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
        break;

    case 13:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);

        interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
        break;

    case 14:
        interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
        break;

    case 15:
        interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
        interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
        interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);

        interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
        interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
        interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
        break;
    }
}

#endif