--- interpolate8x8.h	2002/10/13 13:52:01	1.5.2.4
+++ interpolate8x8.h	2002/12/28 15:40:41	1.5.2.10
@@ -13,7 +13,8 @@
 									const uint8_t *src1,
 									const uint8_t *src2,
 									const uint32_t stride,
-									const uint32_t rounding);
+									const uint32_t rounding,
+									const uint32_t height);
 typedef INTERPOLATE8X8_AVG2 *INTERPOLATE8X8_AVG2_PTR;
 
 typedef void (INTERPOLATE8X8_AVG4) (uint8_t *dst,
@@ -82,6 +83,10 @@
 INTERPOLATE8X8 interpolate8x8_halfpel_v_3dn;
 INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dn;
 
+INTERPOLATE8X8 interpolate8x8_halfpel_h_3dne;
+INTERPOLATE8X8 interpolate8x8_halfpel_v_3dne;
+INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dne;
+
 INTERPOLATE8X8 interpolate8x8_halfpel_h_ia64;
 INTERPOLATE8X8 interpolate8x8_halfpel_v_ia64;
 INTERPOLATE8X8 interpolate8x8_halfpel_hv_ia64;
@@ -154,6 +159,41 @@
 	}
 }
 
+
+static __inline void
+interpolate16x16_switch(uint8_t * const cur,
+						const uint8_t * const refn,
+						const uint32_t x,
+						const uint32_t y,
+						const int32_t dx,
+						const int dy,
+						const uint32_t stride,
+						const uint32_t rounding)
+{
+	interpolate8x8_switch(cur, refn, x, y, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x+8, y, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x, y+8, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x+8, y+8, dx, dy, stride, rounding);
+}
+
+
+static __inline void
+interpolate32x32_switch(uint8_t * const cur,
+						const uint8_t * const refn,
+						const uint32_t x,
+						const uint32_t y,
+						const int32_t dx,
+						const int dy,
+						const uint32_t stride,
+						const uint32_t rounding)
+{
+	interpolate16x16_switch(cur, refn, x, y, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x+16, y, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x, y+16, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x+16, y+16, dx, dy, stride, rounding);
+}
+
+
 static __inline uint8_t *
 interpolate8x8_switch2(uint8_t * const buffer,
 					   const uint8_t * const refn,
@@ -198,7 +238,6 @@
 	return buffer;
 }
 
-
 static __inline void interpolate8x8_quarterpel(uint8_t * const cur,
 									uint8_t * const refn,
 									uint8_t * const refh,
@@ -243,7 +282,7 @@
 
 	case 1:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding);
+		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
 		break;
 
 	case 2:
@@ -252,29 +291,31 @@
 
 	case 3:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding);
+		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
 		break;
 
 	case 4:
 		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
 		break;
 
 	case 5:
-		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 6:
 		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 7:
-		interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+1, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 8:
@@ -282,9 +323,9 @@
 		break;
 
 	case 9:
-		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(dst, halfpel_v, stride, rounding);
 		break;
 
 	case 10:
@@ -292,36 +333,37 @@
 		break;
 
 	case 11:
-		interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(dst, halfpel_v, stride, rounding);
 		break;
 
 	case 12:
 		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
 		break;
 
 	case 13:
-		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+stride, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_v+stride, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 14:
 		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 15:
-		interpolate8x8_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+stride+1, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
+		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v + stride, stride, rounding, 8);
 		break;
 	}
 }
 
-
 static __inline void interpolate16x16_quarterpel(uint8_t * const cur,
 									uint8_t * const refn,
 									uint8_t * const refh,
@@ -361,18 +403,15 @@
 	switch((y_frac << 2) | (x_frac)) {
 
 	case 0:
-		transfer8x8_copy(dst, src, stride);
-		transfer8x8_copy(dst+8, src+8, stride);
-		transfer8x8_copy(dst+8*stride, src+8*stride, stride);
-		transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
+		transfer16x16_copy(dst, src, stride);
 		break;
 
 	case 1:
 		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding);
-		interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 2:
@@ -381,44 +420,54 @@
 
 	case 3:
 		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding);
-		interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 4:
 		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 5:
-		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg4(dst+8, src+8, halfpel_h+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride, src+8*stride, halfpel_h+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+
+		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 6:
 		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding);
-		interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 7:
-		interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+1, halfpel_h, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg4(dst+8, src+8+1, halfpel_h+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride, src+1+8*stride, halfpel_h+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+
+		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 8:
@@ -426,12 +475,12 @@
 		break;
 
 	case 9:
-		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg2(dst+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+		interpolate16x16_lowpass_v(dst, halfpel_v, stride, rounding);
 		break;
 
 	case 10:
@@ -439,53 +488,56 @@
 
 	case 11:
-		interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg2(dst+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+		interpolate16x16_lowpass_v(dst, halfpel_v, stride, rounding);
 		break;
 
 	case 12:
 		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 13:
-		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+stride, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg4(dst+8, src+stride+8, halfpel_h+stride+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride, src+stride+8*stride, halfpel_h+stride+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride+8, src+stride+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+
+		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 14:
 		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding);
-		interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 15:
-		interpolate16x16_lowpass_v(halfpel_v, src+1, stride, rounding);
-		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg4(dst, src+stride+1, halfpel_h+stride, halfpel_v, halfpel_hv, stride, rounding);
-		interpolate8x8_avg4(dst+8, src+stride+1+8, halfpel_h+stride+8, halfpel_v+8, halfpel_hv+8, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride, src+stride+1+8*stride, halfpel_h+stride+8*stride, halfpel_v+8*stride, halfpel_hv+8*stride, stride, rounding);
-		interpolate8x8_avg4(dst+8*stride+8, src+stride+1+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_v+8*stride+8, halfpel_hv+8*stride+8, stride, rounding);
-		break;
-	default:
-		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x, y, dx, dy, stride, rounding);
-		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y, dx, dy, stride, rounding);
-		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x, y+8, dx, dy, stride, rounding);
-		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y+8, dx, dy, stride, rounding);
-		return;
+		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
+		interpolate8x8_avg2(halfpel_v, src+1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8, src+1 + 8, halfpel_h+8, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride, src+1 + 8*stride, halfpel_h+8*stride, stride, rounding, 9);
+		interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+
+		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
 		break;
 	}
 }
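
For reference, a minimal plain-C sketch of what the widened interpolate8x8_avg2() prototype is expected to compute: a pixel-wise average of two 8-pixel-wide blocks over `height' rows. The function name interpolate8x8_avg2_c and the rounding rule (add 1 - rounding before the shift, the usual MPEG-4 halfpel convention) are assumptions for illustration, not taken from this diff.

#include <stdint.h>

/* Reference sketch only; rounding convention assumed, not confirmed by the patch. */
static void
interpolate8x8_avg2_c(uint8_t *dst,
                      const uint8_t *src1,
                      const uint8_t *src2,
                      const uint32_t stride,
                      const uint32_t rounding,
                      const uint32_t height)
{
	const uint32_t round = 1 - rounding;   /* rounding is 0 or 1 */
	uint32_t i, j;

	for (i = 0; i < height; i++) {         /* 8 rows for a final block, 9 for an intermediate */
		for (j = 0; j < 8; j++)
			dst[j] = (uint8_t) ((src1[j] + src2[j] + round) >> 1);
		dst  += stride;
		src1 += stride;
		src2 += stride;
	}
}

This also suggests why the rewritten quarterpel cases pass 9 rather than 8 whenever the averaged plane is only an intermediate: the interpolate8x8_lowpass_v()/interpolate16x16_lowpass_v() pass that follows apparently consumes one extra input row below the block to produce its output rows, while avg2 calls that write directly to dst keep the plain 8-row height.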