--- interpolate8x8.h	2002/11/20 19:53:12	1.5.2.6
+++ interpolate8x8.h	2003/06/09 13:54:07	1.10.2.2
@@ -1,3 +1,28 @@
+/*****************************************************************************
+ *
+ *  XVID MPEG-4 VIDEO CODEC
+ *  - Interpolation related header -
+ *
+ *  Copyright(C) 2001-2003 Peter Ross
+ *
+ *  This program is free software ; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation ; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program ; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * $Id: interpolate8x8.h,v 1.10.2.2 2003/06/09 13:54:07 edgomez Exp $
+ *
+ ****************************************************************************/
+
 #ifndef _INTERPOLATE8X8_H_
 #define _INTERPOLATE8X8_H_
 
@@ -83,6 +108,10 @@
 INTERPOLATE8X8 interpolate8x8_halfpel_v_3dn;
 INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dn;
 
+INTERPOLATE8X8 interpolate8x8_halfpel_h_3dne;
+INTERPOLATE8X8 interpolate8x8_halfpel_v_3dne;
+INTERPOLATE8X8 interpolate8x8_halfpel_hv_3dne;
+
 INTERPOLATE8X8 interpolate8x8_halfpel_h_ia64;
 INTERPOLATE8X8 interpolate8x8_halfpel_v_ia64;
 INTERPOLATE8X8 interpolate8x8_halfpel_hv_ia64;
@@ -120,7 +149,7 @@
 {
 	int32_t ddx, ddy;
 
-	switch (((dx & 1) << 1) + (dy & 1))	// ((dx%2)?2:0)+((dy%2)?1:0)
+	switch (((dx & 1) << 1) + (dy & 1))	/* ((dx%2)?2:0)+((dy%2)?1:0) */
 	{
 	case 0:
 		ddx = dx / 2;
@@ -155,6 +184,41 @@
 	}
 }
 
+
+static __inline void
+interpolate16x16_switch(uint8_t * const cur,
+				  const uint8_t * const refn,
+				  const uint32_t x,
+				  const uint32_t y,
+				  const int32_t dx,
+				  const int dy,
+				  const uint32_t stride,
+				  const uint32_t rounding)
+{
+	interpolate8x8_switch(cur, refn, x, y, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x+8, y, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x, y+8, dx, dy, stride, rounding);
+	interpolate8x8_switch(cur, refn, x+8, y+8, dx, dy, stride, rounding);
+}
+
+
+static __inline void
+interpolate32x32_switch(uint8_t * const cur,
+				  const uint8_t * const refn,
+				  const uint32_t x,
+				  const uint32_t y,
+				  const int32_t dx,
+				  const int dy,
+				  const uint32_t stride,
+				  const uint32_t rounding)
+{
+	interpolate16x16_switch(cur, refn, x, y, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x+16, y, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x, y+16, dx, dy, stride, rounding);
+	interpolate16x16_switch(cur, refn, x+16, y+16, dx, dy, stride, rounding);
+}
+
+
 static __inline uint8_t *
 interpolate8x8_switch2(uint8_t * const buffer,
 				   const uint8_t * const refn,
@@ -167,7 +231,7 @@
 {
 	int32_t ddx, ddy;
 
-	switch (((dx & 1) << 1) + (dy & 1))	// ((dx%2)?2:0)+((dy%2)?1:0)
+	switch (((dx & 1) << 1) + (dy & 1))	/* ((dx%2)?2:0)+((dy%2)?1:0) */
 	{
 	case 0:
 		return (uint8_t *)refn + (int)((y + dy/2) * stride + x + dx/2);
@@ -243,7 +307,7 @@
 
 	case 1:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
 		break;
 
 	case 2:
@@ -252,31 +316,31 @@
 
 	case 3:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
 		break;
 
 	case 4:
 		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
 		break;
 
 	case 5:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
 		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
 		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 6:
 		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 7:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
 		interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
 		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_v, halfpel_hv, stride, rounding, 8);
		break;
 
 	case 8:
@@ -301,26 +365,26 @@
 
 	case 12:
 		interpolate8x8_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
 		break;
 
 	case 13:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
 		interpolate8x8_avg2(halfpel_v, src, halfpel_h, stride, rounding, 9);
 		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_v+stride, halfpel_hv, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_v+stride, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 14:
 		interpolate8x8_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
 		break;
 
 	case 15:
 		interpolate8x8_lowpass_h(halfpel_h, src, stride, rounding);
 		interpolate8x8_avg2(halfpel_v, src + 1, halfpel_h, stride, rounding, 9);
 		interpolate8x8_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v + stride, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v + stride, stride, rounding, 8);
 		break;
 	}
 }
@@ -364,18 +428,15 @@
 
 	switch((y_frac << 2) | (x_frac)) {
 	case 0:
-		transfer8x8_copy(dst, src, stride);
-		transfer8x8_copy(dst+8, src+8, stride);
-		transfer8x8_copy(dst+8*stride, src+8*stride, stride);
-		transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
+		transfer16x16_copy(dst, src, stride);
 		break;
 
 	case 1:
 		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src, halfpel_h, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+8, halfpel_h+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_h+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 2:
@@ -384,18 +445,18 @@
 
 	case 3:
 		interpolate16x16_lowpass_h(halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src + 1, halfpel_h, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src + 8 + 1, halfpel_h+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src + 8*stride + 1, halfpel_h+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8 + 1, halfpel_h+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 4:
 		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 5:
@@ -406,18 +467,18 @@
 		interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
 
 		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 6:
 		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_h, halfpel_hv, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_h+8, halfpel_hv+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_h+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 7:
@@ -428,10 +489,10 @@
 		interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
 
 		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 8:
@@ -462,10 +523,10 @@
 
 	case 12:
 		interpolate16x16_lowpass_v(halfpel_v, src, stride, rounding);
-		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, src+stride, halfpel_v, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, src+stride+8, halfpel_v+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, src+stride+8*stride, halfpel_v+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, src+stride+8*stride+8, halfpel_v+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 13:
@@ -476,18 +537,18 @@
 		interpolate8x8_avg2(halfpel_v+8*stride+8, src+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
 
 		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 14:
 		interpolate16x16_lowpass_hv(halfpel_hv, halfpel_h, src, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_h+stride, halfpel_hv, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_h+stride+8, halfpel_hv+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_h+stride+8*stride, halfpel_hv+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_h+stride+8*stride+8, halfpel_hv+8*stride+8, stride, rounding, 8);
 		break;
 
 	case 15:
@@ -498,11 +559,12 @@
 		interpolate8x8_avg2(halfpel_v+8*stride+8, src+1+8*stride+8, halfpel_h+8*stride+8, stride, rounding, 9);
 
 		interpolate16x16_lowpass_v(halfpel_hv, halfpel_v, stride, rounding);
-		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 9);
-		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 9);
+		interpolate8x8_avg2(dst, halfpel_hv, halfpel_v+stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8, halfpel_hv+8, halfpel_v+stride+8, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride, halfpel_hv+8*stride, halfpel_v+stride+8*stride, stride, rounding, 8);
+		interpolate8x8_avg2(dst+8*stride+8, halfpel_hv+8*stride+8, halfpel_v+stride+8*stride+8, stride, rounding, 8);
 		break;
 	}
 }
+
 #endif