1 |
#ifndef _INTERPOLATE8X8_H_ |
/***************************************************************************** |
2 |
#define _INTERPOLATE8X8_H_ |
* |
3 |
|
* XVID MPEG-4 VIDEO CODEC |
4 |
|
* - 8x8 block-based halfpel interpolation - headers |
5 |
|
* |
6 |
|
* Copyright(C) 2002 Peter Ross <pross@xvid.org> |
7 |
|
* |
8 |
|
* This program is an implementation of a part of one or more MPEG-4 |
9 |
|
* Video tools as specified in ISO/IEC 14496-2 standard. Those intending |
10 |
|
* to use this software module in hardware or software products are |
11 |
|
* advised that its use may infringe existing patents or copyrights, and |
12 |
|
* any such use would be at such party's own risk. The original |
13 |
|
* developer of this software module and his/her company, and subsequent |
14 |
|
* editors and their companies, will have no liability for use of this |
15 |
|
* software or modifications or derivatives thereof. |
16 |
|
* |
17 |
|
* This program is free software; you can redistribute it and/or modify |
18 |
|
* it under the terms of the GNU General Public License as published by |
19 |
|
* the Free Software Foundation; either version 2 of the License, or |
20 |
|
* (at your option) any later version. |
21 |
|
* |
22 |
|
* This program is distributed in the hope that it will be useful, |
23 |
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
24 |
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
25 |
|
* GNU General Public License for more details. |
26 |
|
* |
27 |
|
* You should have received a copy of the GNU General Public License |
28 |
|
* along with this program; if not, write to the Free Software |
29 |
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
30 |
|
* |
31 |
|
****************************************************************************/ |
32 |
|
|
33 |
#include "../utils/mem_transfer.h" |
#include "../utils/mem_transfer.h" |
34 |
|
|
38 |
const uint32_t rounding); |
const uint32_t rounding); |
39 |
typedef INTERPOLATE8X8 *INTERPOLATE8X8_PTR; |
typedef INTERPOLATE8X8 *INTERPOLATE8X8_PTR; |
40 |
|
|
|
typedef void (INTERPOLATE8X8_AVG2) (uint8_t *dst, |
|
|
const uint8_t *src1, |
|
|
const uint8_t *src2, |
|
|
const uint32_t stride, |
|
|
const uint32_t rounding); |
|
|
typedef INTERPOLATE8X8_AVG2 *INTERPOLATE8X8_AVG2_PTR; |
|
|
|
|
|
typedef void (INTERPOLATE8X8_AVG4) (uint8_t *dst, |
|
|
const uint8_t *src1, |
|
|
const uint8_t *src2, |
|
|
const uint8_t *src3, |
|
|
const uint8_t *src4, |
|
|
const uint32_t stride, |
|
|
const uint32_t rounding); |
|
|
typedef INTERPOLATE8X8_AVG4 *INTERPOLATE8X8_AVG4_PTR; |
|
|
|
|
|
typedef void (INTERPOLATE_LOWPASS) (uint8_t *dst, |
|
|
uint8_t *src, |
|
|
int32_t stride, |
|
|
int32_t rounding); |
|
|
|
|
|
typedef INTERPOLATE_LOWPASS *INTERPOLATE_LOWPASS_PTR; |
|
|
|
|
|
typedef void (INTERPOLATE_LOWPASS_HV) (uint8_t *dst1, |
|
|
uint8_t *dst2, |
|
|
uint8_t *src, |
|
|
int32_t stride, |
|
|
int32_t rounding); |
|
|
|
|
|
typedef INTERPOLATE_LOWPASS_HV *INTERPOLATE_LOWPASS_HV_PTR; |
|
|
|
|
|
typedef void (INTERPOLATE8X8_6TAP_LOWPASS) (uint8_t *dst, |
|
|
uint8_t *src, |
|
|
int32_t stride, |
|
|
int32_t rounding); |
|
|
|
|
|
typedef INTERPOLATE8X8_6TAP_LOWPASS *INTERPOLATE8X8_6TAP_LOWPASS_PTR; |
|
|
|
|
41 |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_h; |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_h; |
42 |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_v; |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_v; |
43 |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv; |
extern INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv; |
44 |
|
|
|
extern INTERPOLATE8X8_AVG2_PTR interpolate8x8_avg2; |
|
|
extern INTERPOLATE8X8_AVG4_PTR interpolate8x8_avg4; |
|
|
|
|
|
extern INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_h; |
|
|
extern INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_v; |
|
|
|
|
|
extern INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_h; |
|
|
extern INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_v; |
|
|
|
|
|
extern INTERPOLATE_LOWPASS_HV_PTR interpolate8x8_lowpass_hv; |
|
|
extern INTERPOLATE_LOWPASS_HV_PTR interpolate16x16_lowpass_hv; |
|
|
|
|
|
extern INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_h; |
|
|
extern INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_v; |
|
|
|
|
45 |
INTERPOLATE8X8 interpolate8x8_halfpel_h_c; |
INTERPOLATE8X8 interpolate8x8_halfpel_h_c; |
46 |
INTERPOLATE8X8 interpolate8x8_halfpel_v_c; |
INTERPOLATE8X8 interpolate8x8_halfpel_v_c; |
47 |
INTERPOLATE8X8 interpolate8x8_halfpel_hv_c; |
INTERPOLATE8X8 interpolate8x8_halfpel_hv_c; |
62 |
INTERPOLATE8X8 interpolate8x8_halfpel_v_ia64; |
INTERPOLATE8X8 interpolate8x8_halfpel_v_ia64; |
63 |
INTERPOLATE8X8 interpolate8x8_halfpel_hv_ia64; |
INTERPOLATE8X8 interpolate8x8_halfpel_hv_ia64; |
64 |
|
|
65 |
INTERPOLATE8X8_AVG2 interpolate8x8_avg2_c; |
void interpolate8x8_lowpass_h(uint8_t *dst, uint8_t *src, int32_t dst_stride, int32_t src_stride, int32_t rounding); |
66 |
INTERPOLATE8X8_AVG4 interpolate8x8_avg4_c; |
void interpolate8x8_lowpass_v(uint8_t *dst, uint8_t *src, int32_t dst_stride, int32_t src_stride, int32_t rounding); |
67 |
|
void interpolate8x8_lowpass_hv(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int32_t dst1_stride, int32_t dst2_stride, int32_t src_stride, int32_t rounding); |
68 |
INTERPOLATE8X8_AVG2 interpolate8x8_avg2_mmx; |
void interpolate8x8_bilinear2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int32_t dst_stride, int32_t src_stride, int32_t rounding); |
69 |
INTERPOLATE8X8_AVG4 interpolate8x8_avg4_mmx; |
void interpolate8x8_bilinear4(uint8_t *dst, uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4, int32_t stride, int32_t rounding); |
|
|
|
|
INTERPOLATE_LOWPASS interpolate8x8_lowpass_h_c; |
|
|
INTERPOLATE_LOWPASS interpolate8x8_lowpass_v_c; |
|
70 |
|
|
71 |
INTERPOLATE_LOWPASS interpolate16x16_lowpass_h_c; |
void interpolate8x8_c(uint8_t * const dst, |
72 |
INTERPOLATE_LOWPASS interpolate16x16_lowpass_v_c; |
const uint8_t * const src, |
73 |
|
const uint32_t x, |
74 |
INTERPOLATE_LOWPASS_HV interpolate8x8_lowpass_hv_c; |
const uint32_t y, |
75 |
INTERPOLATE_LOWPASS_HV interpolate16x16_lowpass_hv_c; |
const uint32_t stride); |
|
|
|
|
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_h_c; |
|
|
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_v_c; |
|
|
|
|
|
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_h_mmx; |
|
|
INTERPOLATE8X8_6TAP_LOWPASS interpolate8x8_6tap_lowpass_v_mmx; |
|
76 |
|
|
77 |
static __inline void |
static __inline void |
78 |
interpolate8x8_switch(uint8_t * const cur, |
interpolate8x8_switch(uint8_t * const cur, |
121 |
} |
} |
122 |
} |
} |
123 |
|
|
|
/*
 * Interpolate an 8x8 block at halfpel precision and return a pointer to the
 * result.  (dx, dy) are the motion-vector components in halfpel units.
 * Full-pel positions need no filtering, so the returned pointer aims straight
 * into the reference frame; halfpel positions are filtered into `buffer`
 * (via the interpolate8x8_halfpel_* function pointers) and `buffer` is
 * returned instead.
 */
static __inline uint8_t *
interpolate8x8_switch2(uint8_t * const buffer,
					   const uint8_t * const refn,
					   const uint32_t x,
					   const uint32_t y,
					   const int32_t dx,
					   const int dy,
					   const uint32_t stride,
					   const uint32_t rounding)
{
	const int h_half = dx & 1;	/* horizontal halfpel component present? */
	const int v_half = dy & 1;	/* vertical halfpel component present?   */

	if (!h_half && !v_half)
		/* full pel: reference data is usable as-is */
		return (uint8_t *)refn + (int)((y + dy / 2) * stride + x + dx / 2);

	{
		/* round odd components toward the top-left full-pel sample;
		 * (d-1)/2 (not d/2) keeps the result correct for negative vectors,
		 * since C division truncates toward zero */
		const int32_t ddx = h_half ? (dx - 1) / 2 : dx / 2;
		const int32_t ddy = v_half ? (dy - 1) / 2 : dy / 2;
		const uint8_t *const src =
			refn + (int)((y + ddy) * stride + x + ddx);

		if (h_half && v_half)
			interpolate8x8_halfpel_hv(buffer, src, stride, rounding);
		else if (h_half)
			interpolate8x8_halfpel_h(buffer, src, stride, rounding);
		else
			interpolate8x8_halfpel_v(buffer, src, stride, rounding);
	}

	return buffer;
}
|
|
|
|
124 |
|
|
125 |
/*
 * Interpolate an 8x8 block at quarterpel precision into `cur` at (x, y).
 * (dx, dy) are the motion-vector components in quarterpel units.
 * refh/refv/refhv are caller-supplied scratch buffers for the horizontal,
 * vertical and diagonal lowpass-filtered intermediates (sized by the caller;
 * layout assumed compatible with `stride` — NOTE(review): confirm against
 * the lowpass implementations).  Quarterpel samples are built by averaging
 * a halfpel lowpass result with its nearest neighbour.
 */
static __inline void interpolate8x8_quarterpel(uint8_t * const cur,
					 uint8_t * const refn,
					 uint8_t * const refh,
					 uint8_t * const refv,
					 uint8_t * const refhv,
					 const uint32_t x, const uint32_t y,
					 const int32_t dx, const int dy,
					 const uint32_t stride,
					 const uint32_t rounding)
{
	/* absolute reference position in quarterpel units */
	const int32_t xRef = x*4 + dx;
	const int32_t yRef = y*4 + dy;

	uint8_t *src, *dst;
	int32_t x_int, y_int, x_frac, y_frac;

	/* floor(xRef/4): C division truncates toward zero, so step negative
	 * positions with a nonzero remainder down by one */
	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;
	x_frac = xRef - (4*x_int);	/* 0..3 */

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;
	y_frac = yRef - (4*y_int);	/* 0..3 */

	src = refn + y_int * stride + x_int;
	dst = cur + y * stride + x;

	/* dispatch on the (y_frac, x_frac) sub-pel phase */
	switch((y_frac << 2) | (x_frac)) {

	case 0:	/* full pel: plain copy */
		transfer8x8_copy(dst, src, stride);
		break;

	case 1:	/* x=1/4: average source with horizontal halfpel */
		interpolate8x8_lowpass_h(refh, src, stride, rounding);
		interpolate8x8_avg2(dst, src, refh, stride, rounding);
		break;

	case 2:	/* x=1/2: horizontal halfpel directly into dst */
		interpolate8x8_lowpass_h(dst, src, stride, rounding);
		break;

	case 3:	/* x=3/4: average right neighbour with horizontal halfpel */
		interpolate8x8_lowpass_h(refh, src, stride, rounding);
		interpolate8x8_avg2(dst, src + 1, refh, stride, rounding);
		break;

	case 4:	/* y=1/4 */
		interpolate8x8_lowpass_v(refv, src, stride, rounding);
		interpolate8x8_avg2(dst, src, refv, stride, rounding);
		break;

	case 5:	/* x=1/4, y=1/4: 4-way average */
		interpolate8x8_lowpass_v(refv, src, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg4(dst, src, refh, refv, refhv, stride, rounding);
		break;

	case 6:	/* x=1/2, y=1/4 */
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg2(dst, refh, refhv, stride, rounding);
		break;

	case 7:	/* x=3/4, y=1/4 */
		interpolate8x8_lowpass_v(refv, src+1, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg4(dst, src+1, refh, refv, refhv, stride, rounding);
		break;

	case 8:	/* y=1/2: vertical halfpel directly into dst */
		interpolate8x8_lowpass_v(dst, src, stride, rounding);
		break;

	case 9:	/* x=1/4, y=1/2 */
		interpolate8x8_lowpass_v(refv, src, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg2(dst, refv, refhv, stride, rounding);
		break;

	case 10:	/* x=1/2, y=1/2: diagonal halfpel directly into dst */
		interpolate8x8_lowpass_hv(dst, refh, src, stride, rounding);
		break;

	case 11:	/* x=3/4, y=1/2 */
		interpolate8x8_lowpass_v(refv, src+1, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg2(dst, refv, refhv, stride, rounding);
		break;

	case 12:	/* y=3/4: average lower neighbour with vertical halfpel */
		interpolate8x8_lowpass_v(refv, src, stride, rounding);
		interpolate8x8_avg2(dst, src+stride, refv, stride, rounding);
		break;

	case 13:	/* x=1/4, y=3/4 */
		interpolate8x8_lowpass_v(refv, src, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg4(dst, src+stride, refh+stride, refv, refhv, stride, rounding);
		break;

	case 14:	/* x=1/2, y=3/4 */
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg2(dst, refh+stride, refhv, stride, rounding);
		break;

	case 15:	/* x=3/4, y=3/4 */
		interpolate8x8_lowpass_v(refv, src+1, stride, rounding);
		interpolate8x8_lowpass_hv(refhv, refh, src, stride, rounding);
		interpolate8x8_avg4(dst, src+stride+1, refh+stride, refv, refhv, stride, rounding);
		break;
	}
}
|
|
|
|
|
|
|
|
/*
 * Interpolate a 16x16 macroblock at quarterpel precision into `cur` at
 * (x, y); (dx, dy) are the motion-vector components in quarterpel units.
 * The 16x16 lowpass helpers fill refh/refv/refhv with halfpel
 * intermediates for the whole macroblock, and the final quarterpel
 * averaging is done per 8x8 quadrant with interpolate8x8_avg2/avg4.
 * NOTE(review): refh/refv/refhv are assumed to use the same `stride`
 * layout as the frame buffers — confirm against the lowpass kernels.
 */
static __inline void interpolate16x16_quarterpel(uint8_t * const cur,
					 uint8_t * const refn,
					 uint8_t * const refh,
					 uint8_t * const refv,
					 uint8_t * const refhv,
					 const uint32_t x, const uint32_t y,
					 const int32_t dx, const int dy,
					 const uint32_t stride,
					 const uint32_t rounding)
{
	/* absolute reference position in quarterpel units */
	const int32_t xRef = x*4 + dx;
	const int32_t yRef = y*4 + dy;

	uint8_t *src, *dst;
	int32_t x_int, y_int, x_frac, y_frac;
	uint32_t q[4];	/* offsets of the four 8x8 quadrants */
	int i;

	/* floor division toward -inf (C division truncates toward zero) */
	x_int = xRef/4;
	if (xRef < 0 && xRef % 4)
		x_int--;
	x_frac = xRef - (4*x_int);	/* 0..3 */

	y_int = yRef/4;
	if (yRef < 0 && yRef % 4)
		y_int--;
	y_frac = yRef - (4*y_int);	/* 0..3 */

	src = refn + y_int * stride + x_int;
	dst = cur + y * stride + x;

	/* top-left, top-right, bottom-left, bottom-right 8x8 quadrants */
	q[0] = 0;
	q[1] = 8;
	q[2] = 8 * stride;
	q[3] = 8 * stride + 8;

	switch((y_frac << 2) | (x_frac)) {

	case 0:	/* full pel */
		for (i = 0; i < 4; i++)
			transfer8x8_copy(dst + q[i], src + q[i], stride);
		break;

	case 1:	/* x=1/4 */
		interpolate16x16_lowpass_h(refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], src + q[i], refh + q[i], stride, rounding);
		break;

	case 2:	/* x=1/2 */
		interpolate16x16_lowpass_h(dst, src, stride, rounding);
		break;

	case 3:	/* x=3/4 */
		interpolate16x16_lowpass_h(refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], src + q[i] + 1, refh + q[i], stride, rounding);
		break;

	case 4:	/* y=1/4 */
		interpolate16x16_lowpass_v(refv, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], src + q[i], refv + q[i], stride, rounding);
		break;

	case 5:	/* x=1/4, y=1/4 */
		interpolate16x16_lowpass_v(refv, src, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg4(dst + q[i], src + q[i], refh + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	case 6:	/* x=1/2, y=1/4 */
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], refh + q[i], refhv + q[i], stride, rounding);
		break;

	case 7:	/* x=3/4, y=1/4 */
		interpolate16x16_lowpass_v(refv, src+1, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg4(dst + q[i], src + q[i] + 1, refh + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	case 8:	/* y=1/2 */
		interpolate16x16_lowpass_v(dst, src, stride, rounding);
		break;

	case 9:	/* x=1/4, y=1/2 */
		interpolate16x16_lowpass_v(refv, src, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	case 10:	/* x=1/2, y=1/2 */
		interpolate16x16_lowpass_hv(dst, refh, src, stride, rounding);
		break;

	case 11:	/* x=3/4, y=1/2 */
		interpolate16x16_lowpass_v(refv, src+1, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	case 12:	/* y=3/4 */
		interpolate16x16_lowpass_v(refv, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], src + stride + q[i], refv + q[i], stride, rounding);
		break;

	case 13:	/* x=1/4, y=3/4 */
		interpolate16x16_lowpass_v(refv, src, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg4(dst + q[i], src + stride + q[i], refh + stride + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	case 14:	/* x=1/2, y=3/4 */
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg2(dst + q[i], refh + stride + q[i], refhv + q[i], stride, rounding);
		break;

	case 15:	/* x=3/4, y=3/4 */
		interpolate16x16_lowpass_v(refv, src+1, stride, rounding);
		interpolate16x16_lowpass_hv(refhv, refh, src, stride, rounding);
		for (i = 0; i < 4; i++)
			interpolate8x8_avg4(dst + q[i], src + stride + 1 + q[i], refh + stride + q[i], refv + q[i], refhv + q[i], stride, rounding);
		break;

	default:
		/* unreachable for x_frac/y_frac in 0..3; kept as a safe
		 * fall-back that does the four quadrants independently */
		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x,   y,   dx, dy, stride, rounding);
		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y,   dx, dy, stride, rounding);
		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x,   y+8, dx, dy, stride, rounding);
		interpolate8x8_quarterpel(cur, refn, refh, refv, refhv, x+8, y+8, dx, dy, stride, rounding);
		return;
	}
}
|
|
|
|
#endif |
|