/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - 8x8 block-based halfpel interpolation -
 *
 *  Copyright(C) 2001-2003 Peter Ross <pross@xvid.org>
 *
 *  This program is free software ; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation ; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program ; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id$
 *
 ****************************************************************************/
#include "../portab.h"
#include "../global.h"
#include "interpolate8x8.h"

/* function pointers, bound at runtime to the C or SIMD implementation */
INTERPOLATE8X8_PTR interpolate8x8_halfpel_h;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv;

INTERPOLATE8X8_PTR interpolate8x4_halfpel_h;
INTERPOLATE8X8_PTR interpolate8x4_halfpel_v;
INTERPOLATE8X8_PTR interpolate8x4_halfpel_hv;

INTERPOLATE8X8_PTR interpolate8x8_halfpel_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_h_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv_add;

INTERPOLATE8X8_AVG2_PTR interpolate8x8_avg2;
INTERPOLATE8X8_AVG4_PTR interpolate8x8_avg4;

INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_h;
INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_v;
/* dst = (src1 + src2 + round) / 2 over an 8-pixel-wide block of `height` rows.
 *
 * MPEG-4 rounding control: round = 1 - rounding, so rounding==0 rounds
 * half up (+1 before the shift) and rounding==1 truncates. */
void
interpolate8x8_avg2_c(uint8_t * dst, const uint8_t * src1, const uint8_t * src2,
					  const uint32_t stride, const uint32_t rounding, const uint32_t height)
{
	uint32_t i;
	const int32_t round = 1 - rounding;

	for (i = 0; i < height; i++) {
		dst[0] = (src1[0] + src2[0] + round) >> 1;
		dst[1] = (src1[1] + src2[1] + round) >> 1;
		dst[2] = (src1[2] + src2[2] + round) >> 1;
		dst[3] = (src1[3] + src2[3] + round) >> 1;
		dst[4] = (src1[4] + src2[4] + round) >> 1;
		dst[5] = (src1[5] + src2[5] + round) >> 1;
		dst[6] = (src1[6] + src2[6] + round) >> 1;
		dst[7] = (src1[7] + src2[7] + round) >> 1;

		dst += stride;
		src1 += stride;
		src2 += stride;
	}
}
/* dst = (dst + src + 1) / 2 over 8 rows -- fullpel compensation-add.
 *
 * Delegates to interpolate8x8_avg2_c with rounding forced to 0 (round
 * half up); the `rounding` parameter is accepted only to match the
 * INTERPOLATE8X8_PTR signature and is deliberately ignored. */
void
interpolate8x8_halfpel_add_c(uint8_t * const dst,
							 const uint8_t * const src,
							 const uint32_t stride,
							 const uint32_t rounding)
{
	interpolate8x8_avg2_c(dst, dst, src, stride, 0, 8);
}
/* dst = (src1 + src2 + src3 + src4 + round) / 4 over an 8x8 block.
 *
 * MPEG-4 rounding control: round = 2 - rounding, so rounding==0 adds 2
 * (round half up for a 4-way average) and rounding==1 adds only 1. */
void
interpolate8x8_avg4_c(uint8_t * dst, const uint8_t * src1, const uint8_t * src2,
					  const uint8_t * src3, const uint8_t * src4,
					  const uint32_t stride, const uint32_t rounding)
{
	int32_t i;
	const int32_t round = 2 - rounding;

	for (i = 0; i < 8; i++) {
		dst[0] = (src1[0] + src2[0] + src3[0] + src4[0] + round) >> 2;
		dst[1] = (src1[1] + src2[1] + src3[1] + src4[1] + round) >> 2;
		dst[2] = (src1[2] + src2[2] + src3[2] + src4[2] + round) >> 2;
		dst[3] = (src1[3] + src2[3] + src3[3] + src4[3] + round) >> 2;
		dst[4] = (src1[4] + src2[4] + src3[4] + src4[4] + round) >> 2;
		dst[5] = (src1[5] + src2[5] + src3[5] + src4[5] + round) >> 2;
		dst[6] = (src1[6] + src2[6] + src3[6] + src4[6] + round) >> 2;
		dst[7] = (src1[7] + src2[7] + src3[7] + src4[7] + round) >> 2;

		dst += stride;
		src1 += stride;
		src2 += stride;
		src3 += stride;
		src4 += stride;
	}
}
/* dst = interpolate(src) -- horizontal halfpel, 8x8 block.
 *
 * MPEG-4 rounding control: rounding==1 truncates (no +1 before the
 * shift), rounding==0 rounds half up. Each row reads one extra source
 * pixel to the right (indices j+0 .. j+8). */
void
interpolate8x8_halfpel_h_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + 1] )>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + 2] )>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + 3] )>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + 4] )>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + 5] )>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + 6] )>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + 7] )>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + 8] )>>1);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + 1] + 1)>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + 2] + 1)>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + 3] + 1)>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + 4] + 1)>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + 5] + 1)>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + 6] + 1)>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + 7] + 1)>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + 8] + 1)>>1);
		}
	}
}
/* dst = interpolate(src) -- horizontal halfpel, 8x4 block.
 *
 * Same as interpolate8x8_halfpel_h_c but covering only 4 rows
 * (field/interlaced block handling). rounding==1 truncates,
 * rounding==0 rounds half up. */
void
interpolate8x4_halfpel_h_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + 1] )>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + 2] )>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + 3] )>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + 4] )>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + 5] )>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + 6] )>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + 7] )>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + 8] )>>1);
		}
	} else {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + 1] + 1)>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + 2] + 1)>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + 3] + 1)>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + 4] + 1)>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + 5] + 1)>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + 6] + 1)>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + 7] + 1)>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + 8] + 1)>>1);
		}
	}
}
/* dst = (dst + interpolate(src)) / 2 -- horizontal halfpel with add, 8x8.
 *
 * The inner horizontal average obeys MPEG-4 rounding control
 * (rounding==1 truncates, rounding==0 adds +1); the outer blend with
 * the existing dst always rounds half up (+1). */
void
interpolate8x8_halfpel_h_add_c(uint8_t * const dst,
							   const uint8_t * const src,
							   const uint32_t stride,
							   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j + 0] + src[j + 1] )>>1) + dst[j+0] + 1)>>1);
			dst[j + 1] = (uint8_t)((((src[j + 1] + src[j + 2] )>>1) + dst[j+1] + 1)>>1);
			dst[j + 2] = (uint8_t)((((src[j + 2] + src[j + 3] )>>1) + dst[j+2] + 1)>>1);
			dst[j + 3] = (uint8_t)((((src[j + 3] + src[j + 4] )>>1) + dst[j+3] + 1)>>1);
			dst[j + 4] = (uint8_t)((((src[j + 4] + src[j + 5] )>>1) + dst[j+4] + 1)>>1);
			dst[j + 5] = (uint8_t)((((src[j + 5] + src[j + 6] )>>1) + dst[j+5] + 1)>>1);
			dst[j + 6] = (uint8_t)((((src[j + 6] + src[j + 7] )>>1) + dst[j+6] + 1)>>1);
			dst[j + 7] = (uint8_t)((((src[j + 7] + src[j + 8] )>>1) + dst[j+7] + 1)>>1);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j + 0] + src[j + 1] + 1)>>1) + dst[j+0] + 1)>>1);
			dst[j + 1] = (uint8_t)((((src[j + 1] + src[j + 2] + 1)>>1) + dst[j+1] + 1)>>1);
			dst[j + 2] = (uint8_t)((((src[j + 2] + src[j + 3] + 1)>>1) + dst[j+2] + 1)>>1);
			dst[j + 3] = (uint8_t)((((src[j + 3] + src[j + 4] + 1)>>1) + dst[j+3] + 1)>>1);
			dst[j + 4] = (uint8_t)((((src[j + 4] + src[j + 5] + 1)>>1) + dst[j+4] + 1)>>1);
			dst[j + 5] = (uint8_t)((((src[j + 5] + src[j + 6] + 1)>>1) + dst[j+5] + 1)>>1);
			dst[j + 6] = (uint8_t)((((src[j + 6] + src[j + 7] + 1)>>1) + dst[j+6] + 1)>>1);
			dst[j + 7] = (uint8_t)((((src[j + 7] + src[j + 8] + 1)>>1) + dst[j+7] + 1)>>1);
		}
	}
}
/* dst = interpolate(src) -- vertical halfpel, 8x8 block.
 *
 * MPEG-4 rounding control: rounding==1 truncates (no +1 before the
 * shift), rounding==0 rounds half up. Reads one extra source row below
 * the block (src[j + stride + i]). */
void
interpolate8x8_halfpel_v_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + stride + 0] )>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + stride + 1] )>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + stride + 2] )>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + stride + 3] )>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + stride + 4] )>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + stride + 5] )>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + stride + 6] )>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + stride + 7] )>>1);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + stride + 0] + 1)>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + stride + 1] + 1)>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + stride + 2] + 1)>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + stride + 3] + 1)>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + stride + 4] + 1)>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + stride + 5] + 1)>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + stride + 6] + 1)>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + stride + 7] + 1)>>1);
		}
	}
}
/* dst = interpolate(src) -- vertical halfpel, 8x4 block.
 *
 * Same as interpolate8x8_halfpel_v_c but covering only 4 rows.
 * rounding==1 truncates, rounding==0 rounds half up. */
void
interpolate8x4_halfpel_v_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + stride + 0] )>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + stride + 1] )>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + stride + 2] )>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + stride + 3] )>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + stride + 4] )>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + stride + 5] )>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + stride + 6] )>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + stride + 7] )>>1);
		}
	} else {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j + 0] + src[j + stride + 0] + 1)>>1);
			dst[j + 1] = (uint8_t)((src[j + 1] + src[j + stride + 1] + 1)>>1);
			dst[j + 2] = (uint8_t)((src[j + 2] + src[j + stride + 2] + 1)>>1);
			dst[j + 3] = (uint8_t)((src[j + 3] + src[j + stride + 3] + 1)>>1);
			dst[j + 4] = (uint8_t)((src[j + 4] + src[j + stride + 4] + 1)>>1);
			dst[j + 5] = (uint8_t)((src[j + 5] + src[j + stride + 5] + 1)>>1);
			dst[j + 6] = (uint8_t)((src[j + 6] + src[j + stride + 6] + 1)>>1);
			dst[j + 7] = (uint8_t)((src[j + 7] + src[j + stride + 7] + 1)>>1);
		}
	}
}
/* dst = (dst + interpolate(src)) / 2 -- vertical halfpel with add, 8x8.
 *
 * Inner vertical average obeys rounding control (rounding==1 truncates,
 * rounding==0 adds +1); the outer blend with dst always rounds half up. */
void
interpolate8x8_halfpel_v_add_c(uint8_t * const dst,
							   const uint8_t * const src,
							   const uint32_t stride,
							   const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j + 0] + src[j + stride + 0] )>>1) + dst[j+0] + 1)>>1);
			dst[j + 1] = (uint8_t)((((src[j + 1] + src[j + stride + 1] )>>1) + dst[j+1] + 1)>>1);
			dst[j + 2] = (uint8_t)((((src[j + 2] + src[j + stride + 2] )>>1) + dst[j+2] + 1)>>1);
			dst[j + 3] = (uint8_t)((((src[j + 3] + src[j + stride + 3] )>>1) + dst[j+3] + 1)>>1);
			dst[j + 4] = (uint8_t)((((src[j + 4] + src[j + stride + 4] )>>1) + dst[j+4] + 1)>>1);
			dst[j + 5] = (uint8_t)((((src[j + 5] + src[j + stride + 5] )>>1) + dst[j+5] + 1)>>1);
			dst[j + 6] = (uint8_t)((((src[j + 6] + src[j + stride + 6] )>>1) + dst[j+6] + 1)>>1);
			dst[j + 7] = (uint8_t)((((src[j + 7] + src[j + stride + 7] )>>1) + dst[j+7] + 1)>>1);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j + 0] + src[j + stride + 0] + 1)>>1) + dst[j+0] + 1)>>1);
			dst[j + 1] = (uint8_t)((((src[j + 1] + src[j + stride + 1] + 1)>>1) + dst[j+1] + 1)>>1);
			dst[j + 2] = (uint8_t)((((src[j + 2] + src[j + stride + 2] + 1)>>1) + dst[j+2] + 1)>>1);
			dst[j + 3] = (uint8_t)((((src[j + 3] + src[j + stride + 3] + 1)>>1) + dst[j+3] + 1)>>1);
			dst[j + 4] = (uint8_t)((((src[j + 4] + src[j + stride + 4] + 1)>>1) + dst[j+4] + 1)>>1);
			dst[j + 5] = (uint8_t)((((src[j + 5] + src[j + stride + 5] + 1)>>1) + dst[j+5] + 1)>>1);
			dst[j + 6] = (uint8_t)((((src[j + 6] + src[j + stride + 6] + 1)>>1) + dst[j+6] + 1)>>1);
			dst[j + 7] = (uint8_t)((((src[j + 7] + src[j + stride + 7] + 1)>>1) + dst[j+7] + 1)>>1);
		}
	}
}
/* dst = interpolate(src) -- diagonal (h+v) halfpel, 8x8 block.
 *
 * Four-pixel average; MPEG-4 rounding control: rounding==1 adds 1,
 * rounding==0 adds 2 (round half up) before the >>2. Reads one extra
 * source row and one extra source column. */
void
interpolate8x8_halfpel_hv_c(uint8_t * const dst,
							const uint8_t * const src,
							const uint32_t stride,
							const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +1)>>2);
			dst[j + 1] = (uint8_t)((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +1)>>2);
			dst[j + 2] = (uint8_t)((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +1)>>2);
			dst[j + 3] = (uint8_t)((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +1)>>2);
			dst[j + 4] = (uint8_t)((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +1)>>2);
			dst[j + 5] = (uint8_t)((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +1)>>2);
			dst[j + 6] = (uint8_t)((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +1)>>2);
			dst[j + 7] = (uint8_t)((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +1)>>2);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +2)>>2);
			dst[j + 1] = (uint8_t)((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +2)>>2);
			dst[j + 2] = (uint8_t)((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +2)>>2);
			dst[j + 3] = (uint8_t)((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +2)>>2);
			dst[j + 4] = (uint8_t)((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +2)>>2);
			dst[j + 5] = (uint8_t)((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +2)>>2);
			dst[j + 6] = (uint8_t)((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +2)>>2);
			dst[j + 7] = (uint8_t)((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +2)>>2);
		}
	}
}
/* dst = interpolate(src) -- diagonal (h+v) halfpel, 8x4 block.
 *
 * Same as interpolate8x8_halfpel_hv_c but covering only 4 rows.
 * rounding==1 adds 1, rounding==0 adds 2 before the >>2. */
void
interpolate8x4_halfpel_hv_c(uint8_t * const dst,
							const uint8_t * const src,
							const uint32_t stride,
							const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +1)>>2);
			dst[j + 1] = (uint8_t)((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +1)>>2);
			dst[j + 2] = (uint8_t)((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +1)>>2);
			dst[j + 3] = (uint8_t)((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +1)>>2);
			dst[j + 4] = (uint8_t)((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +1)>>2);
			dst[j + 5] = (uint8_t)((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +1)>>2);
			dst[j + 6] = (uint8_t)((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +1)>>2);
			dst[j + 7] = (uint8_t)((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +1)>>2);
		}
	} else {
		for (j = 0; j < 4*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +2)>>2);
			dst[j + 1] = (uint8_t)((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +2)>>2);
			dst[j + 2] = (uint8_t)((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +2)>>2);
			dst[j + 3] = (uint8_t)((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +2)>>2);
			dst[j + 4] = (uint8_t)((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +2)>>2);
			dst[j + 5] = (uint8_t)((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +2)>>2);
			dst[j + 6] = (uint8_t)((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +2)>>2);
			dst[j + 7] = (uint8_t)((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +2)>>2);
		}
	}
}
/* dst = (dst + interpolate(src)) / 2 -- diagonal halfpel with add, 8x8.
 *
 * NOTE the asymmetric outer rounding, preserved from the reference
 * implementation: in the rounding==1 branch the inner average adds 1 and
 * the outer blend truncates (no +1); in the rounding==0 branch the inner
 * average adds 2 and the outer blend rounds half up (+1). */
void
interpolate8x8_halfpel_hv_add_c(uint8_t * const dst,
								const uint8_t * const src,
								const uint32_t stride,
								const uint32_t rounding)
{
	uintptr_t j;

	if (rounding) {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +1)>>2) + dst[j+0])>>1);
			dst[j + 1] = (uint8_t)((((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +1)>>2) + dst[j+1])>>1);
			dst[j + 2] = (uint8_t)((((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +1)>>2) + dst[j+2])>>1);
			dst[j + 3] = (uint8_t)((((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +1)>>2) + dst[j+3])>>1);
			dst[j + 4] = (uint8_t)((((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +1)>>2) + dst[j+4])>>1);
			dst[j + 5] = (uint8_t)((((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +1)>>2) + dst[j+5])>>1);
			dst[j + 6] = (uint8_t)((((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +1)>>2) + dst[j+6])>>1);
			dst[j + 7] = (uint8_t)((((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +1)>>2) + dst[j+7])>>1);
		}
	} else {
		for (j = 0; j < 8*stride; j+=stride) {
			dst[j + 0] = (uint8_t)((((src[j+0] + src[j+1] + src[j+stride+0] + src[j+stride+1] +2)>>2) + dst[j+0] + 1)>>1);
			dst[j + 1] = (uint8_t)((((src[j+1] + src[j+2] + src[j+stride+1] + src[j+stride+2] +2)>>2) + dst[j+1] + 1)>>1);
			dst[j + 2] = (uint8_t)((((src[j+2] + src[j+3] + src[j+stride+2] + src[j+stride+3] +2)>>2) + dst[j+2] + 1)>>1);
			dst[j + 3] = (uint8_t)((((src[j+3] + src[j+4] + src[j+stride+3] + src[j+stride+4] +2)>>2) + dst[j+3] + 1)>>1);
			dst[j + 4] = (uint8_t)((((src[j+4] + src[j+5] + src[j+stride+4] + src[j+stride+5] +2)>>2) + dst[j+4] + 1)>>1);
			dst[j + 5] = (uint8_t)((((src[j+5] + src[j+6] + src[j+stride+5] + src[j+stride+6] +2)>>2) + dst[j+5] + 1)>>1);
			dst[j + 6] = (uint8_t)((((src[j+6] + src[j+7] + src[j+stride+6] + src[j+stride+7] +2)>>2) + dst[j+6] + 1)>>1);
			dst[j + 7] = (uint8_t)((((src[j+7] + src[j+8] + src[j+stride+7] + src[j+stride+8] +2)>>2) + dst[j+7] + 1)>>1);
		}
	}
}