3 |
* XVID MPEG-4 VIDEO CODEC |
* XVID MPEG-4 VIDEO CODEC |
4 |
* - 8x8 block-based halfpel interpolation - |
* - 8x8 block-based halfpel interpolation - |
5 |
* |
* |
6 |
* Copyright(C) 2002 Peter Ross <pross@xvid.org> |
* Copyright(C) 2001-2003 Peter Ross <pross@xvid.org> |
|
* Copyright(C) 2002 MinChen <chenm002@163.com> |
|
7 |
* |
* |
8 |
* This file is part of XviD, a free MPEG-4 video encoder/decoder |
* This program is free software ; you can redistribute it and/or modify |
9 |
* |
* it under the terms of the GNU General Public License as published by |
|
* XviD is free software; you can redistribute it and/or modify it |
|
|
* under the terms of the GNU General Public License as published by |
|
10 |
* the Free Software Foundation; either version 2 of the License, or |
* the Free Software Foundation; either version 2 of the License, or |
11 |
* (at your option) any later version. |
* (at your option) any later version. |
12 |
* |
* |
19 |
* along with this program; if not, write to the Free Software |
* along with this program; if not, write to the Free Software |
20 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 |
* |
* |
|
* Under section 8 of the GNU General Public License, the copyright |
|
|
* holders of XVID explicitly forbid distribution in the following |
|
|
* countries: |
|
|
* |
|
|
* - Japan |
|
|
* - United States of America |
|
|
* |
|
|
* Linking XviD statically or dynamically with other modules is making a |
|
|
* combined work based on XviD. Thus, the terms and conditions of the |
|
|
* GNU General Public License cover the whole combination. |
|
|
* |
|
|
* As a special exception, the copyright holders of XviD give you |
|
|
* permission to link XviD with independent modules that communicate with |
|
|
* XviD solely through the VFW1.1 and DShow interfaces, regardless of the |
|
|
* license terms of these independent modules, and to copy and distribute |
|
|
* the resulting combined work under terms of your choice, provided that |
|
|
* every copy of the combined work is accompanied by a complete copy of |
|
|
* the source code of XviD (the version of XviD used to produce the |
|
|
* combined work), being distributed under the terms of the GNU General |
|
|
* Public License plus this exception. An independent module is a module |
|
|
* which is not derived from or based on XviD. |
|
|
* |
|
|
* Note that people who make modified versions of XviD are not obligated |
|
|
* to grant this special exception for their modified versions; it is |
|
|
* their choice whether to do so. The GNU General Public License gives |
|
|
* permission to release a modified version without this exception; this |
|
|
* exception also makes it possible to release a modified version which |
|
|
* carries forward this exception. |
|
|
* |
|
22 |
* $Id$ |
* $Id$ |
23 |
* |
* |
24 |
****************************************************************************/ |
****************************************************************************/ |
25 |
|
|
26 |
#include "../portab.h" |
#include "../portab.h" |
27 |
|
#include "../global.h" |
28 |
#include "interpolate8x8.h" |
#include "interpolate8x8.h" |
29 |
|
|
30 |
/* function pointers
 * NOTE(review): presumably assigned once at startup to the best available
 * implementation (plain C vs. SIMD) by CPU-detection code elsewhere — confirm. */
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv;

/* 8x4 (field/interlaced) halfpel variants */
INTERPOLATE8X8_PTR interpolate8x4_halfpel_h;
INTERPOLATE8X8_PTR interpolate8x4_halfpel_v;
INTERPOLATE8X8_PTR interpolate8x4_halfpel_hv;

/* halfpel interpolation averaged into an existing prediction */
INTERPOLATE8X8_PTR interpolate8x8_halfpel_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_h_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v_add;
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv_add;

/* 2- and 4-source pixel averagers */
INTERPOLATE8X8_AVG2_PTR interpolate8x8_avg2;
INTERPOLATE8X8_AVG4_PTR interpolate8x8_avg4;

/* quarterpel lowpass filters */
INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_h;
INTERPOLATE_LOWPASS_PTR interpolate8x8_lowpass_v;

INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_h;
INTERPOLATE_LOWPASS_PTR interpolate16x16_lowpass_v;

INTERPOLATE_LOWPASS_HV_PTR interpolate8x8_lowpass_hv;
INTERPOLATE_LOWPASS_HV_PTR interpolate16x16_lowpass_hv;

INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_h;
INTERPOLATE8X8_6TAP_LOWPASS_PTR interpolate8x8_6tap_lowpass_v;
/* dst = (src1 + src2 + bias) / 2, element-wise, over an 8-pixel-wide block
 * of 'height' rows.  bias is 1 when rounding==0 (round half up) and 0 when
 * rounding==1 (truncate), matching MPEG-4 halfpel rounding control. */
void
interpolate8x8_avg2_c(uint8_t * dst, const uint8_t * src1, const uint8_t *src2, const uint32_t stride, const uint32_t rounding, const uint32_t height)
{
	const int32_t bias = 1 - rounding;
	uint32_t row;
	int col;

	for (row = 0; row < height; row++) {
		for (col = 0; col < 8; col++)
			dst[col] = (uint8_t)((src1[col] + src2[col] + bias) >> 1);

		dst  += stride;
		src1 += stride;
		src2 += stride;
	}
}
80 |
|
|
81 |
|
/* dst = (dst + src + 1) / 2 over an 8x8 block, via the 2-source averager.
 * NOTE(review): 'rounding' is part of the common function-pointer signature
 * but is ignored here — the averager is always called with rounding 0. */
void
interpolate8x8_halfpel_add_c(uint8_t * const dst, const uint8_t * const src, const uint32_t stride, const uint32_t rounding)
{
	interpolate8x8_avg2_c(dst, dst, src, stride, 0, 8);
}
86 |
|
|
87 |
|
/* dst = (src1 + src2 + src3 + src4 + bias) / 4, element-wise, over an 8x8
 * block.  bias is 2 - rounding: 2 rounds half up, 1 biases slightly down. */
void interpolate8x8_avg4_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, const uint32_t stride, const uint32_t rounding)
{
	const int32_t bias = 2 - rounding;
	int32_t row;
	int col;

	for (row = 0; row < 8; row++) {
		for (col = 0; col < 8; col++)
			dst[col] = (uint8_t)((src1[col] + src2[col] + src3[col] + src4[col] + bias) >> 2);

		dst  += stride;
		src1 += stride;
		src2 += stride;
		src3 += stride;
		src4 += stride;
	}
}
109 |
|
|
110 |
/* dst = interpolate(src): horizontal halfpel filter over an 8x8 block.
 * Each output is the average of a pixel and its right neighbour, so src
 * must be readable one column past the block (src[j+8]).
 * rounding==0 adds +1 before the shift (round half up); rounding==1 omits it. */
void
interpolate8x8_halfpel_h_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 8 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + k + 1] + bias) >> 1);
}
/* dst = interpolate(src): horizontal halfpel filter over an 8x4 block
 * (8 wide, 4 rows — the field/interlaced variant of the 8x8 filter).
 * rounding==0 adds +1 before the shift (round half up); rounding==1 omits it. */
void
interpolate8x4_halfpel_h_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 4 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + k + 1] + bias) >> 1);
}
179 |
|
|
180 |
|
/* dst = (dst + interpolate_h(src) + 1) / 2 over an 8x8 block: the
 * horizontal halfpel value is computed first, then averaged (always with
 * +1 rounding) into the existing prediction in dst.
 * rounding affects only the inner halfpel average (+1 when rounding==0). */
void
interpolate8x8_halfpel_h_add_c(uint8_t * const dst,
							   const uint8_t * const src,
							   const uint32_t stride,
							   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 8 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((((src[j + k] + src[j + k + 1] + bias) >> 1) + dst[j + k] + 1) >> 1);
}
214 |
|
|
215 |
|
/* dst = interpolate(src): vertical halfpel filter over an 8x8 block.
 * Each output averages a pixel with the one directly below, so src must
 * be readable one row past the block.
 * rounding==0 adds +1 before the shift (round half up); rounding==1 omits it. */
void
interpolate8x8_halfpel_v_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 8 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + stride + k] + bias) >> 1);
}
250 |
|
|
251 |
|
/* dst = interpolate(src): vertical halfpel filter over an 8x4 block
 * (8 wide, 4 rows).  src must be readable one row past the block.
 * rounding==0 adds +1 before the shift (round half up); rounding==1 omits it. */
void
interpolate8x4_halfpel_v_c(uint8_t * const dst,
						   const uint8_t * const src,
						   const uint32_t stride,
						   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 4 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + stride + k] + bias) >> 1);
}
286 |
|
|
287 |
|
/* dst = (dst + interpolate_v(src) + 1) / 2 over an 8x8 block: the vertical
 * halfpel value is computed first, then averaged (always with +1 rounding)
 * into the existing prediction in dst.
 * rounding affects only the inner halfpel average (+1 when rounding==0). */
void
interpolate8x8_halfpel_v_add_c(uint8_t * const dst,
							   const uint8_t * const src,
							   const uint32_t stride,
							   const uint32_t rounding)
{
	const int bias = rounding ? 0 : 1;
	uintptr_t j;
	int k;

	for (j = 0; j < 8 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((((src[j + k] + src[j + stride + k] + bias) >> 1) + dst[j + k] + 1) >> 1);
}
} |
322 |
|
|
323 |
|
/* dst = interpolate(src): diagonal (h+v) halfpel filter over an 8x8 block.
 * Each output averages the 2x2 neighbourhood, so src must be readable one
 * column and one row past the block.
 * rounding==0 adds +2 before the >>2 (round half up); rounding==1 adds +1. */
void
interpolate8x8_halfpel_hv_c(uint8_t * const dst,
							const uint8_t * const src,
							const uint32_t stride,
							const uint32_t rounding)
{
	const int bias = rounding ? 1 : 2;
	uintptr_t j;
	int k;

	for (j = 0; j < 8 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + k + 1] +
									src[j + stride + k] + src[j + stride + k + 1] + bias) >> 2);
}
357 |
|
|
358 |
/* add by MinChen <chenm001@163.com> */ |
/* dst = interpolate(src): diagonal (h+v) halfpel filter over an 8x4 block
 * (8 wide, 4 rows).  src must be readable one column and one row past the
 * block.  rounding==0 adds +2 before the >>2; rounding==1 adds +1. */
void
interpolate8x4_halfpel_hv_c(uint8_t * const dst,
							const uint8_t * const src,
							const uint32_t stride,
							const uint32_t rounding)
{
	const int bias = rounding ? 1 : 2;
	uintptr_t j;
	int k;

	for (j = 0; j < 4 * stride; j += stride)
		for (k = 0; k < 8; k++)
			dst[j + k] = (uint8_t)((src[j + k] + src[j + k + 1] +
									src[j + stride + k] + src[j + stride + k + 1] + bias) >> 2);
}
392 |
|
|
393 |
|
/* dst = (interpolate_hv(src) + dst) / 2 over an 8x8 block.
 * NOTE(review): the two rounding paths are asymmetric in the outer average
 * — the rounding==1 path omits the final +1 while rounding==0 includes it.
 * This matches the original implementation and is preserved verbatim. */
void
interpolate8x8_halfpel_hv_add_c(uint8_t * const dst,
								const uint8_t * const src,
								const uint32_t stride,
								const uint32_t rounding)
{
	uintptr_t j;
	int k;

	if (rounding) {
		for (j = 0; j < 8 * stride; j += stride)
			for (k = 0; k < 8; k++)
				dst[j + k] = (uint8_t)((((src[j + k] + src[j + k + 1] +
										  src[j + stride + k] + src[j + stride + k + 1] + 1) >> 2) +
										dst[j + k]) >> 1);
	} else {
		for (j = 0; j < 8 * stride; j += stride)
			for (k = 0; k < 8; k++)
				dst[j + k] = (uint8_t)((((src[j + k] + src[j + k + 1] +
										  src[j + stride + k] + src[j + stride + k + 1] + 2) >> 2) +
										dst[j + k] + 1) >> 1);
	}
}
429 |
* QPEL STUFF STARTS HERE * |
* QPEL STUFF STARTS HERE * |
430 |
*************************************************************/ |
*************************************************************/ |
431 |
|
|
432 |
#define CLIP(X,A,B) (X < A) ? (A) : ((X > B) ? (B) : (X)) |
/* Horizontal 6-tap lowpass filter over an 8x8 block (quarterpel support).
 * Effective tap weights are (1, -5, 20, 20, -5, 1) / 32: each output reads
 * from src[-2] to src[+3] around the interpolation point, so the caller must
 * guarantee two valid columns left of the block and three to the right
 * (src[10] on the last output).
 * rounding==0 uses bias 16 (round half up); rounding==1 uses 15. */
void interpolate8x8_6tap_lowpass_h_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t i;
	uint8_t round_add = 16 - rounding;	/* bias added before the >>5 */

	for(i = 0; i < 8; i++)
	{
		/* 20*(a+b) - 5*(c+d) is factored as 5*(((a+b)<<2) - (c+d))
		   to share a single multiply per output */
		dst[0] = CLIP((((src[-2] + src[3]) + 5 * (((src[0] + src[1])<<2) - (src[-1] + src[2])) + round_add) >> 5), 0, 255);
		dst[1] = CLIP((((src[-1] + src[4]) + 5 * (((src[1] + src[2])<<2) - (src[0] + src[3])) + round_add) >> 5), 0, 255);
		dst[2] = CLIP((((src[0] + src[5]) + 5 * (((src[2] + src[3])<<2) - (src[1] + src[4])) + round_add) >> 5), 0, 255);
		dst[3] = CLIP((((src[1] + src[6]) + 5 * (((src[3] + src[4])<<2) - (src[2] + src[5])) + round_add) >> 5), 0, 255);
		dst[4] = CLIP((((src[2] + src[7]) + 5 * (((src[4] + src[5])<<2) - (src[3] + src[6])) + round_add) >> 5), 0, 255);
		dst[5] = CLIP((((src[3] + src[8]) + 5 * (((src[5] + src[6])<<2) - (src[4] + src[7])) + round_add) >> 5), 0, 255);
		dst[6] = CLIP((((src[4] + src[9]) + 5 * (((src[6] + src[7])<<2) - (src[5] + src[8])) + round_add) >> 5), 0, 255);
		dst[7] = CLIP((((src[5] + src[10]) + 5 * (((src[7] + src[8])<<2) - (src[6] + src[9])) + round_add) >> 5), 0, 255);

		dst += stride;
		src += stride;
	}
}
453 |
|
|
454 |
void interpolate8x8_lowpass_v(uint8_t *dst, uint8_t *src, int32_t dst_stride, int32_t src_stride, int32_t rounding) |
/* Horizontal lowpass filter for quarterpel over a 16-wide block.
 * Processes 17 rows (16 + 1, as needed by the subsequent vertical pass).
 * Interior outputs (dst[3..12]) use the regular 6-tap kernel
 * (1, -5, 20, 20, -5, 1)/32; outputs near the left (dst[0..2]) and right
 * (dst[13..15]) edges use algebraically folded variants that mirror the
 * missing samples instead of reading outside src[0..16].
 * rounding==0 uses bias 16 (round half up); rounding==1 uses 15. */
void interpolate16x16_lowpass_h_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t i;
	uint8_t round_add = 16 - rounding;	/* bias added before the >>5 */

	for(i = 0; i < 17; i++)
	{
		/* left-edge outputs: mirrored-boundary forms of the 6-tap kernel */
		dst[0] = CLIP(((7 * ((src[0]<<1) - src[2]) + 23 * src[1] + 3 * src[3] - src[4] + round_add) >> 5), 0, 255);
		dst[1] = CLIP(((19 * src[1] + 20 * src[2] - src[5] + 3 * (src[4] - src[0] - (src[3]<<1)) + round_add) >> 5), 0, 255);
		dst[2] = CLIP(((20 * (src[2] + src[3]) + (src[0]<<1) + 3 * (src[5] - ((src[1] + src[4])<<1)) - src[6] + round_add) >> 5), 0, 255);

		/* interior outputs: full 6-tap kernel */
		dst[3] = CLIP(((20 * (src[3] + src[4]) + 3 * ((src[6] + src[1]) - ((src[2] + src[5])<<1)) - (src[0] + src[7]) + round_add) >> 5), 0, 255);
		dst[4] = CLIP(((20 * (src[4] + src[5]) - 3 * (((src[3] + src[6])<<1) - (src[2] + src[7])) - (src[1] + src[8]) + round_add) >> 5), 0, 255);
		dst[5] = CLIP(((20 * (src[5] + src[6]) - 3 * (((src[4] + src[7])<<1) - (src[3] + src[8])) - (src[2] + src[9]) + round_add) >> 5), 0, 255);
		dst[6] = CLIP(((20 * (src[6] + src[7]) - 3 * (((src[5] + src[8])<<1) - (src[4] + src[9])) - (src[3] + src[10]) + round_add) >> 5), 0, 255);
		dst[7] = CLIP(((20 * (src[7] + src[8]) - 3 * (((src[6] + src[9])<<1) - (src[5] + src[10])) - (src[4] + src[11]) + round_add) >> 5), 0, 255);
		dst[8] = CLIP(((20 * (src[8] + src[9]) - 3 * (((src[7] + src[10])<<1) - (src[6] + src[11])) - (src[5] + src[12]) + round_add) >> 5), 0, 255);
		dst[9] = CLIP(((20 * (src[9] + src[10]) - 3 * (((src[8] + src[11])<<1) - (src[7] + src[12])) - (src[6] + src[13]) + round_add) >> 5), 0, 255);
		dst[10] = CLIP(((20 * (src[10] + src[11]) - 3 * (((src[9] + src[12])<<1) - (src[8] + src[13])) - (src[7] + src[14]) + round_add) >> 5), 0, 255);
		dst[11] = CLIP(((20 * (src[11] + src[12]) - 3 * (((src[10] + src[13])<<1) - (src[9] + src[14])) - (src[8] + src[15]) + round_add) >> 5), 0, 255);
		dst[12] = CLIP(((20 * (src[12] + src[13]) - 3 * (((src[11] + src[14])<<1) - (src[10] + src[15])) - (src[9] + src[16]) + round_add) >> 5), 0, 255);

		/* right-edge outputs: mirrored-boundary forms */
		dst[13] = CLIP(((20 * (src[13] + src[14]) + (src[16]<<1) + 3 * (src[11] - ((src[12] + src[15]) << 1)) - src[10] + round_add) >> 5), 0, 255);
		dst[14] = CLIP(((19 * src[15] + 20 * src[14] + 3 * (src[12] - src[16] - (src[13] << 1)) - src[11] + round_add) >> 5), 0, 255);
		dst[15] = CLIP(((23 * src[15] + 7 * ((src[16]<<1) - src[14]) + 3 * src[13] - src[12] + round_add) >> 5), 0, 255);

		dst += stride;
		src += stride;
	}
}
485 |
|
|
486 |
|
/* Horizontal lowpass filter for quarterpel over an 8-wide block.
 * Processes 9 rows (8 + 1, as needed by the subsequent vertical pass).
 * Interior outputs (dst[3], dst[4]) use the regular 6-tap kernel
 * (1, -5, 20, 20, -5, 1)/32; edge outputs use algebraically folded variants
 * that mirror the missing samples instead of reading outside src[0..8].
 * rounding==0 uses bias 16 (round half up); rounding==1 uses 15. */
void interpolate8x8_lowpass_h_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t i;
	uint8_t round_add = 16 - rounding;	/* bias added before the >>5 */

	for(i = 0; i < 9; i++)
	{
		/* left edge (mirrored), interior, right edge (mirrored) */
		dst[0] = CLIP(((7 * ((src[0]<<1) - src[2]) + 23 * src[1] + 3 * src[3] - src[4] + round_add) >> 5), 0, 255);
		dst[1] = CLIP(((19 * src[1] + 20 * src[2] - src[5] + 3 * (src[4] - src[0] - (src[3]<<1)) + round_add) >> 5), 0, 255);
		dst[2] = CLIP(((20 * (src[2] + src[3]) + (src[0]<<1) + 3 * (src[5] - ((src[1] + src[4])<<1)) - src[6] + round_add) >> 5), 0, 255);
		dst[3] = CLIP(((20 * (src[3] + src[4]) + 3 * ((src[6] + src[1]) - ((src[2] + src[5])<<1)) - (src[0] + src[7]) + round_add) >> 5), 0, 255);
		dst[4] = CLIP(((20 * (src[4] + src[5]) - 3 * (((src[3] + src[6])<<1) - (src[2] + src[7])) - (src[1] + src[8]) + round_add) >> 5), 0, 255);
		dst[5] = CLIP(((20 * (src[5] + src[6]) + (src[8]<<1) + 3 * (src[3] - ((src[4] + src[7]) << 1)) - src[2] + round_add) >> 5), 0, 255);
		dst[6] = CLIP(((19 * src[7] + 20 * src[6] + 3 * (src[4] - src[8] - (src[5] << 1)) - src[3] + round_add) >> 5), 0, 255);
		dst[7] = CLIP(((23 * src[7] + 7 * ((src[8]<<1) - src[6]) + 3 * src[5] - src[4] + round_add) >> 5), 0, 255);

		dst += stride;
		src += stride;
	}
}
507 |
|
|
508 |
|
/*
 * Vertical 6-tap lowpass filter over an 8x8 block.
 * For each of the 8 columns, reads 13 vertically adjacent source samples
 * (rows -2 .. +10 relative to src) and produces 8 outputs with the classic
 * 6-tap FIR (1, -5, 20, 20, -5, 1) / 32: each expression is
 * (outer pair) + 5*(4*(center pair) - (inner pair)).
 * Output is biased by round_add = 16 - rounding, shifted >>5 and clamped
 * to [0,255] via the file-local CLIP macro.
 * NOTE(review): src must have 2 valid rows above and 2 below the 8x8+1
 * footprint (indices -2*stride .. 10*stride) — confirm callers guarantee it.
 */
void interpolate8x8_6tap_lowpass_v_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t col;
	uint8_t round_add = 16 - rounding;	/* rounding bias added before >>5 */

	for (col = 0; col < 8; col++) {
		/* gather the 13 vertical taps for this column */
		int32_t src_2 = src[-2*stride];
		int32_t src_1 = src[-stride];
		int32_t src0  = src[0];
		int32_t src1  = src[stride];
		int32_t src2  = src[2 * stride];
		int32_t src3  = src[3 * stride];
		int32_t src4  = src[4 * stride];
		int32_t src5  = src[5 * stride];
		int32_t src6  = src[6 * stride];
		int32_t src7  = src[7 * stride];
		int32_t src8  = src[8 * stride];
		int32_t src9  = src[9 * stride];
		int32_t src10 = src[10 * stride];

		dst[0]          = CLIP((((src_2 + src3) + 5 * (((src0 + src1)<<2) - (src_1 + src2)) + round_add) >> 5), 0, 255);
		dst[stride]     = CLIP((((src_1 + src4) + 5 * (((src1 + src2)<<2) - (src0 + src3)) + round_add) >> 5), 0, 255);
		dst[2 * stride] = CLIP((((src0 + src5) + 5 * (((src2 + src3)<<2) - (src1 + src4)) + round_add) >> 5), 0, 255);
		dst[3 * stride] = CLIP((((src1 + src6) + 5 * (((src3 + src4)<<2) - (src2 + src5)) + round_add) >> 5), 0, 255);
		dst[4 * stride] = CLIP((((src2 + src7) + 5 * (((src4 + src5)<<2) - (src3 + src6)) + round_add) >> 5), 0, 255);
		dst[5 * stride] = CLIP((((src3 + src8) + 5 * (((src5 + src6)<<2) - (src4 + src7)) + round_add) >> 5), 0, 255);
		dst[6 * stride] = CLIP((((src4 + src9) + 5 * (((src6 + src7)<<2) - (src5 + src8)) + round_add) >> 5), 0, 255);
		dst[7 * stride] = CLIP((((src5 + src10) + 5 * (((src7 + src8)<<2) - (src6 + src9)) + round_add) >> 5), 0, 255);

		/* advance to the next column */
		dst++;
		src++;
	}
}
542 |
|
|
543 |
/*
 * Vertical lowpass filter over a 16-wide block, 17 columns.
 * Column-wise counterpart of the horizontal lowpass: each column reads 17
 * vertically adjacent samples and produces 16 outputs with an 8-tap FIR
 * (-1,3,-6,20,20,-6,3,-1)/32; the three outputs nearest each vertical edge
 * use folded variants of the filter. Each result is biased by
 * round_add = 16 - rounding, shifted >>5 and clamped to [0,255] with the
 * file-local CLIP macro.
 * NOTE(review): 17 columns are filtered (one spare for a subsequent pass) —
 * src/dst must be readable/writable one column past the 16x16 block; confirm
 * against callers.
 */
void interpolate16x16_lowpass_v_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t col;
	uint8_t round_add = 16 - rounding;	/* rounding bias added before >>5 */

	for (col = 0; col < 17; col++) {
		/* gather the 17 vertical taps of this column */
		int32_t src0  = src[0];
		int32_t src1  = src[stride];
		int32_t src2  = src[2 * stride];
		int32_t src3  = src[3 * stride];
		int32_t src4  = src[4 * stride];
		int32_t src5  = src[5 * stride];
		int32_t src6  = src[6 * stride];
		int32_t src7  = src[7 * stride];
		int32_t src8  = src[8 * stride];
		int32_t src9  = src[9 * stride];
		int32_t src10 = src[10 * stride];
		int32_t src11 = src[11 * stride];
		int32_t src12 = src[12 * stride];
		int32_t src13 = src[13 * stride];
		int32_t src14 = src[14 * stride];
		int32_t src15 = src[15 * stride];
		int32_t src16 = src[16 * stride];

		/* folded filter at the top edge */
		dst[0]        = CLIP(((7 * ((src0<<1) - src2) + 23 * src1 + 3 * src3 - src4 + round_add) >> 5), 0, 255);
		dst[stride]   = CLIP(((19 * src1 + 20 * src2 - src5 + 3 * (src4 - src0 - (src3<<1)) + round_add) >> 5), 0, 255);
		dst[2*stride] = CLIP(((20 * (src2 + src3) + (src0<<1) + 3 * (src5 - ((src1 + src4)<<1)) - src6 + round_add) >> 5), 0, 255);

		/* full 8-tap filter for the interior rows */
		dst[3*stride]  = CLIP(((20 * (src3 + src4) + 3 * ((src6 + src1) - ((src2 + src5)<<1)) - (src0 + src7) + round_add) >> 5), 0, 255);
		dst[4*stride]  = CLIP(((20 * (src4 + src5) - 3 * (((src3 + src6)<<1) - (src2 + src7)) - (src1 + src8) + round_add) >> 5), 0, 255);
		dst[5*stride]  = CLIP(((20 * (src5 + src6) - 3 * (((src4 + src7)<<1) - (src3 + src8)) - (src2 + src9) + round_add) >> 5), 0, 255);
		dst[6*stride]  = CLIP(((20 * (src6 + src7) - 3 * (((src5 + src8)<<1) - (src4 + src9)) - (src3 + src10) + round_add) >> 5), 0, 255);
		dst[7*stride]  = CLIP(((20 * (src7 + src8) - 3 * (((src6 + src9)<<1) - (src5 + src10)) - (src4 + src11) + round_add) >> 5), 0, 255);
		dst[8*stride]  = CLIP(((20 * (src8 + src9) - 3 * (((src7 + src10)<<1) - (src6 + src11)) - (src5 + src12) + round_add) >> 5), 0, 255);
		dst[9*stride]  = CLIP(((20 * (src9 + src10) - 3 * (((src8 + src11)<<1) - (src7 + src12)) - (src6 + src13) + round_add) >> 5), 0, 255);
		dst[10*stride] = CLIP(((20 * (src10 + src11) - 3 * (((src9 + src12)<<1) - (src8 + src13)) - (src7 + src14) + round_add) >> 5), 0, 255);
		dst[11*stride] = CLIP(((20 * (src11 + src12) - 3 * (((src10 + src13)<<1) - (src9 + src14)) - (src8 + src15) + round_add) >> 5), 0, 255);
		dst[12*stride] = CLIP(((20 * (src12 + src13) - 3 * (((src11 + src14)<<1) - (src10 + src15)) - (src9 + src16) + round_add) >> 5), 0, 255);

		/* folded filter at the bottom edge */
		dst[13*stride] = CLIP(((20 * (src13 + src14) + (src16<<1) + 3 * (src11 - ((src12 + src15) << 1)) - src10 + round_add) >> 5), 0, 255);
		dst[14*stride] = CLIP(((19 * src15 + 20 * src14 + 3 * (src12 - src16 - (src13 << 1)) - src11 + round_add) >> 5), 0, 255);
		dst[15*stride] = CLIP(((23 * src15 + 7 * ((src16<<1) - src14) + 3 * src13 - src12 + round_add) >> 5), 0, 255);

		/* advance to the next column */
		dst++;
		src++;
	}
}
592 |
|
|
593 |
/*
 * Vertical lowpass filter over an 8-wide block, 9 columns.
 * Column-wise counterpart of interpolate8x8_lowpass_h_c: each column reads
 * 9 vertically adjacent samples and produces 8 outputs with an 8-tap FIR
 * (-1,3,-6,20,20,-6,3,-1)/32; the outputs nearest the top and bottom edges
 * use folded variants of the filter. Results are biased by
 * round_add = 16 - rounding, shifted >>5 and clamped to [0,255] via the
 * file-local CLIP macro.
 * NOTE(review): 9 columns are filtered (one spare column) — dst/src must be
 * valid one column past the 8x8 block; confirm against callers.
 */
void interpolate8x8_lowpass_v_c(uint8_t *dst, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t col;
	uint8_t round_add = 16 - rounding;	/* rounding bias added before >>5 */

	for (col = 0; col < 9; col++) {
		/* gather the 9 vertical taps of this column */
		int32_t src0 = src[0];
		int32_t src1 = src[stride];
		int32_t src2 = src[2 * stride];
		int32_t src3 = src[3 * stride];
		int32_t src4 = src[4 * stride];
		int32_t src5 = src[5 * stride];
		int32_t src6 = src[6 * stride];
		int32_t src7 = src[7 * stride];
		int32_t src8 = src[8 * stride];

		/* folded filter at the top edge */
		dst[0]          = CLIP(((7 * ((src0<<1) - src2) + 23 * src1 + 3 * src3 - src4 + round_add) >> 5), 0, 255);
		dst[stride]     = CLIP(((19 * src1 + 20 * src2 - src5 + 3 * (src4 - src0 - (src3 << 1)) + round_add) >> 5), 0, 255);
		dst[2 * stride] = CLIP(((20 * (src2 + src3) + (src0<<1) + 3 * (src5 - ((src1 + src4) <<1 )) - src6 + round_add) >> 5), 0, 255);

		/* full 8-tap filter for the interior rows */
		dst[3 * stride] = CLIP(((20 * (src3 + src4) + 3 * ((src6 + src1) - ((src2 + src5)<<1)) - (src0 + src7) + round_add) >> 5), 0, 255);
		dst[4 * stride] = CLIP(((20 * (src4 + src5) + 3 * ((src2 + src7) - ((src3 + src6)<<1)) - (src1 + src8) + round_add) >> 5), 0, 255);

		/* folded filter at the bottom edge */
		dst[5 * stride] = CLIP(((20 * (src5 + src6) + (src8<<1) + 3 * (src3 - ((src4 + src7) << 1)) - src2 + round_add) >> 5), 0, 255);
		dst[6 * stride] = CLIP(((19 * src7 + 20 * src6 - src3 + 3 * (src4 - src8 - (src5 << 1)) + round_add) >> 5), 0, 255);
		dst[7 * stride] = CLIP(((7 * ((src8<<1) - src6) + 23 * src7 + 3 * src5 - src4 + round_add) >> 5), 0, 255);

		/* advance to the next column */
		dst++;
		src++;
	}
}
623 |
|
|
624 |
/*
 * Combined horizontal+vertical lowpass for a 16-wide block.
 * Pass 1 writes the horizontally filtered intermediate (17 rows of 16
 * samples, same folded 8-tap FIR as the horizontal filter) into dst2.
 * Pass 2 delegates to interpolate16x16_lowpass_v_c, which filters dst2
 * vertically into dst1. dst2 therefore holds the h-only result and dst1
 * the h+v result on return.
 */
void interpolate16x16_lowpass_hv_c(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t row;
	uint8_t round_add = 16 - rounding;	/* rounding bias added before >>5 */
	uint8_t *h_ptr = dst2;			/* walks the horizontal intermediate */

	/* pass 1: horizontal filter, 17 rows so the vertical pass has a spare */
	for (row = 0; row < 17; row++) {
		/* folded filter at the left edge */
		h_ptr[0] = CLIP(((7 * ((src[0]<<1) - src[2]) + 23 * src[1] + 3 * src[3] - src[4] + round_add) >> 5), 0, 255);
		h_ptr[1] = CLIP(((19 * src[1] + 20 * src[2] - src[5] + 3 * (src[4] - src[0] - (src[3]<<1)) + round_add) >> 5), 0, 255);
		h_ptr[2] = CLIP(((20 * (src[2] + src[3]) + (src[0]<<1) + 3 * (src[5] - ((src[1] + src[4])<<1)) - src[6] + round_add) >> 5), 0, 255);

		/* full 8-tap filter for the interior columns */
		h_ptr[3] = CLIP(((20 * (src[3] + src[4]) + 3 * ((src[6] + src[1]) - ((src[2] + src[5])<<1)) - (src[0] + src[7]) + round_add) >> 5), 0, 255);
		h_ptr[4] = CLIP(((20 * (src[4] + src[5]) - 3 * (((src[3] + src[6])<<1) - (src[2] + src[7])) - (src[1] + src[8]) + round_add) >> 5), 0, 255);
		h_ptr[5] = CLIP(((20 * (src[5] + src[6]) - 3 * (((src[4] + src[7])<<1) - (src[3] + src[8])) - (src[2] + src[9]) + round_add) >> 5), 0, 255);
		h_ptr[6] = CLIP(((20 * (src[6] + src[7]) - 3 * (((src[5] + src[8])<<1) - (src[4] + src[9])) - (src[3] + src[10]) + round_add) >> 5), 0, 255);
		h_ptr[7] = CLIP(((20 * (src[7] + src[8]) - 3 * (((src[6] + src[9])<<1) - (src[5] + src[10])) - (src[4] + src[11]) + round_add) >> 5), 0, 255);
		h_ptr[8] = CLIP(((20 * (src[8] + src[9]) - 3 * (((src[7] + src[10])<<1) - (src[6] + src[11])) - (src[5] + src[12]) + round_add) >> 5), 0, 255);
		h_ptr[9] = CLIP(((20 * (src[9] + src[10]) - 3 * (((src[8] + src[11])<<1) - (src[7] + src[12])) - (src[6] + src[13]) + round_add) >> 5), 0, 255);
		h_ptr[10] = CLIP(((20 * (src[10] + src[11]) - 3 * (((src[9] + src[12])<<1) - (src[8] + src[13])) - (src[7] + src[14]) + round_add) >> 5), 0, 255);
		h_ptr[11] = CLIP(((20 * (src[11] + src[12]) - 3 * (((src[10] + src[13])<<1) - (src[9] + src[14])) - (src[8] + src[15]) + round_add) >> 5), 0, 255);
		h_ptr[12] = CLIP(((20 * (src[12] + src[13]) - 3 * (((src[11] + src[14])<<1) - (src[10] + src[15])) - (src[9] + src[16]) + round_add) >> 5), 0, 255);

		/* folded filter at the right edge */
		h_ptr[13] = CLIP(((20 * (src[13] + src[14]) + (src[16]<<1) + 3 * (src[11] - ((src[12] + src[15]) << 1)) - src[10] + round_add) >> 5), 0, 255);
		h_ptr[14] = CLIP(((19 * src[15] + 20 * src[14] + 3 * (src[12] - src[16] - (src[13] << 1)) - src[11] + round_add) >> 5), 0, 255);
		h_ptr[15] = CLIP(((23 * src[15] + 7 * ((src[16]<<1) - src[14]) + 3 * src[13] - src[12] + round_add) >> 5), 0, 255);

		h_ptr += stride;
		src += stride;
	}

	/* pass 2: vertical filter of the intermediate into dst1 */
	interpolate16x16_lowpass_v_c(dst1, dst2, stride, rounding);
}
659 |
|
|
660 |
/*
 * Combined horizontal+vertical lowpass for an 8-wide block.
 * Pass 1 writes the horizontally filtered intermediate (9 rows of 8 samples,
 * same folded 8-tap FIR as interpolate8x8_lowpass_h_c) into dst2.
 * Pass 2 delegates to interpolate8x8_lowpass_v_c, which filters dst2
 * vertically into dst1. dst2 therefore holds the h-only result and dst1
 * the h+v result on return.
 */
void interpolate8x8_lowpass_hv_c(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int32_t stride, int32_t rounding)
{
	int32_t row;
	uint8_t round_add = 16 - rounding;	/* rounding bias added before >>5 */
	uint8_t *h_ptr = dst2;			/* walks the horizontal intermediate */

	/* pass 1: horizontal filter, 9 rows so the vertical pass has a spare */
	for (row = 0; row < 9; row++) {
		/* folded filter at the left edge */
		h_ptr[0] = CLIP(((7 * ((src[0]<<1) - src[2]) + 23 * src[1] + 3 * src[3] - src[4] + round_add) >> 5), 0, 255);
		h_ptr[1] = CLIP(((19 * src[1] + 20 * src[2] - src[5] + 3 * (src[4] - src[0] - (src[3]<<1)) + round_add) >> 5), 0, 255);
		h_ptr[2] = CLIP(((20 * (src[2] + src[3]) + (src[0]<<1) + 3 * (src[5] - ((src[1] + src[4])<<1)) - src[6] + round_add) >> 5), 0, 255);

		/* full 8-tap filter for the interior columns */
		h_ptr[3] = CLIP(((20 * (src[3] + src[4]) + 3 * ((src[6] + src[1]) - ((src[2] + src[5])<<1)) - (src[0] + src[7]) + round_add) >> 5), 0, 255);
		h_ptr[4] = CLIP(((20 * (src[4] + src[5]) - 3 * (((src[3] + src[6])<<1) - (src[2] + src[7])) - (src[1] + src[8]) + round_add) >> 5), 0, 255);

		/* folded filter at the right edge */
		h_ptr[5] = CLIP(((20 * (src[5] + src[6]) + (src[8]<<1) + 3 * (src[3] - ((src[4] + src[7]) << 1)) - src[2] + round_add) >> 5), 0, 255);
		h_ptr[6] = CLIP(((19 * src[7] + 20 * src[6] + 3 * (src[4] - src[8] - (src[5] << 1)) - src[3] + round_add) >> 5), 0, 255);
		h_ptr[7] = CLIP(((23 * src[7] + 7 * ((src[8]<<1) - src[6]) + 3 * src[5] - src[4] + round_add) >> 5), 0, 255);

		h_ptr += stride;
		src += stride;
	}

	/* pass 2: vertical filter of the intermediate into dst1 */
	interpolate8x8_lowpass_v_c(dst1, dst2, stride, rounding);
}