/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - 8x8 block-based halfpel interpolation -
 *
 *  Copyright(C) 2002 Peter Ross <pross@xvid.org>
 *  Copyright(C) 2002 MinChen <chenm002@163.com>
 *
 *  This file is part of XviD, a free MPEG-4 video encoder/decoder
 *
 *  XviD is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *  Under section 8 of the GNU General Public License, the copyright
 *  holders of XVID explicitly forbid distribution in the following
 *  countries:
 *
 *  - Japan
 *  - United States of America
 *
 *  Linking XviD statically or dynamically with other modules is making a
 *  combined work based on XviD.  Thus, the terms and conditions of the
 *  GNU General Public License cover the whole combination.
 *
 *  As a special exception, the copyright holders of XviD give you
 *  permission to link XviD with independent modules that communicate with
 *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
 *  license terms of these independent modules, and to copy and distribute
 *  the resulting combined work under terms of your choice, provided that
 *  every copy of the combined work is accompanied by a complete copy of
 *  the source code of XviD (the version of XviD used to produce the
 *  combined work), being distributed under the terms of the GNU General
 *  Public License plus this exception.  An independent module is a module
 *  which is not derived from or based on XviD.
 *
 *  Note that people who make modified versions of XviD are not obligated
 *  to grant this special exception for their modified versions; it is
 *  their choice whether to do so.  The GNU General Public License gives
 *  permission to release a modified version without this exception; this
 *  exception also makes it possible to release a modified version which
 *  carries forward this exception.
 *
 *  $Id$
 *
 ****************************************************************************/
57 |
|
|
58 |
#include "../portab.h" |
#include "../portab.h" |
59 |
#include "interpolate8x8.h" |
#include "interpolate8x8.h" |
60 |
|
|
61 |
// function pointers |
/* function pointers */ |
62 |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_h; |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_h; |
63 |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v; |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_v; |
64 |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv; |
INTERPOLATE8X8_PTR interpolate8x8_halfpel_hv; |
65 |
|
|
66 |
|
|
67 |
// dst = interpolate(src) |
/* dst = interpolate(src) */ |
68 |
|
|
69 |
void |
void |
70 |
interpolate8x8_halfpel_h_c(uint8_t * const dst, |
interpolate8x8_halfpel_h_c(uint8_t * const dst, |
77 |
for (j = 0; j < 8; j++) { |
for (j = 0; j < 8; j++) { |
78 |
for (i = 0; i < 8; i++) { |
for (i = 0; i < 8; i++) { |
79 |
|
|
80 |
int16_t tot = |
int32_t tot = |
81 |
(int32_t) src[j * stride + i] + (int32_t) src[j * stride + i + |
(int32_t) src[j * stride + i] + (int32_t) src[j * stride + i + 1]; |
|
1]; |
|
82 |
|
|
83 |
tot = (int32_t) ((tot + 1 - rounding) >> 1); |
tot = (tot + 1 - rounding) >> 1; |
84 |
dst[j * stride + i] = (uint8_t) tot; |
dst[j * stride + i] = (uint8_t) tot; |
85 |
} |
} |
86 |
} |
} |
98 |
|
|
99 |
for (j = 0; j < 8; j++) { |
for (j = 0; j < 8; j++) { |
100 |
for (i = 0; i < 8; i++) { |
for (i = 0; i < 8; i++) { |
101 |
int16_t tot = src[j * stride + i] + src[j * stride + i + stride]; |
int32_t tot = |
102 |
|
(int32_t)src[j * stride + i] + (int32_t)src[j * stride + i + stride]; |
103 |
|
|
104 |
tot = ((tot + 1 - rounding) >> 1); |
tot = ((tot + 1 - rounding) >> 1); |
105 |
dst[j * stride + i] = (uint8_t) tot; |
dst[j * stride + i] = (uint8_t) tot; |
118 |
|
|
119 |
for (j = 0; j < 8; j++) { |
for (j = 0; j < 8; j++) { |
120 |
for (i = 0; i < 8; i++) { |
for (i = 0; i < 8; i++) { |
121 |
int16_t tot = |
int32_t tot = |
122 |
src[j * stride + i] + src[j * stride + i + 1] + |
(int32_t)src[j * stride + i] + (int32_t)src[j * stride + i + 1] + |
123 |
src[j * stride + i + stride] + src[j * stride + i + stride + |
(int32_t)src[j * stride + i + stride] + (int32_t)src[j * stride + i + stride + 1]; |
|
1]; |
|
124 |
tot = ((tot + 2 - rounding) >> 2); |
tot = ((tot + 2 - rounding) >> 2); |
125 |
dst[j * stride + i] = (uint8_t) tot; |
dst[j * stride + i] = (uint8_t) tot; |
126 |
} |
} |
127 |
} |
} |
128 |
} |
} |
129 |
|
|
130 |
// add by MinChen <chenm001@163.com> |
/* add by MinChen <chenm001@163.com> */ |
131 |
// interpolate8x8 two pred block |
/* interpolate8x8 two pred block */ |
132 |
void |
void |
133 |
interpolate8x8_c(uint8_t * const dst, |
interpolate8x8_c(uint8_t * const dst, |
134 |
const uint8_t * const src, |
const uint8_t * const src, |
141 |
for (j = 0; j < 8; j++) { |
for (j = 0; j < 8; j++) { |
142 |
for (i = 0; i < 8; i++) { |
for (i = 0; i < 8; i++) { |
143 |
int32_t tot = |
int32_t tot = |
144 |
((src[(y + j) * stride + x + i] + |
(((int32_t)src[(y + j) * stride + x + i] + |
145 |
dst[(y + j) * stride + x + i] + 1) >> 1); |
(int32_t)dst[(y + j) * stride + x + i] + 1) >> 1); |
146 |
dst[(y + j) * stride + x + i] = (uint8_t) tot; |
dst[(y + j) * stride + x + i] = (uint8_t) tot; |
147 |
} |
} |
148 |
} |
} |
257 |
|
|
258 |
for(i = 0; i < 8; i++) |
for(i = 0; i < 8; i++) |
259 |
{ |
{ |
260 |
dst[0] = (src1[0] + src2[0] + (1 - rounding)) >> 1; |
dst[0] = (uint8_t)((src1[0] + src2[0] + (1 - rounding)) >> 1); |
261 |
dst[1] = (src1[1] + src2[1] + (1 - rounding)) >> 1; |
dst[1] = (uint8_t)((src1[1] + src2[1] + (1 - rounding)) >> 1); |
262 |
dst[2] = (src1[2] + src2[2] + (1 - rounding)) >> 1; |
dst[2] = (uint8_t)((src1[2] + src2[2] + (1 - rounding)) >> 1); |
263 |
dst[3] = (src1[3] + src2[3] + (1 - rounding)) >> 1; |
dst[3] = (uint8_t)((src1[3] + src2[3] + (1 - rounding)) >> 1); |
264 |
dst[4] = (src1[4] + src2[4] + (1 - rounding)) >> 1; |
dst[4] = (uint8_t)((src1[4] + src2[4] + (1 - rounding)) >> 1); |
265 |
dst[5] = (src1[5] + src2[5] + (1 - rounding)) >> 1; |
dst[5] = (uint8_t)((src1[5] + src2[5] + (1 - rounding)) >> 1); |
266 |
dst[6] = (src1[6] + src2[6] + (1 - rounding)) >> 1; |
dst[6] = (uint8_t)((src1[6] + src2[6] + (1 - rounding)) >> 1); |
267 |
dst[7] = (src1[7] + src2[7] + (1 - rounding)) >> 1; |
dst[7] = (uint8_t)((src1[7] + src2[7] + (1 - rounding)) >> 1); |
268 |
|
|
269 |
dst += dst_stride; |
dst += dst_stride; |
270 |
src1 += src_stride; |
src1 += src_stride; |
278 |
|
|
279 |
for(i = 0; i < 8; i++) |
for(i = 0; i < 8; i++) |
280 |
{ |
{ |
281 |
dst[0] = (src1[0] + src2[0] + src3[0] + src4[0] + (2 - rounding)) >> 2; |
dst[0] = (uint8_t)((src1[0] + src2[0] + src3[0] + src4[0] + (2 - rounding)) >> 2); |
282 |
dst[1] = (src1[1] + src2[1] + src3[1] + src4[1] + (2 - rounding)) >> 2; |
dst[1] = (uint8_t)((src1[1] + src2[1] + src3[1] + src4[1] + (2 - rounding)) >> 2); |
283 |
dst[2] = (src1[2] + src2[2] + src3[2] + src4[2] + (2 - rounding)) >> 2; |
dst[2] = (uint8_t)((src1[2] + src2[2] + src3[2] + src4[2] + (2 - rounding)) >> 2); |
284 |
dst[3] = (src1[3] + src2[3] + src3[3] + src4[3] + (2 - rounding)) >> 2; |
dst[3] = (uint8_t)((src1[3] + src2[3] + src3[3] + src4[3] + (2 - rounding)) >> 2); |
285 |
dst[4] = (src1[4] + src2[4] + src3[4] + src4[4] + (2 - rounding)) >> 2; |
dst[4] = (uint8_t)((src1[4] + src2[4] + src3[4] + src4[4] + (2 - rounding)) >> 2); |
286 |
dst[5] = (src1[5] + src2[5] + src3[5] + src4[5] + (2 - rounding)) >> 2; |
dst[5] = (uint8_t)((src1[5] + src2[5] + src3[5] + src4[5] + (2 - rounding)) >> 2); |
287 |
dst[6] = (src1[6] + src2[6] + src3[6] + src4[6] + (2 - rounding)) >> 2; |
dst[6] = (uint8_t)((src1[6] + src2[6] + src3[6] + src4[6] + (2 - rounding)) >> 2); |
288 |
dst[7] = (src1[7] + src2[7] + src3[7] + src4[7] + (2 - rounding)) >> 2; |
dst[7] = (uint8_t)((src1[7] + src2[7] + src3[7] + src4[7] + (2 - rounding)) >> 2); |
289 |
|
|
290 |
dst += stride; |
dst += stride; |
291 |
src1 += stride; |
src1 += stride; |