3 |
* XVID MPEG-4 VIDEO CODEC |
* XVID MPEG-4 VIDEO CODEC |
4 |
* - Native API implementation - |
* - Native API implementation - |
5 |
* |
* |
6 |
* Copyright(C) 2001-2002 Peter Ross <pross@xvid.org> |
* This program is free software ; you can redistribute it and/or modify |
7 |
* |
* it under the terms of the GNU General Public License as published by |
|
* This file is part of XviD, a free MPEG-4 video encoder/decoder |
|
|
* |
|
|
* XviD is free software; you can redistribute it and/or modify it |
|
|
* under the terms of the GNU General Public License as published by |
|
8 |
* the Free Software Foundation; either version 2 of the License, or |
* the Free Software Foundation; either version 2 of the License, or |
9 |
* (at your option) any later version. |
* (at your option) any later version. |
10 |
* |
* |
17 |
* along with this program; if not, write to the Free Software |
* along with this program; if not, write to the Free Software |
18 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 |
* |
* |
|
* Under section 8 of the GNU General Public License, the copyright |
|
|
* holders of XVID explicitly forbid distribution in the following |
|
|
* countries: |
|
|
* |
|
|
* - Japan |
|
|
* - United States of America |
|
|
* |
|
|
* Linking XviD statically or dynamically with other modules is making a |
|
|
* combined work based on XviD. Thus, the terms and conditions of the |
|
|
* GNU General Public License cover the whole combination. |
|
|
* |
|
|
* As a special exception, the copyright holders of XviD give you |
|
|
* permission to link XviD with independent modules that communicate with |
|
|
* XviD solely through the VFW1.1 and DShow interfaces, regardless of the |
|
|
* license terms of these independent modules, and to copy and distribute |
|
|
* the resulting combined work under terms of your choice, provided that |
|
|
* every copy of the combined work is accompanied by a complete copy of |
|
|
* the source code of XviD (the version of XviD used to produce the |
|
|
* combined work), being distributed under the terms of the GNU General |
|
|
* Public License plus this exception. An independent module is a module |
|
|
* which is not derived from or based on XviD. |
|
|
* |
|
|
* Note that people who make modified versions of XviD are not obligated |
|
|
* to grant this special exception for their modified versions; it is |
|
|
* their choice whether to do so. The GNU General Public License gives |
|
|
* permission to release a modified version without this exception; this |
|
|
* exception also makes it possible to release a modified version which |
|
|
* carries forward this exception. |
|
|
* |
|
20 |
* $Id$ |
* $Id$ |
21 |
* |
* |
22 |
****************************************************************************/ |
****************************************************************************/ |
23 |
|
|
24 |
|
#include <stdio.h> |
25 |
|
#include <stdlib.h> |
26 |
|
#include <string.h> |
27 |
|
#include <time.h> |
28 |
|
|
29 |
#include "xvid.h" |
#include "xvid.h" |
30 |
#include "decoder.h" |
#include "decoder.h" |
31 |
#include "encoder.h" |
#include "encoder.h" |
34 |
#include "dct/fdct.h" |
#include "dct/fdct.h" |
35 |
#include "image/colorspace.h" |
#include "image/colorspace.h" |
36 |
#include "image/interpolate8x8.h" |
#include "image/interpolate8x8.h" |
37 |
|
#include "image/reduced.h" |
38 |
#include "utils/mem_transfer.h" |
#include "utils/mem_transfer.h" |
39 |
|
#include "utils/mbfunctions.h" |
40 |
#include "quant/quant_h263.h" |
#include "quant/quant_h263.h" |
41 |
#include "quant/quant_mpeg4.h" |
#include "quant/quant_mpeg4.h" |
42 |
#include "motion/motion.h" |
#include "motion/motion.h" |
45 |
#include "utils/timer.h" |
#include "utils/timer.h" |
46 |
#include "bitstream/mbcoding.h" |
#include "bitstream/mbcoding.h" |
47 |
|
|
48 |
#if defined(ARCH_IS_IA32) && defined(EXPERIMENTAL_SSE2_CODE) |
#if defined(_DEBUG) |
49 |
|
unsigned int xvid_debug = 0; /* xvid debug mask */ |
50 |
|
#endif |
51 |
|
|
52 |
#ifdef _MSC_VER |
#if defined(ARCH_IS_IA32) |
53 |
|
|
54 |
|
#if defined(_MSC_VER) |
55 |
#include <windows.h> |
#include <windows.h> |
56 |
#else |
#else |
57 |
#include <signal.h> |
#include <signal.h> |
58 |
#include <setjmp.h> |
#include <setjmp.h> |
|
#endif |
|
|
|
|
|
|
|
|
#ifndef _MSC_VER |
|
59 |
|
|
60 |
static jmp_buf mark; |
static jmp_buf mark; |
61 |
|
|
68 |
|
|
69 |
|
|
70 |
/* |
/* |
71 |
* Calls the funcptr, and returns whether SIGILL (illegal instruction) was signalled |
* Calls the funcptr, and returns whether SIGILL (illegal instruction) was |
72 |
|
* signalled |
73 |
|
* |
74 |
* Return values: |
* Return values: |
75 |
* -1 : could not determine |
* -1 : could not determine |
76 |
* 0 : SIGILL was *not* signalled |
* 0 : SIGILL was *not* signalled |
80 |
int |
int |
81 |
sigill_check(void (*func)()) |
sigill_check(void (*func)()) |
82 |
{ |
{ |
83 |
#ifdef _MSC_VER |
#if defined(_MSC_VER) |
84 |
_try { |
_try { |
85 |
func(); |
func(); |
86 |
} |
} |
114 |
} |
} |
115 |
#endif |
#endif |
116 |
|
|
117 |
|
|
118 |
|
/* detect cpu flags */ |
119 |
|
static unsigned int |
120 |
|
detect_cpu_flags() |
121 |
|
{ |
122 |
|
/* enable native assembly optimizations by default */ |
123 |
|
unsigned int cpu_flags = XVID_CPU_ASM; |
124 |
|
|
125 |
|
#if defined(ARCH_IS_IA32) |
126 |
|
cpu_flags |= check_cpu_features(); |
127 |
|
if ((cpu_flags & XVID_CPU_SSE) && sigill_check(sse_os_trigger)) |
128 |
|
cpu_flags &= ~XVID_CPU_SSE; |
129 |
|
|
130 |
|
if ((cpu_flags & XVID_CPU_SSE2) && sigill_check(sse2_os_trigger)) |
131 |
|
cpu_flags &= ~XVID_CPU_SSE2; |
132 |
|
#endif |
133 |
|
|
134 |
|
#if defined(ARCH_IS_PPC) |
135 |
|
#if defined(ARCH_IS_PPC_ALTIVEC) |
136 |
|
cpu_flags |= XVID_CPU_ALTIVEC; |
137 |
|
#endif |
138 |
|
#endif |
139 |
|
|
140 |
|
return cpu_flags; |
141 |
|
} |
142 |
|
|
143 |
|
|
144 |
/***************************************************************************** |
/***************************************************************************** |
145 |
* XviD Init Entry point |
* XviD Init Entry point |
146 |
* |
* |
155 |
* |
* |
156 |
****************************************************************************/ |
****************************************************************************/ |
157 |
|
|
|
int |
|
|
xvid_init(void *handle, |
|
|
int opt, |
|
|
void *param1, |
|
|
void *param2) |
|
|
{ |
|
|
int cpu_flags; |
|
|
XVID_INIT_PARAM *init_param; |
|
|
|
|
|
init_param = (XVID_INIT_PARAM *) param1; |
|
|
|
|
|
/* Inform the client the API version */ |
|
|
init_param->api_version = API_VERSION; |
|
158 |
|
|
159 |
/* Inform the client the core build - unused because we're still alpha */ |
static |
160 |
init_param->core_build = 1000; |
int xvid_gbl_init(xvid_gbl_init_t * init) |
|
|
|
|
/* Do we have to force CPU features ? */ |
|
|
if ((init_param->cpu_flags & XVID_CPU_FORCE)) { |
|
|
|
|
|
cpu_flags = init_param->cpu_flags; |
|
|
|
|
|
} else { |
|
|
|
|
|
cpu_flags = check_cpu_features(); |
|
|
|
|
|
#if defined(ARCH_IS_IA32) && defined(EXPERIMENTAL_SSE2_CODE) |
|
|
if ((cpu_flags & XVID_CPU_SSE) && sigill_check(sse_os_trigger)) |
|
|
cpu_flags &= ~XVID_CPU_SSE; |
|
|
|
|
|
if ((cpu_flags & XVID_CPU_SSE2) && sigill_check(sse2_os_trigger)) |
|
|
cpu_flags &= ~XVID_CPU_SSE2; |
|
|
#endif |
|
|
} |
|
|
|
|
|
if ((init_param->cpu_flags & XVID_CPU_CHKONLY)) |
|
161 |
{ |
{ |
162 |
init_param->cpu_flags = cpu_flags; |
unsigned int cpu_flags; |
|
return XVID_ERR_OK; |
|
|
} |
|
163 |
|
|
164 |
init_param->cpu_flags = cpu_flags; |
if (XVID_MAJOR(init->version) != 1) /* v1.x.x */ |
165 |
|
return XVID_ERR_VERSION; |
166 |
|
|
167 |
|
cpu_flags = (init->cpu_flags & XVID_CPU_FORCE) ? init->cpu_flags : detect_cpu_flags(); |
168 |
|
|
169 |
/* Initialize the function pointers */ |
/* Initialize the function pointers */ |
170 |
idct_int32_init(); |
idct_int32_init(); |
195 |
transfer_8to16copy = transfer_8to16copy_c; |
transfer_8to16copy = transfer_8to16copy_c; |
196 |
transfer_16to8copy = transfer_16to8copy_c; |
transfer_16to8copy = transfer_16to8copy_c; |
197 |
transfer_8to16sub = transfer_8to16sub_c; |
transfer_8to16sub = transfer_8to16sub_c; |
198 |
|
transfer_8to16subro = transfer_8to16subro_c; |
199 |
transfer_8to16sub2 = transfer_8to16sub2_c; |
transfer_8to16sub2 = transfer_8to16sub2_c; |
200 |
transfer_16to8add = transfer_16to8add_c; |
transfer_16to8add = transfer_16to8add_c; |
201 |
transfer8x8_copy = transfer8x8_copy_c; |
transfer8x8_copy = transfer8x8_copy_c; |
202 |
|
|
203 |
|
/* Interlacing functions */ |
204 |
|
MBFieldTest = MBFieldTest_c; |
205 |
|
|
206 |
/* Image interpolation related functions */ |
/* Image interpolation related functions */ |
207 |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_c; |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_c; |
208 |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_c; |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_c; |
209 |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c; |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c; |
210 |
|
|
211 |
|
interpolate16x16_lowpass_h = interpolate16x16_lowpass_h_c; |
212 |
|
interpolate16x16_lowpass_v = interpolate16x16_lowpass_v_c; |
213 |
|
interpolate16x16_lowpass_hv = interpolate16x16_lowpass_hv_c; |
214 |
|
|
215 |
|
interpolate8x8_lowpass_h = interpolate8x8_lowpass_h_c; |
216 |
|
interpolate8x8_lowpass_v = interpolate8x8_lowpass_v_c; |
217 |
|
interpolate8x8_lowpass_hv = interpolate8x8_lowpass_hv_c; |
218 |
|
|
219 |
|
interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_c; |
220 |
|
interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_c; |
221 |
|
|
222 |
|
interpolate8x8_avg2 = interpolate8x8_avg2_c; |
223 |
|
interpolate8x8_avg4 = interpolate8x8_avg4_c; |
224 |
|
|
225 |
|
/* reduced resoltuion */ |
226 |
|
copy_upsampled_8x8_16to8 = xvid_Copy_Upsampled_8x8_16To8_C; |
227 |
|
add_upsampled_8x8_16to8 = xvid_Add_Upsampled_8x8_16To8_C; |
228 |
|
vfilter_31 = xvid_VFilter_31_C; |
229 |
|
hfilter_31 = xvid_HFilter_31_C; |
230 |
|
filter_18x18_to_8x8 = xvid_Filter_18x18_To_8x8_C; |
231 |
|
filter_diff_18x18_to_8x8 = xvid_Filter_Diff_18x18_To_8x8_C; |
232 |
|
|
233 |
/* Initialize internal colorspace transformation tables */ |
/* Initialize internal colorspace transformation tables */ |
234 |
colorspace_init(); |
colorspace_init(); |
235 |
|
|
236 |
/* All colorspace transformation functions User Format->YV12 */ |
/* All colorspace transformation functions User Format->YV12 */ |
237 |
|
yv12_to_yv12 = yv12_to_yv12_c; |
238 |
rgb555_to_yv12 = rgb555_to_yv12_c; |
rgb555_to_yv12 = rgb555_to_yv12_c; |
239 |
rgb565_to_yv12 = rgb565_to_yv12_c; |
rgb565_to_yv12 = rgb565_to_yv12_c; |
240 |
rgb24_to_yv12 = rgb24_to_yv12_c; |
bgr_to_yv12 = bgr_to_yv12_c; |
241 |
rgb32_to_yv12 = rgb32_to_yv12_c; |
bgra_to_yv12 = bgra_to_yv12_c; |
242 |
yuv_to_yv12 = yuv_to_yv12_c; |
abgr_to_yv12 = abgr_to_yv12_c; |
243 |
|
rgba_to_yv12 = rgba_to_yv12_c; |
244 |
yuyv_to_yv12 = yuyv_to_yv12_c; |
yuyv_to_yv12 = yuyv_to_yv12_c; |
245 |
uyvy_to_yv12 = uyvy_to_yv12_c; |
uyvy_to_yv12 = uyvy_to_yv12_c; |
246 |
|
|
247 |
|
rgb555i_to_yv12 = rgb555i_to_yv12_c; |
248 |
|
rgb565i_to_yv12 = rgb565i_to_yv12_c; |
249 |
|
bgri_to_yv12 = bgri_to_yv12_c; |
250 |
|
bgrai_to_yv12 = bgrai_to_yv12_c; |
251 |
|
abgri_to_yv12 = abgri_to_yv12_c; |
252 |
|
rgbai_to_yv12 = rgbai_to_yv12_c; |
253 |
|
yuyvi_to_yv12 = yuyvi_to_yv12_c; |
254 |
|
uyvyi_to_yv12 = uyvyi_to_yv12_c; |
255 |
|
|
256 |
|
|
257 |
/* All colorspace transformation functions YV12->User format */ |
/* All colorspace transformation functions YV12->User format */ |
258 |
yv12_to_rgb555 = yv12_to_rgb555_c; |
yv12_to_rgb555 = yv12_to_rgb555_c; |
259 |
yv12_to_rgb565 = yv12_to_rgb565_c; |
yv12_to_rgb565 = yv12_to_rgb565_c; |
260 |
yv12_to_rgb24 = yv12_to_rgb24_c; |
yv12_to_bgr = yv12_to_bgr_c; |
261 |
yv12_to_rgb32 = yv12_to_rgb32_c; |
yv12_to_bgra = yv12_to_bgra_c; |
262 |
yv12_to_yuv = yv12_to_yuv_c; |
yv12_to_abgr = yv12_to_abgr_c; |
263 |
|
yv12_to_rgba = yv12_to_rgba_c; |
264 |
yv12_to_yuyv = yv12_to_yuyv_c; |
yv12_to_yuyv = yv12_to_yuyv_c; |
265 |
yv12_to_uyvy = yv12_to_uyvy_c; |
yv12_to_uyvy = yv12_to_uyvy_c; |
266 |
|
|
267 |
|
yv12_to_rgb555i = yv12_to_rgb555i_c; |
268 |
|
yv12_to_rgb565i = yv12_to_rgb565i_c; |
269 |
|
yv12_to_bgri = yv12_to_bgri_c; |
270 |
|
yv12_to_bgrai = yv12_to_bgrai_c; |
271 |
|
yv12_to_abgri = yv12_to_abgri_c; |
272 |
|
yv12_to_rgbai = yv12_to_rgbai_c; |
273 |
|
yv12_to_yuyvi = yv12_to_yuyvi_c; |
274 |
|
yv12_to_uyvyi = yv12_to_uyvyi_c; |
275 |
|
|
276 |
/* Functions used in motion estimation algorithms */ |
/* Functions used in motion estimation algorithms */ |
277 |
calc_cbp = calc_cbp_c; |
calc_cbp = calc_cbp_c; |
278 |
sad16 = sad16_c; |
sad16 = sad16_c; |
280 |
sad16bi = sad16bi_c; |
sad16bi = sad16bi_c; |
281 |
sad8bi = sad8bi_c; |
sad8bi = sad8bi_c; |
282 |
dev16 = dev16_c; |
dev16 = dev16_c; |
283 |
|
sad16v = sad16v_c; |
284 |
|
|
285 |
|
/* Halfpel8_Refine = Halfpel8_Refine_c; */ |
286 |
|
|
287 |
|
#if defined(ARCH_IS_IA32) |
288 |
|
|
289 |
|
if ((cpu_flags & XVID_CPU_ASM)) |
290 |
|
{ |
291 |
|
vfilter_31 = xvid_VFilter_31_x86; |
292 |
|
hfilter_31 = xvid_HFilter_31_x86; |
293 |
|
} |
294 |
|
|
295 |
Halfpel8_Refine = Halfpel8_Refine_c; |
if ((cpu_flags & XVID_CPU_MMX) || (cpu_flags & XVID_CPU_MMXEXT) || |
296 |
|
(cpu_flags & XVID_CPU_3DNOW) || (cpu_flags & XVID_CPU_3DNOWEXT) || |
297 |
|
(cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2)) |
298 |
|
{ |
299 |
|
/* Restore FPU context : emms_c is a nop functions */ |
300 |
|
emms = emms_mmx; |
301 |
|
} |
302 |
|
|
303 |
#ifdef ARCH_IS_IA32 |
if ((cpu_flags & XVID_CPU_MMX)) { |
|
if ((cpu_flags & XVID_CPU_MMX) > 0) { |
|
304 |
|
|
305 |
/* Forward and Inverse Discrete Cosine Transformation functions */ |
/* Forward and Inverse Discrete Cosine Transformation functions */ |
306 |
fdct = fdct_mmx; |
fdct = fdct_mmx; |
307 |
idct = idct_mmx; |
idct = idct_mmx; |
308 |
|
|
|
/* To restore FPU context after mmx use */ |
|
|
emms = emms_mmx; |
|
|
|
|
309 |
/* Quantization related functions */ |
/* Quantization related functions */ |
310 |
quant_intra = quant_intra_mmx; |
quant_intra = quant_intra_mmx; |
311 |
dequant_intra = dequant_intra_mmx; |
dequant_intra = dequant_intra_mmx; |
321 |
transfer_8to16copy = transfer_8to16copy_mmx; |
transfer_8to16copy = transfer_8to16copy_mmx; |
322 |
transfer_16to8copy = transfer_16to8copy_mmx; |
transfer_16to8copy = transfer_16to8copy_mmx; |
323 |
transfer_8to16sub = transfer_8to16sub_mmx; |
transfer_8to16sub = transfer_8to16sub_mmx; |
324 |
|
transfer_8to16subro = transfer_8to16subro_mmx; |
325 |
transfer_8to16sub2 = transfer_8to16sub2_mmx; |
transfer_8to16sub2 = transfer_8to16sub2_mmx; |
326 |
transfer_16to8add = transfer_16to8add_mmx; |
transfer_16to8add = transfer_16to8add_mmx; |
327 |
transfer8x8_copy = transfer8x8_copy_mmx; |
transfer8x8_copy = transfer8x8_copy_mmx; |
328 |
|
|
329 |
|
/* Interlacing Functions */ |
330 |
|
MBFieldTest = MBFieldTest_mmx; |
331 |
|
|
332 |
/* Image Interpolation related functions */ |
/* Image Interpolation related functions */ |
333 |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_mmx; |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_mmx; |
334 |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_mmx; |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_mmx; |
335 |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx; |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx; |
336 |
|
|
337 |
/* Image RGB->YV12 related functions */ |
interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_mmx; |
338 |
rgb24_to_yv12 = rgb24_to_yv12_mmx; |
interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_mmx; |
339 |
rgb32_to_yv12 = rgb32_to_yv12_mmx; |
|
340 |
yuv_to_yv12 = yuv_to_yv12_mmx; |
interpolate8x8_avg2 = interpolate8x8_avg2_mmx; |
341 |
|
interpolate8x8_avg4 = interpolate8x8_avg4_mmx; |
342 |
|
|
343 |
|
/* reduced resolution */ |
344 |
|
copy_upsampled_8x8_16to8 = xvid_Copy_Upsampled_8x8_16To8_mmx; |
345 |
|
add_upsampled_8x8_16to8 = xvid_Add_Upsampled_8x8_16To8_mmx; |
346 |
|
hfilter_31 = xvid_HFilter_31_mmx; |
347 |
|
filter_18x18_to_8x8 = xvid_Filter_18x18_To_8x8_mmx; |
348 |
|
filter_diff_18x18_to_8x8 = xvid_Filter_Diff_18x18_To_8x8_mmx; |
349 |
|
|
350 |
|
/* image input xxx_to_yv12 related functions */ |
351 |
|
yv12_to_yv12 = yv12_to_yv12_mmx; |
352 |
|
bgr_to_yv12 = bgr_to_yv12_mmx; |
353 |
|
bgra_to_yv12 = bgra_to_yv12_mmx; |
354 |
yuyv_to_yv12 = yuyv_to_yv12_mmx; |
yuyv_to_yv12 = yuyv_to_yv12_mmx; |
355 |
uyvy_to_yv12 = uyvy_to_yv12_mmx; |
uyvy_to_yv12 = uyvy_to_yv12_mmx; |
356 |
|
|
357 |
/* Image YV12->RGB related functions */ |
/* image output yv12_to_xxx related functions */ |
358 |
yv12_to_rgb24 = yv12_to_rgb24_mmx; |
yv12_to_bgr = yv12_to_bgr_mmx; |
359 |
yv12_to_rgb32 = yv12_to_rgb32_mmx; |
yv12_to_bgra = yv12_to_bgra_mmx; |
360 |
yv12_to_yuyv = yv12_to_yuyv_mmx; |
yv12_to_yuyv = yv12_to_yuyv_mmx; |
361 |
yv12_to_uyvy = yv12_to_uyvy_mmx; |
yv12_to_uyvy = yv12_to_uyvy_mmx; |
362 |
|
|
363 |
|
yv12_to_yuyvi = yv12_to_yuyvi_mmx; |
364 |
|
yv12_to_uyvyi = yv12_to_uyvyi_mmx; |
365 |
|
|
366 |
/* Motion estimation related functions */ |
/* Motion estimation related functions */ |
367 |
calc_cbp = calc_cbp_mmx; |
calc_cbp = calc_cbp_mmx; |
368 |
sad16 = sad16_mmx; |
sad16 = sad16_mmx; |
370 |
sad16bi = sad16bi_mmx; |
sad16bi = sad16bi_mmx; |
371 |
sad8bi = sad8bi_mmx; |
sad8bi = sad8bi_mmx; |
372 |
dev16 = dev16_mmx; |
dev16 = dev16_mmx; |
373 |
|
sad16v = sad16v_mmx; |
374 |
} |
} |
375 |
|
|
376 |
/* these 3dnow functions are faster than mmx, but slower than xmm. */ |
/* these 3dnow functions are faster than mmx, but slower than xmm. */ |
377 |
if ((cpu_flags & XVID_CPU_3DNOW) > 0) { |
if ((cpu_flags & XVID_CPU_3DNOW)) { |
378 |
|
|
379 |
|
emms = emms_3dn; |
380 |
|
|
381 |
/* ME functions */ |
/* ME functions */ |
382 |
sad16bi = sad16bi_3dn; |
sad16bi = sad16bi_3dn; |
383 |
sad8bi = sad8bi_3dn; |
sad8bi = sad8bi_3dn; |
384 |
|
|
385 |
|
yuyv_to_yv12 = yuyv_to_yv12_3dn; |
386 |
|
uyvy_to_yv12 = uyvy_to_yv12_3dn; |
387 |
} |
} |
388 |
|
|
389 |
|
|
390 |
if ((cpu_flags & XVID_CPU_MMXEXT) > 0) { |
if ((cpu_flags & XVID_CPU_MMXEXT)) { |
391 |
|
|
392 |
/* Inverse DCT */ |
/* Inverse DCT */ |
393 |
idct = idct_xmm; |
idct = idct_xmm; |
397 |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_xmm; |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_xmm; |
398 |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_xmm; |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_xmm; |
399 |
|
|
400 |
|
/* reduced resolution */ |
401 |
|
copy_upsampled_8x8_16to8 = xvid_Copy_Upsampled_8x8_16To8_xmm; |
402 |
|
add_upsampled_8x8_16to8 = xvid_Add_Upsampled_8x8_16To8_xmm; |
403 |
|
|
404 |
/* Quantization */ |
/* Quantization */ |
405 |
|
quant4_intra = quant4_intra_xmm; |
406 |
|
quant4_inter = quant4_inter_xmm; |
407 |
|
|
408 |
dequant_intra = dequant_intra_xmm; |
dequant_intra = dequant_intra_xmm; |
409 |
dequant_inter = dequant_inter_xmm; |
dequant_inter = dequant_inter_xmm; |
410 |
|
|
412 |
transfer_8to16sub2 = transfer_8to16sub2_xmm; |
transfer_8to16sub2 = transfer_8to16sub2_xmm; |
413 |
|
|
414 |
/* Colorspace transformation */ |
/* Colorspace transformation */ |
415 |
yuv_to_yv12 = yuv_to_yv12_xmm; |
yv12_to_yv12 = yv12_to_yv12_xmm; |
416 |
|
yuyv_to_yv12 = yuyv_to_yv12_xmm; |
417 |
|
uyvy_to_yv12 = uyvy_to_yv12_xmm; |
418 |
|
|
419 |
/* ME functions */ |
/* ME functions */ |
420 |
sad16 = sad16_xmm; |
sad16 = sad16_xmm; |
422 |
sad16bi = sad16bi_xmm; |
sad16bi = sad16bi_xmm; |
423 |
sad8bi = sad8bi_xmm; |
sad8bi = sad8bi_xmm; |
424 |
dev16 = dev16_xmm; |
dev16 = dev16_xmm; |
425 |
|
sad16v = sad16v_xmm; |
426 |
} |
} |
427 |
|
|
428 |
if ((cpu_flags & XVID_CPU_3DNOW) > 0) { |
if ((cpu_flags & XVID_CPU_3DNOW)) { |
429 |
|
|
430 |
/* Interpolation */ |
/* Interpolation */ |
431 |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dn; |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dn; |
433 |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dn; |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dn; |
434 |
} |
} |
435 |
|
|
436 |
if ((cpu_flags & XVID_CPU_SSE2) > 0) { |
if ((cpu_flags & XVID_CPU_3DNOWEXT)) { |
437 |
#ifdef EXPERIMENTAL_SSE2_CODE |
|
438 |
|
/* Inverse DCT */ |
439 |
|
idct = idct_3dne; |
440 |
|
|
441 |
|
/* Buffer transfer */ |
442 |
|
transfer_8to16copy = transfer_8to16copy_3dne; |
443 |
|
transfer_16to8copy = transfer_16to8copy_3dne; |
444 |
|
transfer_8to16sub = transfer_8to16sub_3dne; |
445 |
|
transfer_8to16subro = transfer_8to16subro_3dne; |
446 |
|
transfer_8to16sub2 = transfer_8to16sub2_3dne; |
447 |
|
transfer_16to8add = transfer_16to8add_3dne; |
448 |
|
transfer8x8_copy = transfer8x8_copy_3dne; |
449 |
|
|
450 |
|
/* Quantization */ |
451 |
|
dequant4_intra = dequant4_intra_3dne; |
452 |
|
dequant4_inter = dequant4_inter_3dne; |
453 |
|
quant_intra = quant_intra_3dne; |
454 |
|
quant_inter = quant_inter_3dne; |
455 |
|
dequant_intra = dequant_intra_3dne; |
456 |
|
dequant_inter = dequant_inter_3dne; |
457 |
|
|
458 |
|
/* ME functions */ |
459 |
|
calc_cbp = calc_cbp_3dne; |
460 |
|
sad16 = sad16_3dne; |
461 |
|
sad8 = sad8_3dne; |
462 |
|
sad16bi = sad16bi_3dne; |
463 |
|
sad8bi = sad8bi_3dne; |
464 |
|
dev16 = dev16_3dne; |
465 |
|
|
466 |
|
/* Interpolation */ |
467 |
|
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dne; |
468 |
|
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dne; |
469 |
|
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dne; |
470 |
|
} |
471 |
|
|
472 |
|
|
473 |
|
if ((cpu_flags & XVID_CPU_SSE2)) { |
474 |
|
|
475 |
calc_cbp = calc_cbp_sse2; |
calc_cbp = calc_cbp_sse2; |
476 |
|
|
480 |
quant_inter = quant_inter_sse2; |
quant_inter = quant_inter_sse2; |
481 |
dequant_inter = dequant_inter_sse2; |
dequant_inter = dequant_inter_sse2; |
482 |
|
|
483 |
/* ME */ |
#if defined(EXPERIMENTAL_SSE2_CODE) |
484 |
|
/* ME; slower than xmm */ |
485 |
sad16 = sad16_sse2; |
sad16 = sad16_sse2; |
486 |
dev16 = dev16_sse2; |
dev16 = dev16_sse2; |
487 |
|
#endif |
488 |
/* Forward and Inverse DCT */ |
/* Forward and Inverse DCT */ |
489 |
idct = idct_sse2; |
idct = idct_sse2; |
490 |
fdct = fdct_sse2; |
fdct = fdct_sse2; |
|
#endif |
|
491 |
} |
} |
|
|
|
492 |
#endif |
#endif |
493 |
|
|
494 |
#ifdef ARCH_IS_IA64 |
#if defined(ARCH_IS_IA64) |
495 |
if ((cpu_flags & XVID_CPU_IA64) > 0) { //use assembler routines? |
if ((cpu_flags & XVID_CPU_ASM)) { /* use assembler routines? */ |
496 |
idct_ia64_init(); |
idct_ia64_init(); |
497 |
fdct = fdct_ia64; |
fdct = fdct_ia64; |
498 |
idct = idct_ia64; |
idct = idct_ia64; /*not yet working, crashes */ |
499 |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_ia64; |
interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_ia64; |
500 |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_ia64; |
interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_ia64; |
501 |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_ia64; |
interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_ia64; |
503 |
sad16bi = sad16bi_ia64; |
sad16bi = sad16bi_ia64; |
504 |
sad8 = sad8_ia64; |
sad8 = sad8_ia64; |
505 |
dev16 = dev16_ia64; |
dev16 = dev16_ia64; |
506 |
Halfpel8_Refine = Halfpel8_Refine_ia64; |
/* Halfpel8_Refine = Halfpel8_Refine_ia64; */ |
507 |
quant_intra = quant_intra_ia64; |
quant_intra = quant_intra_ia64; |
508 |
dequant_intra = dequant_intra_ia64; |
dequant_intra = dequant_intra_ia64; |
509 |
quant_inter = quant_inter_ia64; |
quant_inter = quant_inter_ia64; |
514 |
transfer_8to16sub2 = transfer_8to16sub2_ia64; |
transfer_8to16sub2 = transfer_8to16sub2_ia64; |
515 |
transfer_16to8add = transfer_16to8add_ia64; |
transfer_16to8add = transfer_16to8add_ia64; |
516 |
transfer8x8_copy = transfer8x8_copy_ia64; |
transfer8x8_copy = transfer8x8_copy_ia64; |
517 |
// DEBUG("Using IA-64 assembler routines.\n"); |
DPRINTF(DPRINTF_DEBUG, "Using IA-64 assembler routines."); |
518 |
} |
} |
519 |
#endif |
#endif |
520 |
|
|
521 |
#ifdef ARCH_IS_PPC |
#if defined(ARCH_IS_PPC) |
522 |
#ifdef ARCH_IS_PPC_ALTIVEC |
if ((cpu_flags & XVID_CPU_ASM)) |
523 |
|
{ |
524 |
|
calc_cbp = calc_cbp_ppc; |
525 |
|
} |
526 |
|
|
527 |
|
if ((cpu_flags & XVID_CPU_ALTIVEC)) |
528 |
|
{ |
529 |
calc_cbp = calc_cbp_altivec; |
calc_cbp = calc_cbp_altivec; |
530 |
fdct = fdct_altivec; |
fdct = fdct_altivec; |
531 |
idct = idct_altivec; |
idct = idct_altivec; |
533 |
sad16 = sad16_altivec; |
sad16 = sad16_altivec; |
534 |
sad8 = sad8_altivec; |
sad8 = sad8_altivec; |
535 |
dev16 = dev16_altivec; |
dev16 = dev16_altivec; |
536 |
|
} |
537 |
|
#endif |
538 |
|
|
539 |
|
#if defined(_DEBUG) |
540 |
|
xvid_debug = init->debug; |
541 |
|
#endif |
542 |
|
|
543 |
|
return 0; |
544 |
|
} |
545 |
|
|
546 |
|
|
547 |
|
static int |
548 |
|
xvid_gbl_info(xvid_gbl_info_t * info) |
549 |
|
{ |
550 |
|
if (XVID_MAJOR(info->version) != 1) /* v1.x.x */ |
551 |
|
return XVID_ERR_VERSION; |
552 |
|
|
553 |
|
info->actual_version = XVID_VERSION; |
554 |
|
info->build = "dev-api-4"; |
555 |
|
info->cpu_flags = detect_cpu_flags(); |
556 |
|
|
557 |
|
#if defined(_SMP) && defined(WIN32) |
558 |
|
info->num_threads = pthread_num_processors_np();; |
559 |
#else |
#else |
560 |
calc_cbp = calc_cbp_ppc; |
info->num_threads = 0; |
561 |
|
#endif |
562 |
|
|
563 |
|
return 0; |
564 |
|
} |
565 |
|
|
566 |
|
|
567 |
|
static int |
568 |
|
xvid_gbl_convert(xvid_gbl_convert_t* convert) |
569 |
|
{ |
570 |
|
int width; |
571 |
|
int height; |
572 |
|
int width2; |
573 |
|
int height2; |
574 |
|
IMAGE img; |
575 |
|
|
576 |
|
if (XVID_MAJOR(convert->version) != 1) /* v1.x.x */ |
577 |
|
return XVID_ERR_VERSION; |
578 |
|
|
579 |
|
// const int flip1 = (convert->input.colorspace & XVID_CSP_VFLIP) ^ (convert->output.colorspace & XVID_CSP_VFLIP); |
580 |
|
width = convert->width; |
581 |
|
height = convert->height; |
582 |
|
width2 = convert->width/2; |
583 |
|
height2 = convert->height/2; |
584 |
|
|
585 |
|
switch (convert->input.csp & ~XVID_CSP_VFLIP) |
586 |
|
{ |
587 |
|
case XVID_CSP_YV12 : |
588 |
|
img.y = convert->input.plane[0]; |
589 |
|
img.v = (uint8_t*)convert->input.plane[0] + convert->input.stride[0]*height; |
590 |
|
img.u = (uint8_t*)convert->input.plane[0] + convert->input.stride[0]*height + (convert->input.stride[0]/2)*height2; |
591 |
|
image_output(&img, width, height, width, |
592 |
|
(uint8_t**)convert->output.plane, convert->output.stride, |
593 |
|
convert->output.csp, convert->interlacing); |
594 |
|
break; |
595 |
|
|
596 |
|
default : |
597 |
|
return XVID_ERR_FORMAT; |
598 |
|
} |
599 |
|
|
600 |
|
|
601 |
|
emms(); |
602 |
|
return 0; |
603 |
|
} |
604 |
|
|
605 |
|
|
606 |
|
|
607 |
|
void fill8(uint8_t * block, int size, int value) |
608 |
|
{ |
609 |
|
int i; |
610 |
|
for (i = 0; i < size; i++) |
611 |
|
block[i] = value; |
612 |
|
} |
613 |
|
|
614 |
|
void fill16(int16_t * block, int size, int value) |
615 |
|
{ |
616 |
|
int i; |
617 |
|
for (i = 0; i < size; i++) |
618 |
|
block[i] = value; |
619 |
|
} |
620 |
|
|
621 |
|
#define RANDOM(min,max) min + (rand() % (max-min)) |
622 |
|
|
623 |
|
void random8(uint8_t * block, int size, int min, int max) |
624 |
|
{ |
625 |
|
int i; |
626 |
|
for (i = 0; i < size; i++) |
627 |
|
block[i] = RANDOM(min,max); |
628 |
|
} |
629 |
|
|
630 |
|
void random16(int16_t * block, int size, int min, int max) |
631 |
|
{ |
632 |
|
int i; |
633 |
|
for (i = 0; i < size; i++) |
634 |
|
block[i] = RANDOM(min,max); |
635 |
|
} |
636 |
|
|
637 |
|
int compare16(const int16_t * blockA, const int16_t * blockB, int size) |
638 |
|
{ |
639 |
|
int i; |
640 |
|
for (i = 0; i < size; i++) |
641 |
|
if (blockA[i] != blockB[i]) |
642 |
|
return 1; |
643 |
|
|
644 |
|
return 0; |
645 |
|
} |
646 |
|
|
647 |
|
int diff16(const int16_t * blockA, const int16_t * blockB, int size) |
648 |
|
{ |
649 |
|
int i, diff = 0; |
650 |
|
for (i = 0; i < size; i++) |
651 |
|
diff += abs(blockA[i]-blockB[i]); |
652 |
|
return diff; |
653 |
|
} |
654 |
|
|
655 |
|
|
656 |
|
#define XVID_TEST_RANDOM 0x00000001 /* random input data */ |
657 |
|
#define XVID_TEST_VERBOSE 0x00000002 /* verbose error output */ |
658 |
|
|
659 |
|
|
660 |
|
#define TEST_FORWARD 0x00000001 /* intra */ |
661 |
|
#define TEST_FDCT (TEST_FORWARD) |
662 |
|
#define TEST_IDCT (0) |
663 |
|
|
664 |
|
static int test_transform(void * funcA, void * funcB, const char * nameB, |
665 |
|
int test, int flags) |
666 |
|
{ |
667 |
|
int i; |
668 |
|
int64_t timeSTART; |
669 |
|
int64_t timeA = 0; |
670 |
|
int64_t timeB = 0; |
671 |
|
DECLARE_ALIGNED_MATRIX(arrayA, 1, 64, int16_t, CACHE_LINE); |
672 |
|
DECLARE_ALIGNED_MATRIX(arrayB, 1, 64, int16_t, CACHE_LINE); |
673 |
|
int min, max; |
674 |
|
int count = 0; |
675 |
|
|
676 |
|
int tmp; |
677 |
|
int min_error = 0x10000*64; |
678 |
|
int max_error = 0; |
679 |
|
|
680 |
|
|
681 |
|
if ((test & TEST_FORWARD)) /* forward */ |
682 |
|
{ |
683 |
|
min = -256; |
684 |
|
max = 255; |
685 |
|
}else{ /* inverse */ |
686 |
|
min = -2048; |
687 |
|
max = 2047; |
688 |
|
} |
689 |
|
|
690 |
|
for (i = 0; i < 64*64; i++) |
691 |
|
{ |
692 |
|
if ((flags & XVID_TEST_RANDOM)) |
693 |
|
{ |
694 |
|
random16(arrayA, 64, min, max); |
695 |
|
}else{ |
696 |
|
fill16(arrayA, 64, i); |
697 |
|
} |
698 |
|
memcpy(arrayB, arrayA, 64*sizeof(int16_t)); |
699 |
|
|
700 |
|
if ((test & TEST_FORWARD)) |
701 |
|
{ |
702 |
|
timeSTART = read_counter(); |
703 |
|
((fdctFunc*)funcA)(arrayA); |
704 |
|
timeA += read_counter() - timeSTART; |
705 |
|
|
706 |
|
timeSTART = read_counter(); |
707 |
|
((fdctFunc*)funcB)(arrayB); |
708 |
|
timeB += read_counter() - timeSTART; |
709 |
|
} |
710 |
|
else |
711 |
|
{ |
712 |
|
timeSTART = read_counter(); |
713 |
|
((idctFunc*)funcA)(arrayA); |
714 |
|
timeA += read_counter() - timeSTART; |
715 |
|
|
716 |
|
timeSTART = read_counter(); |
717 |
|
((idctFunc*)funcB)(arrayB); |
718 |
|
timeB += read_counter() - timeSTART; |
719 |
|
} |
720 |
|
|
721 |
|
tmp = diff16(arrayA, arrayB, 64) / 64; |
722 |
|
if (tmp > max_error) |
723 |
|
max_error = tmp; |
724 |
|
if (tmp < min_error) |
725 |
|
min_error = tmp; |
726 |
|
|
727 |
|
count++; |
728 |
|
} |
729 |
|
|
730 |
|
/* print the "average difference" of best/worst transforms */ |
731 |
|
printf("%s:\t%i\t(min_error:%i, max_error:%i)\n", nameB, (int)(timeB / count), min_error, max_error); |
732 |
|
|
733 |
|
return 0; |
734 |
|
} |
735 |
|
|
736 |
|
|
737 |
|
#define TEST_QUANT 0x00000001 /* forward quantization */ |
738 |
|
#define TEST_INTRA 0x00000002 /* intra */ |
739 |
|
#define TEST_QUANT_INTRA (TEST_QUANT|TEST_INTRA) |
740 |
|
#define TEST_QUANT_INTER (TEST_QUANT) |
741 |
|
#define TEST_DEQUANT_INTRA (TEST_INTRA) |
742 |
|
#define TEST_DEQUANT_INTER (0) |
743 |
|
|
744 |
|
static int test_quant(void * funcA, void * funcB, const char * nameB, |
745 |
|
int test, int flags) |
746 |
|
{ |
747 |
|
int q,i; |
748 |
|
int64_t timeSTART; |
749 |
|
int64_t timeA = 0; |
750 |
|
int64_t timeB = 0; |
751 |
|
int retA = 0, retB = 0; |
752 |
|
DECLARE_ALIGNED_MATRIX(arrayX, 1, 64, int16_t, CACHE_LINE); |
753 |
|
DECLARE_ALIGNED_MATRIX(arrayA, 1, 64, int16_t, CACHE_LINE); |
754 |
|
DECLARE_ALIGNED_MATRIX(arrayB, 1, 64, int16_t, CACHE_LINE); |
755 |
|
int min, max; |
756 |
|
int count = 0; |
757 |
|
int errors = 0; |
758 |
|
|
759 |
|
if ((test & TEST_QUANT)) /* quant */ |
760 |
|
{ |
761 |
|
min = -2048; |
762 |
|
max = 2047; |
763 |
|
}else{ /* dequant */ |
764 |
|
min = -256; |
765 |
|
max = 255; |
766 |
|
} |
767 |
|
|
768 |
|
for (q = 1; q <= 31; q++) /* quantizer */ |
769 |
|
{ |
770 |
|
for (i = min; i < max; i++) /* input coeff */ |
771 |
|
{ |
772 |
|
if ((flags & XVID_TEST_RANDOM)) |
773 |
|
{ |
774 |
|
random16(arrayX, 64, min, max); |
775 |
|
}else{ |
776 |
|
fill16(arrayX, 64, i); |
777 |
|
} |
778 |
|
|
779 |
|
if ((test & TEST_INTRA)) /* intra */ |
780 |
|
{ |
781 |
|
timeSTART = read_counter(); |
782 |
|
((quanth263_intraFunc*)funcA)(arrayA, arrayX, q, q); |
783 |
|
timeA += read_counter() - timeSTART; |
784 |
|
|
785 |
|
timeSTART = read_counter(); |
786 |
|
((quanth263_intraFunc*)funcB)(arrayB, arrayX, q, q); |
787 |
|
timeB += read_counter() - timeSTART; |
788 |
|
} |
789 |
|
else /* inter */ |
790 |
|
{ |
791 |
|
timeSTART = read_counter(); |
792 |
|
retA = ((quanth263_interFunc*)funcA)(arrayA, arrayX, q); |
793 |
|
timeA += read_counter() - timeSTART; |
794 |
|
|
795 |
|
timeSTART = read_counter(); |
796 |
|
retB = ((quanth263_interFunc*)funcB)(arrayB, arrayX, q); |
797 |
|
timeB += read_counter() - timeSTART; |
798 |
|
} |
799 |
|
|
800 |
|
/* compare return value from quant_inter, and compare (de)quantiz'd arrays */ |
801 |
|
if ( ((test&TEST_QUANT) && !(test&TEST_INTRA) && retA != retB ) || |
802 |
|
compare16(arrayA, arrayB, 64)) |
803 |
|
{ |
804 |
|
errors++; |
805 |
|
if ((flags & XVID_TEST_VERBOSE)) |
806 |
|
printf("%s error: q=%i, i=%i\n", nameB, q, i); |
807 |
|
} |
808 |
|
|
809 |
|
count++; |
810 |
|
} |
811 |
|
} |
812 |
|
|
813 |
|
printf("%s:\t%i", nameB, (int)(timeB / count)); |
814 |
|
if (errors>0) |
815 |
|
printf("\t(%i errors out of %i)", errors, count); |
816 |
|
printf("\n"); |
817 |
|
|
818 |
|
return 0; |
819 |
|
} |
820 |
|
|
821 |
|
|
822 |
|
|
823 |
|
int xvid_init_test(int flags) |
824 |
|
{ |
825 |
|
#if defined(ARCH_IS_IA32) |
826 |
|
int cpu_flags; |
827 |
|
#endif |
828 |
|
|
829 |
|
printf("XviD tests\n\n"); |
830 |
|
|
831 |
|
#if defined(ARCH_IS_IA32) |
832 |
|
cpu_flags = detect_cpu_flags(); |
833 |
|
#endif |
834 |
|
|
835 |
|
idct_int32_init(); |
836 |
|
emms(); |
837 |
|
|
838 |
|
srand(time(0)); |
839 |
|
|
840 |
|
/* fDCT test */ |
841 |
|
printf("--- fdct ---\n"); |
842 |
|
test_transform(fdct_int32, fdct_int32, "c", TEST_FDCT, flags); |
843 |
|
|
844 |
|
#if defined(ARCH_IS_IA32) |
845 |
|
if (cpu_flags & XVID_CPU_MMX) |
846 |
|
test_transform(fdct_int32, fdct_mmx, "mmx", TEST_FDCT, flags); |
847 |
|
if (cpu_flags & XVID_CPU_SSE2) |
848 |
|
test_transform(fdct_int32, fdct_sse2, "sse2", TEST_FDCT, flags); |
849 |
|
#endif |
850 |
|
|
851 |
|
/* iDCT test */ |
852 |
|
printf("\n--- idct ---\n"); |
853 |
|
test_transform(idct_int32, idct_int32, "c", TEST_IDCT, flags); |
854 |
|
|
855 |
|
#if defined(ARCH_IS_IA32) |
856 |
|
if (cpu_flags & XVID_CPU_MMX) |
857 |
|
test_transform(idct_int32, idct_mmx, "mmx", TEST_IDCT, flags); |
858 |
|
if (cpu_flags & XVID_CPU_MMXEXT) |
859 |
|
test_transform(idct_int32, idct_xmm, "xmm", TEST_IDCT, flags); |
860 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
861 |
|
test_transform(idct_int32, idct_3dne, "3dne", TEST_IDCT, flags); |
862 |
|
if (cpu_flags & XVID_CPU_SSE2) |
863 |
|
test_transform(idct_int32, idct_sse2, "sse2", TEST_IDCT, flags); |
864 |
#endif |
#endif |
865 |
|
|
866 |
|
/* Intra quantization test */ |
867 |
|
printf("\n--- quant intra ---\n"); |
868 |
|
test_quant(quant_intra_c, quant_intra_c, "c", TEST_QUANT_INTRA, flags); |
869 |
|
|
870 |
|
#if defined(ARCH_IS_IA32) |
871 |
|
if (cpu_flags & XVID_CPU_MMX) |
872 |
|
test_quant(quant_intra_c, quant_intra_mmx, "mmx", TEST_QUANT_INTRA, flags); |
873 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
874 |
|
test_quant(quant_intra_c, quant_intra_3dne, "3dne", TEST_QUANT_INTRA, flags); |
875 |
|
if (cpu_flags & XVID_CPU_SSE2) |
876 |
|
test_quant(quant_intra_c, quant_intra_sse2, "sse2", TEST_QUANT_INTRA, flags); |
877 |
#endif |
#endif |
878 |
|
|
879 |
return XVID_ERR_OK; |
/* Inter quantization test */ |
880 |
|
printf("\n--- quant inter ---\n"); |
881 |
|
test_quant(quant_inter_c, quant_inter_c, "c", TEST_QUANT_INTER, flags); |
882 |
|
|
883 |
|
#if defined(ARCH_IS_IA32) |
884 |
|
if (cpu_flags & XVID_CPU_MMX) |
885 |
|
test_quant(quant_inter_c, quant_inter_mmx, "mmx", TEST_QUANT_INTER, flags); |
886 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
887 |
|
test_quant(quant_inter_c, quant_inter_3dne, "3dne", TEST_QUANT_INTER, flags); |
888 |
|
if (cpu_flags & XVID_CPU_SSE2) |
889 |
|
test_quant(quant_inter_c, quant_inter_sse2, "sse2", TEST_QUANT_INTER, flags); |
890 |
|
#endif |
891 |
|
|
892 |
|
/* Intra dequantization test */ |
893 |
|
printf("\n--- dequant intra ---\n"); |
894 |
|
test_quant(dequant_intra_c, dequant_intra_c, "c", TEST_DEQUANT_INTRA, flags); |
895 |
|
|
896 |
|
#if defined(ARCH_IS_IA32) |
897 |
|
if (cpu_flags & XVID_CPU_MMX) |
898 |
|
test_quant(dequant_intra_c, dequant_intra_mmx, "mmx", TEST_DEQUANT_INTRA, flags); |
899 |
|
if (cpu_flags & XVID_CPU_MMXEXT) |
900 |
|
test_quant(dequant_intra_c, dequant_intra_xmm, "xmm", TEST_DEQUANT_INTRA, flags); |
901 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
902 |
|
test_quant(dequant_intra_c, dequant_intra_3dne, "3dne", TEST_DEQUANT_INTRA, flags); |
903 |
|
if (cpu_flags & XVID_CPU_SSE2) |
904 |
|
test_quant(dequant_intra_c, dequant_intra_sse2, "sse2", TEST_DEQUANT_INTRA, flags); |
905 |
|
#endif |
906 |
|
|
907 |
|
/* Inter dequantization test */ |
908 |
|
printf("\n--- dequant inter ---\n"); |
909 |
|
test_quant(dequant_inter_c, dequant_inter_c, "c", TEST_DEQUANT_INTER, flags); |
910 |
|
|
911 |
|
#if defined(ARCH_IS_IA32) |
912 |
|
if (cpu_flags & XVID_CPU_MMX) |
913 |
|
test_quant(dequant_inter_c, dequant_inter_mmx, "mmx", TEST_DEQUANT_INTER, flags); |
914 |
|
if (cpu_flags & XVID_CPU_MMXEXT) |
915 |
|
test_quant(dequant_inter_c, dequant_inter_xmm, "xmm", TEST_DEQUANT_INTER, flags); |
916 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
917 |
|
test_quant(dequant_inter_c, dequant_inter_3dne, "3dne", TEST_DEQUANT_INTER, flags); |
918 |
|
if (cpu_flags & XVID_CPU_SSE2) |
919 |
|
test_quant(dequant_inter_c, dequant_inter_sse2, "sse2", TEST_DEQUANT_INTER, flags); |
920 |
|
#endif |
921 |
|
|
922 |
|
/* Intra quantization test */ |
923 |
|
printf("\n--- quant4 intra ---\n"); |
924 |
|
test_quant(quant4_intra_c, quant4_intra_c, "c", TEST_QUANT_INTRA, flags); |
925 |
|
|
926 |
|
#if defined(ARCH_IS_IA32) |
927 |
|
if (cpu_flags & XVID_CPU_MMX) |
928 |
|
test_quant(quant4_intra_c, quant4_intra_mmx, "mmx", TEST_QUANT_INTRA, flags); |
929 |
|
if (cpu_flags & XVID_CPU_MMXEXT) |
930 |
|
test_quant(quant4_intra_c, quant4_intra_xmm, "xmm", TEST_QUANT_INTRA, flags); |
931 |
|
#endif |
932 |
|
|
933 |
|
/* Inter quantization test */ |
934 |
|
printf("\n--- quant4 inter ---\n"); |
935 |
|
test_quant(quant4_inter_c, quant4_inter_c, "c", TEST_QUANT_INTER, flags); |
936 |
|
|
937 |
|
#if defined(ARCH_IS_IA32) |
938 |
|
if (cpu_flags & XVID_CPU_MMX) |
939 |
|
test_quant(quant4_inter_c, quant4_inter_mmx, "mmx", TEST_QUANT_INTER, flags); |
940 |
|
if (cpu_flags & XVID_CPU_MMXEXT) |
941 |
|
test_quant(quant4_inter_c, quant4_inter_xmm, "xmm", TEST_QUANT_INTER, flags); |
942 |
|
#endif |
943 |
|
|
944 |
|
/* Intra dequantization test */ |
945 |
|
printf("\n--- dequant4 intra ---\n"); |
946 |
|
test_quant(dequant4_intra_c, dequant4_intra_c, "c", TEST_DEQUANT_INTRA, flags); |
947 |
|
|
948 |
|
#if defined(ARCH_IS_IA32) |
949 |
|
if (cpu_flags & XVID_CPU_MMX) |
950 |
|
test_quant(dequant4_intra_c, dequant4_intra_mmx, "mmx", TEST_DEQUANT_INTRA, flags); |
951 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
952 |
|
test_quant(dequant4_intra_c, dequant4_intra_3dne, "3dne", TEST_DEQUANT_INTRA, flags); |
953 |
|
#endif |
954 |
|
|
955 |
|
/* Inter dequantization test */ |
956 |
|
printf("\n--- dequant4 inter ---\n"); |
957 |
|
test_quant(dequant4_inter_c, dequant4_inter_c, "c", TEST_DEQUANT_INTER, flags); |
958 |
|
|
959 |
|
#if defined(ARCH_IS_IA32) |
960 |
|
if (cpu_flags & XVID_CPU_MMX) |
961 |
|
test_quant(dequant4_inter_c, dequant4_inter_mmx, "mmx", TEST_DEQUANT_INTER, flags); |
962 |
|
if (cpu_flags & XVID_CPU_3DNOWEXT) |
963 |
|
test_quant(dequant4_inter_c, dequant4_inter_3dne, "3dne", TEST_DEQUANT_INTER, flags); |
964 |
|
#endif |
965 |
|
|
966 |
|
emms(); |
967 |
|
|
968 |
|
return 0; |
969 |
|
} |
970 |
|
|
971 |
|
|
972 |
|
/*****************************************************************************
 * XviD global entry point
 *
 * This function initializes all internal function pointers according to
 * the CPU features forced by the library client or autodetected (depending
 * on the XVID_CPU_FORCE flag). It also initializes the VLC coding tables
 * and all image colorspace transformation tables.
 *
 ****************************************************************************/
981 |
|
|
982 |
|
|
983 |
|
int |
984 |
|
xvid_global(void *handle, |
985 |
|
int opt, |
986 |
|
void *param1, |
987 |
|
void *param2) |
988 |
|
{ |
989 |
|
switch(opt) |
990 |
|
{ |
991 |
|
case XVID_GBL_INIT : |
992 |
|
return xvid_gbl_init((xvid_gbl_init_t*)param1); |
993 |
|
|
994 |
|
case XVID_GBL_INFO : |
995 |
|
return xvid_gbl_info((xvid_gbl_info_t*)param1); |
996 |
|
|
997 |
|
case XVID_GBL_CONVERT : |
998 |
|
return xvid_gbl_convert((xvid_gbl_convert_t*)param1); |
999 |
|
|
1000 |
|
case XVID_GBL_TEST : |
1001 |
|
{ |
1002 |
|
ptr_t flags = (ptr_t)param1; |
1003 |
|
return xvid_init_test((int)flags); |
1004 |
|
} |
1005 |
|
default : |
1006 |
|
return XVID_ERR_FAIL; |
1007 |
|
} |
1008 |
} |
} |
1009 |
|
|
1010 |
/***************************************************************************** |
/***************************************************************************** |
1024 |
void *param2) |
void *param2) |
1025 |
{ |
{ |
1026 |
switch (opt) { |
switch (opt) { |
|
case XVID_DEC_DECODE: |
|
|
return decoder_decode((DECODER *) handle, (XVID_DEC_FRAME *) param1); |
|
|
|
|
1027 |
case XVID_DEC_CREATE: |
case XVID_DEC_CREATE: |
1028 |
return decoder_create((XVID_DEC_PARAM *) param1); |
return decoder_create((xvid_dec_create_t *) param1); |
1029 |
|
|
1030 |
case XVID_DEC_DESTROY: |
case XVID_DEC_DESTROY: |
1031 |
return decoder_destroy((DECODER *) handle); |
return decoder_destroy((DECODER *) handle); |
1032 |
|
|
1033 |
|
case XVID_DEC_DECODE: |
1034 |
|
return decoder_decode((DECODER *) handle, (xvid_dec_frame_t *) param1, (xvid_dec_stats_t*) param2); |
1035 |
|
|
1036 |
default: |
default: |
1037 |
return XVID_ERR_FAIL; |
return XVID_ERR_FAIL; |
1038 |
} |
} |
1057 |
{ |
{ |
1058 |
switch (opt) { |
switch (opt) { |
1059 |
case XVID_ENC_ENCODE: |
case XVID_ENC_ENCODE: |
1060 |
return encoder_encode((Encoder *) handle, (XVID_ENC_FRAME *) param1, |
|
1061 |
(XVID_ENC_STATS *) param2); |
return enc_encode((Encoder *) handle, |
1062 |
|
(xvid_enc_frame_t *) param1, |
1063 |
|
(xvid_enc_stats_t *) param2); |
1064 |
|
|
1065 |
case XVID_ENC_CREATE: |
case XVID_ENC_CREATE: |
1066 |
return encoder_create((XVID_ENC_PARAM *) param1); |
return enc_create((xvid_enc_create_t *) param1); |
1067 |
|
|
1068 |
case XVID_ENC_DESTROY: |
case XVID_ENC_DESTROY: |
1069 |
return encoder_destroy((Encoder *) handle); |
return enc_destroy((Encoder *) handle); |
1070 |
|
|
1071 |
default: |
default: |
1072 |
return XVID_ERR_FAIL; |
return XVID_ERR_FAIL; |