--- xvid.c	2002/07/23 12:59:57	1.33
+++ xvid.c	2002/11/07 10:28:15	1.33.2.11
@@ -37,7 +37,7 @@
  * - 22.12.2001 API change: added xvid_init() - Isibaar
  * - 16.12.2001 inital version; (c)2001 peter ross
  *
- * $Id: xvid.c,v 1.33 2002/07/23 12:59:57 suxen_drol Exp $
+ * $Id: xvid.c,v 1.33.2.11 2002/11/07 10:28:15 suxen_drol Exp $
  *
  ****************************************************************************/
@@ -50,6 +50,7 @@
 #include "image/colorspace.h"
 #include "image/interpolate8x8.h"
 #include "utils/mem_transfer.h"
+#include "utils/mbfunctions.h"
 #include "quant/quant_h263.h"
 #include "quant/quant_mpeg4.h"
 #include "motion/motion.h"
@@ -216,31 +217,70 @@
 	transfer_16to8add = transfer_16to8add_c;
 	transfer8x8_copy = transfer8x8_copy_c;
 
+	/* Interlacing functions */
+	MBFieldTest = MBFieldTest_c;
+
 	/* Image interpolation related functions */
 	interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_c;
 	interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_c;
 	interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c;
 
+	interpolate16x16_lowpass_h = interpolate16x16_lowpass_h_c;
+	interpolate16x16_lowpass_v = interpolate16x16_lowpass_v_c;
+	interpolate16x16_lowpass_hv = interpolate16x16_lowpass_hv_c;
+
+	interpolate8x8_lowpass_h = interpolate8x8_lowpass_h_c;
+	interpolate8x8_lowpass_v = interpolate8x8_lowpass_v_c;
+	interpolate8x8_lowpass_hv = interpolate8x8_lowpass_hv_c;
+
+	interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_c;
+	interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_c;
+
+	interpolate8x8_avg2 = interpolate8x8_avg2_c;
+	interpolate8x8_avg4 = interpolate8x8_avg4_c;
+
 	/* Initialize internal colorspace transformation tables */
 	colorspace_init();
 
 	/* All colorspace transformation functions User Format->YV12 */
-	rgb555_to_yv12 = rgb555_to_yv12_c;
-	rgb565_to_yv12 = rgb565_to_yv12_c;
-	rgb24_to_yv12 = rgb24_to_yv12_c;
-	rgb32_to_yv12 = rgb32_to_yv12_c;
-	yuv_to_yv12 = yuv_to_yv12_c;
-	yuyv_to_yv12 = yuyv_to_yv12_c;
-	uyvy_to_yv12 = uyvy_to_yv12_c;
+	yv12_to_yv12 = yv12_to_yv12_c;
+	rgb555_to_yv12 = rgb555_to_yv12_c;
+	rgb565_to_yv12 = rgb565_to_yv12_c;
+	bgr_to_yv12 = bgr_to_yv12_c;
+	bgra_to_yv12 = bgra_to_yv12_c;
+	abgr_to_yv12 = abgr_to_yv12_c;
+	rgba_to_yv12 = rgba_to_yv12_c;
+	yuyv_to_yv12 = yuyv_to_yv12_c;
+	uyvy_to_yv12 = uyvy_to_yv12_c;
+
+	rgb555i_to_yv12 = rgb555i_to_yv12_c;
+	rgb565i_to_yv12 = rgb565i_to_yv12_c;
+	bgri_to_yv12 = bgri_to_yv12_c;
+	bgrai_to_yv12 = bgrai_to_yv12_c;
+	abgri_to_yv12 = abgri_to_yv12_c;
+	rgbai_to_yv12 = rgbai_to_yv12_c;
+	yuyvi_to_yv12 = yuyvi_to_yv12_c;
+	uyvyi_to_yv12 = uyvyi_to_yv12_c;
+
 	/* All colorspace transformation functions YV12->User format */
-	yv12_to_rgb555 = yv12_to_rgb555_c;
-	yv12_to_rgb565 = yv12_to_rgb565_c;
-	yv12_to_rgb24 = yv12_to_rgb24_c;
-	yv12_to_rgb32 = yv12_to_rgb32_c;
-	yv12_to_yuv = yv12_to_yuv_c;
-	yv12_to_yuyv = yv12_to_yuyv_c;
-	yv12_to_uyvy = yv12_to_uyvy_c;
+	yv12_to_rgb555 = yv12_to_rgb555_c;
+	yv12_to_rgb565 = yv12_to_rgb565_c;
+	yv12_to_bgr = yv12_to_bgr_c;
+	yv12_to_bgra = yv12_to_bgra_c;
+	yv12_to_abgr = yv12_to_abgr_c;
+	yv12_to_rgba = yv12_to_rgba_c;
+	yv12_to_yuyv = yv12_to_yuyv_c;
+	yv12_to_uyvy = yv12_to_uyvy_c;
+
+	yv12_to_rgb555i = yv12_to_rgb555i_c;
+	yv12_to_rgb565i = yv12_to_rgb565i_c;
+	yv12_to_bgri = yv12_to_bgri_c;
+	yv12_to_bgrai = yv12_to_bgrai_c;
+	yv12_to_abgri = yv12_to_abgri_c;
+	yv12_to_rgbai = yv12_to_rgbai_c;
+	yv12_to_yuyvi = yv12_to_yuyvi_c;
+	yv12_to_uyvyi = yv12_to_uyvyi_c;
 
 	/* Functions used in motion estimation algorithms */
 	calc_cbp = calc_cbp_c;
@@ -249,19 +289,26 @@
 	sad16bi = sad16bi_c;
 	sad8bi = sad8bi_c;
 	dev16 = dev16_c;
+	sad16v = sad16v_c;
 
-	Halfpel8_Refine = Halfpel8_Refine_c;
+//	Halfpel8_Refine = Halfpel8_Refine_c;
 
 #ifdef ARCH_X86
+
+	if ((cpu_flags & XVID_CPU_MMX) || (cpu_flags & XVID_CPU_MMXEXT) ||
+		(cpu_flags & XVID_CPU_3DNOW) || (cpu_flags & XVID_CPU_3DNOWEXT) ||
+		(cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2))
+	{
+		/* Restore FPU context : emms_c is a nop functions */
+		emms = emms_mmx;
+	}
+
 	if ((cpu_flags & XVID_CPU_MMX) > 0) {
 
 		/* Forward and Inverse Discrete Cosine Transformation functions */
 		fdct = fdct_mmx;
 		idct = idct_mmx;
 
-		/* To restore FPU context after mmx use */
-		emms = emms_mmx;
-
 		/* Quantization related functions */
 		quant_intra = quant_intra_mmx;
 		dequant_intra = dequant_intra_mmx;
@@ -281,24 +328,35 @@
 		transfer_16to8add = transfer_16to8add_mmx;
 		transfer8x8_copy = transfer8x8_copy_mmx;
 
+		/* Interlacing Functions */
+		MBFieldTest = MBFieldTest_mmx;
 
 		/* Image Interpolation related functions */
 		interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_mmx;
 		interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_mmx;
 		interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx;
 
-		/* Image RGB->YV12 related functions */
-		rgb24_to_yv12 = rgb24_to_yv12_mmx;
-		rgb32_to_yv12 = rgb32_to_yv12_mmx;
-		yuv_to_yv12 = yuv_to_yv12_mmx;
+		interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_mmx;
+		interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_mmx;
+
+//		interpolate8x8_avg2 = interpolate8x8_avg2_mmx;
+		interpolate8x8_avg4 = interpolate8x8_avg4_mmx;
+
+		/* image input xxx_to_yv12 related functions */
+		yv12_to_yv12 = yv12_to_yv12_mmx;
+		bgr_to_yv12 = bgr_to_yv12_mmx;
+		bgra_to_yv12 = bgra_to_yv12_mmx;
 		yuyv_to_yv12 = yuyv_to_yv12_mmx;
 		uyvy_to_yv12 = uyvy_to_yv12_mmx;
 
-		/* Image YV12->RGB related functions */
-		yv12_to_rgb24 = yv12_to_rgb24_mmx;
-		yv12_to_rgb32 = yv12_to_rgb32_mmx;
+		/* image output yv12_to_xxx related functions */
+		yv12_to_bgr = yv12_to_bgr_mmx;
+		yv12_to_bgra = yv12_to_bgra_mmx;
 		yv12_to_yuyv = yv12_to_yuyv_mmx;
 		yv12_to_uyvy = yv12_to_uyvy_mmx;
+
+		yv12_to_yuyvi = yv12_to_yuyvi_mmx;
+		yv12_to_uyvyi = yv12_to_uyvyi_mmx;
 
 		/* Motion estimation related functions */
 		calc_cbp = calc_cbp_mmx;
@@ -307,6 +365,7 @@
 		sad16bi = sad16bi_mmx;
 		sad8bi = sad8bi_mmx;
 		dev16 = dev16_mmx;
+		sad16v = sad16v_mmx;
 
 	}
 
@@ -316,6 +375,9 @@
 		/* ME functions */
 		sad16bi = sad16bi_3dn;
 		sad8bi = sad8bi_3dn;
+
+		yuyv_to_yv12 = yuyv_to_yv12_3dn;
+		uyvy_to_yv12 = uyvy_to_yv12_3dn;
 	}
 
@@ -337,7 +399,9 @@
 		transfer_8to16sub2 = transfer_8to16sub2_xmm;
 
 		/* Colorspace transformation */
-		yuv_to_yv12 = yuv_to_yv12_xmm;
+		yv12_to_yv12 = yv12_to_yv12_xmm;
+		yuyv_to_yv12 = yuyv_to_yv12_xmm;
+		uyvy_to_yv12 = yuyv_to_yv12_xmm;
 
 		/* ME functions */
 		sad16 = sad16_xmm;
@@ -345,7 +409,7 @@
 		sad16bi = sad16bi_xmm;
 		sad8bi = sad8bi_xmm;
 		dev16 = dev16_xmm;
-
+		sad16v = sad16v_xmm;
 	}
 
 	if ((cpu_flags & XVID_CPU_3DNOW) > 0) {
@@ -391,7 +455,7 @@
 		sad16bi = sad16bi_ia64;
 		sad8 = sad8_ia64;
 		dev16 = dev16_ia64;
-		Halfpel8_Refine = Halfpel8_Refine_ia64;
+//		Halfpel8_Refine = Halfpel8_Refine_ia64;
 		quant_intra = quant_intra_ia64;
 		dequant_intra = dequant_intra_ia64;
 		quant_inter = quant_inter_ia64;
@@ -441,7 +505,7 @@
 	{
 	switch (opt) {
 	case XVID_DEC_DECODE:
-		return decoder_decode((DECODER *) handle, (XVID_DEC_FRAME *) param1);
+		return decoder_decode((DECODER *) handle, (XVID_DEC_FRAME *) param1, (XVID_DEC_STATS*) param2);
 
 	case XVID_DEC_CREATE:
 		return decoder_create((XVID_DEC_PARAM *) param1);
@@ -473,12 +537,11 @@
 	{
 	switch (opt) {
 	case XVID_ENC_ENCODE:
-#ifdef BFRAMES
+		if (((Encoder *) handle)->mbParam.max_bframes >= 0)
 			return encoder_encode_bframes((Encoder *) handle,
 										  (XVID_ENC_FRAME *) param1,
 										  (XVID_ENC_STATS *) param2);
 		else
-#endif
 			return encoder_encode((Encoder *) handle,
 								  (XVID_ENC_FRAME *) param1,
 								  (XVID_ENC_STATS *) param2);
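
Editor's note (not part of the patch): the XVID_DEC_DECODE hunk above gives decoder_decode() a third argument, so xvid_decore() now forwards param2 as an XVID_DEC_STATS pointer. Below is a minimal caller-side sketch of what that implies, assuming the xvid.h declarations from this branch; decode_one_frame() is a hypothetical helper, and the XVID_DEC_FRAME field names used (bitstream, length, image, stride, colorspace) should be verified against the header shipped with this revision.

/*
 * Hedged sketch: one decode call through the updated XVID_DEC_DECODE path.
 * Nothing here is dictated by the patch except that param2 is now treated
 * as an XVID_DEC_STATS* instead of being ignored.
 */
#include "xvid.h"

int decode_one_frame(void *dec_handle,
                     void *bitstream, int length,
                     void *image, int stride, int colorspace)
{
	XVID_DEC_FRAME frame;
	XVID_DEC_STATS stats;   /* forwarded as param2 by the new dispatch */

	frame.bitstream  = bitstream;
	frame.length     = length;
	frame.image      = image;
	frame.stride     = stride;
	frame.colorspace = colorspace;

	/*
	 * Before this change param2 was unused for XVID_DEC_DECODE; it is now
	 * cast to XVID_DEC_STATS* and handed to decoder_decode().  Passing NULL
	 * instead of &stats is assumed to remain valid for callers that do not
	 * want per-frame statistics.
	 */
	return xvid_decore(dec_handle, XVID_DEC_DECODE, &frame, &stats);
}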