--- xvid.c	2004/12/19 13:16:50	1.59
+++ xvid.c	2008/11/14 15:43:27	1.74
@@ -19,7 +19,7 @@
  * along with this program ; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * $Id: xvid.c,v 1.59 2004/12/19 13:16:50 syskin Exp $
+ * $Id: xvid.c,v 1.74 2008/11/14 15:43:27 Isibaar Exp $
  *
  ****************************************************************************/
@@ -40,6 +40,7 @@
 #include "utils/mbfunctions.h"
 #include "quant/quant.h"
 #include "motion/motion.h"
+#include "motion/gmc.h"
 #include "motion/sad.h"
 #include "utils/emms.h"
 #include "utils/timer.h"
@@ -51,9 +52,9 @@
 unsigned int xvid_debug = 0; /* xvid debug mask */
 #endif

-#if defined(ARCH_IS_IA32) && defined(_MSC_VER)
+#if (defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)) && defined(_MSC_VER)
 #	include <windows.h>
-#elif defined(ARCH_IS_IA32) || defined(ARCH_IS_PPC)
+#elif defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64) || defined(ARCH_IS_PPC)
 #	include <signal.h>
 #	include <setjmp.h>
@@ -76,7 +77,7 @@
  * 0 : SIGILL was *not* signalled
  * 1 : SIGILL was signalled
  */
-#if defined(ARCH_IS_IA32) && defined(_MSC_VER)
+#if (defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)) && defined(_MSC_VER)
 static int
 sigill_check(void (*func)())
 {
@@ -89,7 +90,7 @@
 	}
 	return(0);
 }
-#elif defined(ARCH_IS_IA32) || defined(ARCH_IS_PPC)
+#elif defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64) || defined(ARCH_IS_PPC)
 static int
 sigill_check(void (*func)())
 {
@@ -126,18 +127,18 @@
 /* detect cpu flags */
 static unsigned int
-detect_cpu_flags()
+detect_cpu_flags(void)
 {
 	/* enable native assembly optimizations by default */
 	unsigned int cpu_flags = XVID_CPU_ASM;

-#if defined(ARCH_IS_IA32)
+#if defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)
 	cpu_flags |= check_cpu_features();
 	if ((cpu_flags & XVID_CPU_SSE) && sigill_check(sse_os_trigger))
 		cpu_flags &= ~XVID_CPU_SSE;
-	if ((cpu_flags & XVID_CPU_SSE2) && sigill_check(sse2_os_trigger))
-		cpu_flags &= ~XVID_CPU_SSE2;
+	if ((cpu_flags & (XVID_CPU_SSE2|XVID_CPU_SSE3|XVID_CPU_SSE41)) && sigill_check(sse2_os_trigger))
+		cpu_flags &= ~(XVID_CPU_SSE2|XVID_CPU_SSE3|XVID_CPU_SSE41);
 #endif

 #if defined(ARCH_IS_PPC)
@@ -183,7 +184,7 @@
 	idct = idct_int32;

 	/* Only needed on PPC Altivec archs */
-	sadInit = 0;
+	sadInit = NULL;

 	/* Restore FPU context : emms_c is a nop functions */
 	emms = emms_c;
@@ -213,6 +214,7 @@
 	transfer_8to16sub2ro = transfer_8to16sub2ro_c;
 	transfer_16to8add = transfer_16to8add_c;
 	transfer8x8_copy = transfer8x8_copy_c;
+	transfer8x4_copy = transfer8x4_copy_c;

 	/* Interlacing functions */
 	MBFieldTest = MBFieldTest_c;
@@ -222,6 +224,10 @@
 	interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_c;
 	interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c;

+	interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_c;
+	interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_c;
+	interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_c;
+
 	interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_c;
 	interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_c;
 	interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_c;
@@ -251,6 +257,7 @@
 	yv12_to_yv12 = yv12_to_yv12_c;
 	rgb555_to_yv12 = rgb555_to_yv12_c;
 	rgb565_to_yv12 = rgb565_to_yv12_c;
+	rgb_to_yv12 = rgb_to_yv12_c;
 	bgr_to_yv12 = bgr_to_yv12_c;
 	bgra_to_yv12 = bgra_to_yv12_c;
 	abgr_to_yv12 = abgr_to_yv12_c;
@@ -272,6 +279,7 @@
 	/* All colorspace transformation functions YV12->User format */
 	yv12_to_rgb555 = yv12_to_rgb555_c;
 	yv12_to_rgb565 = yv12_to_rgb565_c;
+	yv12_to_rgb = yv12_to_rgb_c;
 	yv12_to_bgr = yv12_to_bgr_c;
 	yv12_to_bgra = yv12_to_bgra_c;
 	yv12_to_abgr = yv12_to_abgr_c;
@@ -301,11 +309,14 @@
 	sse8_16bit = sse8_16bit_c;
 	sse8_8bit = sse8_8bit_c;

+	init_GMC(cpu_flags);
+
 #if defined(ARCH_IS_IA32)
 	if ((cpu_flags & XVID_CPU_MMX) || (cpu_flags & XVID_CPU_MMXEXT) ||
 		(cpu_flags & XVID_CPU_3DNOW) || (cpu_flags & XVID_CPU_3DNOWEXT) ||
-		(cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2))
+		(cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2) ||
+		(cpu_flags & XVID_CPU_SSE3) || (cpu_flags & XVID_CPU_SSE41))
 	{
 		/* Restore FPU context : emms_c is a nop functions */
 		emms = emms_mmx;
@@ -340,6 +351,7 @@
 		transfer_8to16sub2 = transfer_8to16sub2_mmx;
 		transfer_16to8add = transfer_16to8add_mmx;
 		transfer8x8_copy = transfer8x8_copy_mmx;
+		transfer8x4_copy = transfer8x4_copy_mmx;

 		/* Interlacing Functions */
 		MBFieldTest = MBFieldTest_mmx;
@@ -349,6 +361,10 @@
 		interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_mmx;
 		interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx;

+		interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_mmx;
+		interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_mmx;
+		interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_mmx;
+
 		interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_mmx;
 		interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_mmx;
 		interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_mmx;
@@ -366,7 +382,9 @@
 		/* image input xxx_to_yv12 related functions */
 		yv12_to_yv12 = yv12_to_yv12_mmx;
 		bgr_to_yv12 = bgr_to_yv12_mmx;
+		rgb_to_yv12 = rgb_to_yv12_mmx;
 		bgra_to_yv12 = bgra_to_yv12_mmx;
+		rgba_to_yv12 = rgba_to_yv12_mmx;
 		yuyv_to_yv12 = yuyv_to_yv12_mmx;
 		uyvy_to_yv12 = uyvy_to_yv12_mmx;
@@ -416,13 +434,16 @@
 		interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_xmm;
 		interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_xmm;

+		interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_xmm;
+		interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_xmm;
+		interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_xmm;
+
 		interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_xmm;
 		interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_xmm;
 		interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_xmm;
 		interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_xmm;

 		/* Quantization */
-		quant_mpeg_intra = quant_mpeg_intra_xmm;
 		quant_mpeg_inter = quant_mpeg_inter_xmm;
 		dequant_h263_intra = dequant_h263_intra_xmm;
@@ -452,6 +473,10 @@
 		interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dn;
 		interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dn;
 		interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dn;
+
+		interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_3dn;
+		interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_3dn;
+		interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_3dn;
 	}

 	if ((cpu_flags & XVID_CPU_3DNOWEXT)) {
@@ -463,6 +488,7 @@
 		transfer_8to16subro = transfer_8to16subro_3dne;
 		transfer_16to8add = transfer_16to8add_3dne;
 		transfer8x8_copy = transfer8x8_copy_3dne;
+		transfer8x4_copy = transfer8x4_copy_3dne;

 		if ((cpu_flags & XVID_CPU_MMXEXT)) {
 			/* Inverse DCT */
@@ -476,6 +502,10 @@
 			interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dne;
 			interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dne;

+			interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_3dne;
+			interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_3dne;
+			interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_3dne;
+
 			/* Quantization */
 			quant_h263_intra = quant_h263_intra_3dne;	/* cmov only */
 			quant_h263_inter = quant_h263_inter_3dne;
@@ -509,13 +539,21 @@
 		sad16 = sad16_sse2;
 		dev16 = dev16_sse2;

-		/* DCT operators
-		 * no iDCT because it's not "Walken matching" */
"Walken matching" */ + /* DCT operators */ fdct = fdct_sse2_skal; + idct = idct_sse2_skal; /* Is now IEEE1180 and Walken compliant. */ /* postprocessing */ image_brightness = image_brightness_sse2; } + + if ((cpu_flags & XVID_CPU_SSE3)) { + + /* SAD operators */ + sad16 = sad16_sse3; + dev16 = dev16_sse3; + } + #endif /* ARCH_IS_IA32 */ #if defined(ARCH_IS_IA64) @@ -596,6 +634,9 @@ quant_h263_inter = quant_h263_inter_altivec_c; dequant_h263_intra = dequant_h263_intra_altivec_c; dequant_h263_inter = dequant_h263_inter_altivec_c; + + dequant_mpeg_intra = dequant_mpeg_intra_altivec_c; + dequant_mpeg_inter = dequant_mpeg_inter_altivec_c; /* Qpel stuff */ xvid_QP_Funcs = &xvid_QP_Funcs_Altivec_C; @@ -603,6 +644,74 @@ } #endif +#if defined(ARCH_IS_X86_64) + /* For now, only XVID_CPU_ASM is looked for, so user can still + * disable asm usage the usual way. When Intel EMT64 cpus will + * be out, maybe we'll have to check more precisely what cpu + * features there really are. */ + if (cpu_flags & XVID_CPU_ASM) { + /* SIMD state flusher */ + emms = emms_mmx; + + /* DCT operators */ + fdct = fdct_skal_x86_64; + idct = idct_x86_64; + + /* SAD operators */ + sad16 = sad16_x86_64; + sad8 = sad8_x86_64; + sad16bi = sad16bi_x86_64; + sad8bi = sad8bi_x86_64; + dev16 = dev16_x86_64; + sad16v = sad16v_x86_64; + sse8_16bit = sse8_16bit_x86_64; + sse8_8bit = sse8_8bit_x86_64; + + /* Interpolation operators */ + interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_x86_64; + interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_x86_64; + interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_x86_64; + + interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_x86_64; + interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_x86_64; + interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_x86_64; + interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_x86_64; + + interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_x86_64; + interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_x86_64; + + interpolate8x8_avg2 = interpolate8x8_avg2_x86_64; + interpolate8x8_avg4 = interpolate8x8_avg4_x86_64; + + /* Quantization related functions */ + quant_h263_intra = quant_h263_intra_x86_64; + quant_h263_inter = quant_h263_inter_x86_64; + dequant_h263_intra = dequant_h263_intra_x86_64; + dequant_h263_inter = dequant_h263_inter_x86_64; + /*quant_mpeg_intra = quant_mpeg_intra_x86_64; fix me! 
+		quant_mpeg_inter = quant_mpeg_inter_x86_64;
+		dequant_mpeg_intra = dequant_mpeg_intra_x86_64;
+		dequant_mpeg_inter = dequant_mpeg_inter_x86_64;
+
+		/* Block related functions */
+		transfer_8to16copy = transfer_8to16copy_x86_64;
+		transfer_16to8copy = transfer_16to8copy_x86_64;
+		transfer_8to16sub = transfer_8to16sub_x86_64;
+		transfer_8to16subro = transfer_8to16subro_x86_64;
+		transfer_8to16sub2 = transfer_8to16sub2_x86_64;
+		transfer_8to16sub2ro= transfer_8to16sub2ro_x86_64;
+		transfer_16to8add = transfer_16to8add_x86_64;
+		transfer8x8_copy = transfer8x8_copy_x86_64;
+
+		/* Qpel stuff */
+		xvid_QP_Funcs = &xvid_QP_Funcs_x86_64;
+		xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_x86_64;
+
+		/* Interlacing Functions */
+		MBFieldTest = MBFieldTest_x86_64;
+	}
+#endif
+
 #if defined(_DEBUG)
 	xvid_debug = init->debug;
 #endif
@@ -618,13 +727,21 @@
 		return XVID_ERR_VERSION;

 	info->actual_version = XVID_VERSION;
-	info->build = "xvid-1.1-cvshead";
+	info->build = "xvid-1.2.0-dev";
 	info->cpu_flags = detect_cpu_flags();
+	info->num_threads = 0;

-#if defined(_SMP) && defined(WIN32)
-	info->num_threads = pthread_num_processors_np();;
-#else
-	info->num_threads = 0;
+#if defined(WIN32)
+	{
+		DWORD dwProcessAffinityMask, dwSystemAffinityMask;
+		if (GetProcessAffinityMask(GetCurrentProcess(), &dwProcessAffinityMask, &dwSystemAffinityMask)) {
+			int i;
+			for(i=0; i<32; i++) {
+				if ((dwProcessAffinityMask & (1<<i)))
+					info->num_threads++;
+			}
+		}
+	}
 #endif

 	return 0;
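
Note (not part of the patch): the new WIN32 branch above fills info->num_threads by counting the bits set in the process affinity mask, replacing the old pthread_num_processors_np() call. The standalone sketch below shows the same technique in isolation so it can be compiled and tested on its own; the helper name count_usable_cpus() and the use of DWORD_PTR instead of DWORD are illustrative assumptions, not identifiers from xvid.c.

#ifdef WIN32
#include <windows.h>
#include <stdio.h>

/* Sketch: count the logical CPUs the current process is allowed to run on
 * by testing each bit of the process affinity mask, as the patched code
 * does. Like the patch, only the low 32 bits of the mask are examined. */
static int count_usable_cpus(void)
{
	DWORD_PTR process_mask, system_mask;
	int i, count = 0;

	if (!GetProcessAffinityMask(GetCurrentProcess(), &process_mask, &system_mask))
		return 0; /* detection failed; the patch likewise leaves num_threads at 0 */

	for (i = 0; i < 32; i++) {
		if (process_mask & ((DWORD_PTR)1 << i))
			count++;
	}
	return count;
}

int main(void)
{
	printf("usable logical CPUs: %d\n", count_usable_cpus());
	return 0;
}
#endif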