[cvs] / xvidcore / src / image / qpel.h Repository:
ViewVC logotype

Diff of /xvidcore/src/image/qpel.h

Parent Directory | Revision Log | View Patch

revision 1.1.4.1, Fri Aug 22 15:52:35 2003 UTC revision 1.7, Wed Oct 26 12:38:34 2005 UTC
# Line 3  Line 3 
3  *  XVID MPEG-4 VIDEO CODEC  *  XVID MPEG-4 VIDEO CODEC
4  *  - QPel interpolation -  *  - QPel interpolation -
5  *  *
6     *  Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
7     *
8  *  This program is free software ; you can redistribute it and/or modify  *  This program is free software ; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation ; either version 2 of the License, or  *  the Free Software Foundation ; either version 2 of the License, or
# Line 17  Line 19 
19  *  along with this program ; if not, write to the Free Software  *  along with this program ; if not, write to the Free Software
20  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
21  *  *
22  *****************************************************************************/   * $Id$
   
 /**************************************************************************  
  *  
  *      History:  
23   *   *
24   *  22.10.2002  initial coding  - Skal -   ****************************************************************************/
  *  
  *************************************************************************/  
25    
26  #ifndef _XVID_QPEL_H_  #ifndef _XVID_QPEL_H_
27  #define _XVID_QPEL_H_  #define _XVID_QPEL_H_
28    
29    #include "interpolate8x8.h"
30  #include "../utils/mem_transfer.h"  #include "../utils/mem_transfer.h"
31    
32  /*****************************************************************************  /*****************************************************************************
# Line 41  Line 38 
38    
39  typedef  XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);  typedef  XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);
40    
41      // We put everything in a single struct so it can easily be passed  /* We put everything in a single struct so it can easily be passed
42      // to prediction functions as a whole...   * to prediction functions as a whole... */
43    
44  struct XVID_QP_FUNCS {  typedef struct _XVID_QP_FUNCS {
45    
46      // filter for QPel 16x? prediction          /* filter for QPel 16x? prediction */
47    
48    XVID_QP_PASS *H_Pass;    XVID_QP_PASS *H_Pass;
49    XVID_QP_PASS *H_Pass_Avrg;    XVID_QP_PASS *H_Pass_Avrg;
# Line 55  Line 52 
52    XVID_QP_PASS *V_Pass_Avrg;    XVID_QP_PASS *V_Pass_Avrg;
53    XVID_QP_PASS *V_Pass_Avrg_Up;    XVID_QP_PASS *V_Pass_Avrg_Up;
54    
55      // filter for QPel 8x? prediction      /* filter for QPel 8x? prediction */
56    
57    XVID_QP_PASS *H_Pass_8;    XVID_QP_PASS *H_Pass_8;
58    XVID_QP_PASS *H_Pass_Avrg_8;    XVID_QP_PASS *H_Pass_Avrg_8;
# Line 63  Line 60 
60    XVID_QP_PASS *V_Pass_8;    XVID_QP_PASS *V_Pass_8;
61    XVID_QP_PASS *V_Pass_Avrg_8;    XVID_QP_PASS *V_Pass_Avrg_8;
62    XVID_QP_PASS *V_Pass_Avrg_Up_8;    XVID_QP_PASS *V_Pass_Avrg_Up_8;
63  };  } XVID_QP_FUNCS;
 typedef struct XVID_QP_FUNCS  XVID_QP_FUNCS;  
64    
65  /*****************************************************************************  /*****************************************************************************
66   * fwd dcl   * fwd dcl
67   ****************************************************************************/   ****************************************************************************/
68    extern void xvid_Init_QP();
69    
70  extern XVID_QP_FUNCS xvid_QP_Funcs_C;       // for P-frames  extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref;       /* for P-frames */
71  extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;   // for B-frames  extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref;   /* for B-frames */
72    
73    extern XVID_QP_FUNCS xvid_QP_Funcs_C;       /* for P-frames */
74    extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;   /* for B-frames */
75    
76    #ifdef ARCH_IS_IA32
77  extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;  extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
78  extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;  extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
79  extern void xvid_Init_QP_mmx(); // should be called at mmx initialization  #endif
80    
81    #ifdef ARCH_IS_PPC
82    extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
83    extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
84    #endif
85    
86    #ifdef ARCH_IS_X86_64
87    extern XVID_QP_FUNCS xvid_QP_Funcs_x86_64;
88    extern XVID_QP_FUNCS xvid_QP_Add_Funcs_x86_64;
89    #endif
90    
91  extern XVID_QP_FUNCS *xvid_QP_Funcs;      // <- main pointer for enc/dec structure  extern XVID_QP_FUNCS *xvid_QP_Funcs;      /* <- main pointer for enc/dec structure */
92  extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;  // <- main pointer for enc/dec structure  extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;  /* <- main pointer for enc/dec structure */
93    
94  /*****************************************************************************  /*****************************************************************************
95   * macros   * macros
# Line 100  Line 111 
111    
112   ****************************************************************************/   ****************************************************************************/
113    
114  static __inline void new_interpolate16x16_quarterpel(  static void __inline
115      uint8_t * const cur,  interpolate16x16_quarterpel(uint8_t * const cur,
116      uint8_t * const refn,      uint8_t * const refn,
117                  uint8_t * const refh,                  uint8_t * const refh,
118                  uint8_t * const refv,                  uint8_t * const refv,
# Line 119  Line 130 
130    
131          int32_t x_int, y_int;          int32_t x_int, y_int;
132    
133    const int32_t xRef = x*4 + dx;          const int32_t xRef = (int)x*4 + dx;
134    const int32_t yRef = y*4 + dy;          const int32_t yRef = (int)y*4 + dy;
135    
136       Ops = xvid_QP_Funcs; // TODO: pass as argument          Ops = xvid_QP_Funcs;
137       quads = (dx&3) | ((dy&3)<<2);       quads = (dx&3) | ((dy&3)<<2);
138    
139    x_int = xRef/4;          x_int = xRef >> 2;
140    if (xRef < 0 && xRef % 4)          y_int = yRef >> 2;
    x_int--;  
   
   y_int  = yRef/4;  
   if (yRef < 0 && yRef % 4)  
    y_int--;  
141    
142      dst = cur + y * stride + x;      dst = cur + y * stride + x;
143          src = refn + y_int * stride + x_int;          src = refn + y_int * (int)stride + x_int;
144    
145    tmp = refh; // we need at least a 16 x stride scratch block          tmp = refh; /* we need at least a 16 x stride scratch block */
146    
147    switch(quads) {    switch(quads) {
148      case 0:      case 0:
# Line 202  Line 208 
208    }    }
209  }  }
210    
211  static __inline void new_interpolate16x8_quarterpel(  static void __inline
212      uint8_t * const cur,  interpolate16x16_add_quarterpel(uint8_t * const cur,
213      uint8_t * const refn,      uint8_t * const refn,
214                  uint8_t * const refh,                  uint8_t * const refh,
215                  uint8_t * const refv,                  uint8_t * const refv,
# Line 218  Line 224 
224          uint8_t *tmp;          uint8_t *tmp;
225          int32_t quads;          int32_t quads;
226          const XVID_QP_FUNCS *Ops;          const XVID_QP_FUNCS *Ops;
227            const XVID_QP_FUNCS *Ops_Copy;
228    
229          int32_t x_int, y_int;          int32_t x_int, y_int;
230    
231    const int32_t xRef = x*4 + dx;          const int32_t xRef = (int)x*4 + dx;
232    const int32_t yRef = y*4 + dy;          const int32_t yRef = (int)y*4 + dy;
233    
234       Ops = xvid_QP_Funcs; // TODO: pass as argument          Ops = xvid_QP_Add_Funcs;
235            Ops_Copy = xvid_QP_Funcs;
236       quads = (dx&3) | ((dy&3)<<2);       quads = (dx&3) | ((dy&3)<<2);
237    
238    x_int = xRef/4;          x_int = xRef >> 2;
239    if (xRef < 0 && xRef % 4)          y_int = yRef >> 2;
    x_int--;  
   
   y_int  = yRef/4;  
   if (yRef < 0 && yRef % 4)  
    y_int--;  
240    
241      dst = cur + y * stride + x;      dst = cur + y * stride + x;
242          src = refn + y_int * stride + x_int;          src = refn + y_int * (int)stride + x_int;
243    
244    tmp = refh; // we need at least a 16 x stride scratch block          tmp = refh; /* we need at least a 16 x stride scratch block */
245    
246            switch(quads) {
247            case 0:
248                    /* NB: there is no halfpel involved ! the name's function can be
249                     *     misleading */
250                    interpolate8x8_halfpel_add(dst, src, stride, rounding);
251                    interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
252                    interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
253                    interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
254                    break;
255            case 1:
256                    Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
257                    break;
258            case 2:
259                    Ops->H_Pass(dst, src, 16, stride, rounding);
260                    break;
261            case 3:
262                    Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
263                    break;
264            case 4:
265                    Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
266                    break;
267            case 5:
268                    Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
269                    Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
270                    break;
271            case 6:
272                    Ops_Copy->H_Pass(tmp, src,        17, stride, rounding);
273                    Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
274                    break;
275            case 7:
276                    Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
277                    Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
278                    break;
279            case 8:
280                    Ops->V_Pass(dst, src, 16, stride, rounding);
281                    break;
282            case 9:
283                    Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
284                    Ops->V_Pass(dst, tmp, 16, stride, rounding);
285                    break;
286            case 10:
287                    Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
288                    Ops->V_Pass(dst, tmp, 16, stride, rounding);
289                    break;
290            case 11:
291                    Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
292                    Ops->V_Pass(dst, tmp, 16, stride, rounding);
293                    break;
294            case 12:
295                    Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
296                    break;
297            case 13:
298                    Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
299                    Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
300                    break;
301            case 14:
302                    Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
303                    Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding);
304                    break;
305            case 15:
306                    Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
307                    Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
308                    break;
309            }
310    }
311    
312    static void __inline
313    interpolate16x8_quarterpel(uint8_t * const cur,
314                                                               uint8_t * const refn,
315                                                               uint8_t * const refh,
316                                                               uint8_t * const refv,
317                                                               uint8_t * const refhv,
318                                                               const uint32_t x, const uint32_t y,
319                                                               const int32_t dx,  const int dy,
320                                                               const uint32_t stride,
321                                                               const uint32_t rounding)
322    {
323            const uint8_t *src;
324            uint8_t *dst;
325            uint8_t *tmp;
326            int32_t quads;
327            const XVID_QP_FUNCS *Ops;
328    
329            int32_t x_int, y_int;
330    
331            const int32_t xRef = (int)x*4 + dx;
332            const int32_t yRef = (int)y*4 + dy;
333    
334            Ops = xvid_QP_Funcs;
335            quads = (dx&3) | ((dy&3)<<2);
336    
337            x_int = xRef >> 2;
338            y_int = yRef >> 2;
339    
340            dst = cur + y * stride + x;
341            src = refn + y_int * (int)stride + x_int;
342    
343            tmp = refh; /* we need at least a 16 x stride scratch block */
344    
345    switch(quads) {    switch(quads) {
346      case 0:      case 0:
# Line 302  Line 404 
404    }    }
405  }  }
406    
407  static __inline void new_interpolate8x8_quarterpel(  static void __inline
408      uint8_t * const cur,  interpolate8x8_quarterpel(uint8_t * const cur,
409      uint8_t * const refn,      uint8_t * const refn,
410                  uint8_t * const refh,                  uint8_t * const refh,
411                  uint8_t * const refv,                  uint8_t * const refv,
# Line 321  Line 423 
423    
424          int32_t x_int, y_int;          int32_t x_int, y_int;
425    
426    const int32_t xRef = x*4 + dx;          const int32_t xRef = (int)x*4 + dx;
427    const int32_t yRef = y*4 + dy;          const int32_t yRef = (int)y*4 + dy;
428    
429       Ops = xvid_QP_Funcs; // TODO: pass as argument          Ops = xvid_QP_Funcs;
430       quads = (dx&3) | ((dy&3)<<2);       quads = (dx&3) | ((dy&3)<<2);
431    
432    x_int = xRef/4;          x_int = xRef >> 2;
433    if (xRef < 0 && xRef % 4)          y_int = yRef >> 2;
    x_int--;  
   
   y_int  = yRef/4;  
   if (yRef < 0 && yRef % 4)  
    y_int--;  
434    
435      dst = cur + y * stride + x;      dst = cur + y * stride + x;
436          src = refn + y_int * stride + x_int;          src = refn + y_int * (int)stride + x_int;
437    
438    tmp = refh; // we need at least a 16 x stride scratch block          tmp = refh; /* we need at least a 16 x stride scratch block */
439    
440    switch(quads) {    switch(quads) {
441      case 0:      case 0:
# Line 400  Line 497 
497      break;      break;
498    }    }
499  }  }
500  /*****************************************************************************/  
501    static void __inline
502    interpolate8x8_add_quarterpel(uint8_t * const cur,
503                                                              uint8_t * const refn,
504                                                              uint8_t * const refh,
505                                                              uint8_t * const refv,
506                                                              uint8_t * const refhv,
507                                                              const uint32_t x, const uint32_t y,
508                                                              const int32_t dx,  const int dy,
509                                                              const uint32_t stride,
510                                                              const uint32_t rounding)
511    {
512            const uint8_t *src;
513            uint8_t *dst;
514            uint8_t *tmp;
515            int32_t quads;
516            const XVID_QP_FUNCS *Ops;
517            const XVID_QP_FUNCS *Ops_Copy;
518    
519            int32_t x_int, y_int;
520    
521            const int32_t xRef = (int)x*4 + dx;
522            const int32_t yRef = (int)y*4 + dy;
523    
524            Ops = xvid_QP_Add_Funcs;
525            Ops_Copy = xvid_QP_Funcs;
526            quads = (dx&3) | ((dy&3)<<2);
527    
528            x_int = xRef >> 2;
529            y_int = yRef >> 2;
530    
531            dst = cur + y * stride + x;
532            src = refn + y_int * (int)stride + x_int;
533    
534            tmp = refh; /* we need at least a 16 x stride scratch block */
535    
536            switch(quads) {
537            case 0:
538                    /* Misleading function name, there is no halfpel involved
539                     * just dst and src averaging with rounding=0 */
540                    interpolate8x8_halfpel_add(dst, src, stride, rounding);
541                    break;
542            case 1:
543                    Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
544                    break;
545            case 2:
546                    Ops->H_Pass_8(dst, src, 8, stride, rounding);
547                    break;
548            case 3:
549                    Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
550                    break;
551            case 4:
552                    Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
553                    break;
554            case 5:
555                    Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
556                    Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
557                    break;
558            case 6:
559                    Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
560                    Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
561                    break;
562            case 7:
563                    Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
564                    Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
565                    break;
566            case 8:
567                    Ops->V_Pass_8(dst, src, 8, stride, rounding);
568                    break;
569            case 9:
570                    Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
571                    Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
572                    break;
573            case 10:
574                    Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
575                    Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
576                    break;
577            case 11:
578                    Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
579                    Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
580                    break;
581            case 12:
582                    Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
583                    break;
584            case 13:
585                    Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
586                    Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
587                    break;
588            case 14:
589                    Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
590                    Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding);
591                    break;
592            case 15:
593                    Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
594                    Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
595                    break;
596            }
597    }
598    
599  #endif  /* _XVID_QPEL_H_ */  #endif  /* _XVID_QPEL_H_ */

Legend:
Removed from v.1.1.4.1  
changed lines
  Added in v.1.7

No admin address has been configured
ViewVC Help
Powered by ViewVC 1.0.4