
Diff of /xvidcore/src/image/qpel.h


revision 1.1 (Sat May 3 23:26:35 2003 UTC) vs. revision 1.1.2.1 (Sat May 3 23:26:35 2003 UTC)
/*****************************************************************************
 *
 *  XVID MPEG-4 VIDEO CODEC
 *  - QPel interpolation -
 *
 *  This program is free software ; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation ; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program ; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *****************************************************************************/

/**************************************************************************
 *
 *      History:
 *
 *  22.10.2002  initial coding  - Skal -
 *
 *************************************************************************/

#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "../utils/mem_transfer.h"

/*****************************************************************************
 * Signatures
 ****************************************************************************/

#define XVID_QP_PASS_SIGNATURE(NAME)  \
  void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding)

typedef XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);
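
/*
 * Illustrative note (not part of the original header): the macro above
 * expands to a complete function prototype, so a concrete pass function,
 * here given the purely hypothetical name "my_h_pass", could be declared as
 *
 *   extern XVID_QP_PASS_SIGNATURE(my_h_pass);
 *
 * which expands to
 *
 *   extern void (my_h_pass)(uint8_t *dst, const uint8_t *src,
 *                           int32_t length, int32_t BpS, int32_t rounding);
 */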

  // We put everything in a single struct so it can easily be passed
  // to prediction functions as a whole...

struct XVID_QP_FUNCS {

  // filter for QPel 16x? prediction

  XVID_QP_PASS *H_Pass;
  XVID_QP_PASS *H_Pass_Avrg;
  XVID_QP_PASS *H_Pass_Avrg_Up;
  XVID_QP_PASS *V_Pass;
  XVID_QP_PASS *V_Pass_Avrg;
  XVID_QP_PASS *V_Pass_Avrg_Up;

  // filter for QPel 8x? prediction

  XVID_QP_PASS *H_Pass_8;
  XVID_QP_PASS *H_Pass_Avrg_8;
  XVID_QP_PASS *H_Pass_Avrg_Up_8;
  XVID_QP_PASS *V_Pass_8;
  XVID_QP_PASS *V_Pass_Avrg_8;
  XVID_QP_PASS *V_Pass_Avrg_Up_8;
};
typedef struct XVID_QP_FUNCS  XVID_QP_FUNCS;
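
/*
 * Illustrative sketch (an assumption for this example; these are not the
 * identifiers used by the actual C or MMX implementations): an
 * implementation file fills the table with its twelve pass functions in
 * the order declared above, e.g.
 *
 *   XVID_QP_FUNCS my_qpel_funcs = {
 *     my_h_pass_16, my_h_pass_avrg_16, my_h_pass_avrg_up_16,
 *     my_v_pass_16, my_v_pass_avrg_16, my_v_pass_avrg_up_16,
 *     my_h_pass_8,  my_h_pass_avrg_8,  my_h_pass_avrg_up_8,
 *     my_v_pass_8,  my_v_pass_avrg_8,  my_v_pass_avrg_up_8
 *   };
 *
 * so the whole filter set can then be handed to the prediction code as one unit.
 */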

/*****************************************************************************
 * fwd dcl
 ****************************************************************************/

extern XVID_QP_FUNCS xvid_QP_Funcs_C;       // for P-frames
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;   // for B-frames

extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
extern void xvid_Init_QP_mmx(); // should be called at mmx initialization

extern XVID_QP_FUNCS *xvid_QP_Funcs;      // <- main pointer for enc/dec structure
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;  // <- main pointer for enc/dec structure
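
/*
 * Illustrative sketch of the intended dispatch (the cpu-flag test is an
 * assumption made for this example; the real initialisation lives in the
 * encoder/decoder setup code, not in this header):
 *
 *   xvid_QP_Funcs     = &xvid_QP_Funcs_C;
 *   xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;
 *   if (cpu_flags & XVID_CPU_MMX) {
 *     xvid_Init_QP_mmx();
 *     xvid_QP_Funcs     = &xvid_QP_Funcs_mmx;
 *     xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
 *   }
 */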

/*****************************************************************************
 * macros
 ****************************************************************************/

/*****************************************************************************

    Passes to be performed

 case 0:         copy
 case 2:         h-pass
 case 1/3:       h-pass + h-avrg
 case 8:                           v-pass
 case 10:        h-pass          + v-pass
 case 9/11:      h-pass + h-avrg + v-pass
 case 4/12:                        v-pass + v-avrg
 case 6/14:      h-pass          + v-pass + v-avrg
 case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg

 ****************************************************************************/
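
/*
 * Worked example: the case number is the 4-bit selector computed below as
 * quads = (dx&3) | ((dy&3)<<2), i.e. the quarter-pel fraction of dx in the
 * low two bits and of dy in the high two bits. For dx=3, dy=2 this gives
 * 3 | (2<<2) = 11, which the table above maps to "h-pass + h-avrg + v-pass";
 * for dx=0, dy=0 it gives 0, a plain block copy.
 */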

static __inline void new_interpolate16x16_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
    uint8_t * const refh,
    uint8_t * const refv,
    uint8_t * const refhv,
    const uint32_t x, const uint32_t y,
    const int32_t dx, const int dy,
    const uint32_t stride,
    const uint32_t rounding)
{
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  int32_t x_int, y_int;

  const int32_t xRef = x*4 + dx;
  const int32_t yRef = y*4 + dy;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);

  x_int = xRef/4;
  if (xRef < 0 && xRef % 4)
    x_int--;

  y_int = yRef/4;
  if (yRef < 0 && yRef % 4)
    y_int--;

  dst = cur + y * stride + x;
  src = refn + y_int * stride + x_int;

  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy(dst, src, stride);
      transfer8x8_copy(dst+8, src+8, stride);
      transfer8x8_copy(dst+8*stride, src+8*stride, stride);
      transfer8x8_copy(dst+8*stride+8, src+8*stride+8, stride);
      break;
    case 1:
      Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
      break;
    case 2:
      Ops->H_Pass(dst, src, 16, stride, rounding);
      break;
    case 3:
      Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
      break;
    case 4:
      Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
      break;
    case 5:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
      break;
    case 6:
      Ops->H_Pass(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
      break;
    case 7:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
      break;
    case 8:
      Ops->V_Pass(dst, src, 16, stride, rounding);
      break;
    case 9:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
      break;
    case 10:
      Ops->H_Pass(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
      break;
    case 11:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
      break;
    case 12:
      Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
      break;
    case 13:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
      break;
    case 14:
      Ops->H_Pass(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
      break;
    case 15:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
      break;
  }
}
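
/*
 * Illustrative call site (the variable names are hypothetical and the
 * surrounding motion-compensation loop is assumed): predicting the 16x16
 * block at pixel position (mb_x*16, mb_y*16) from the reference frame
 * using a quarter-pel motion vector (mv_x, mv_y). Note that refh is
 * reused internally as scratch storage for the intermediate h-pass.
 *
 *   new_interpolate16x16_quarterpel(cur_frame, ref_frame,
 *                                   scratch_h, scratch_v, scratch_hv,
 *                                   mb_x*16, mb_y*16,
 *                                   mv_x, mv_y,
 *                                   stride, rounding);
 *
 * The 16x8 and 8x8 variants below take the same argument list and differ
 * only in the size of the block they predict.
 */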

static __inline void new_interpolate16x8_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
    uint8_t * const refh,
    uint8_t * const refv,
    uint8_t * const refhv,
    const uint32_t x, const uint32_t y,
    const int32_t dx, const int dy,
    const uint32_t stride,
    const uint32_t rounding)
{
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  int32_t x_int, y_int;

  const int32_t xRef = x*4 + dx;
  const int32_t yRef = y*4 + dy;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);

  x_int = xRef/4;
  if (xRef < 0 && xRef % 4)
    x_int--;

  y_int = yRef/4;
  if (yRef < 0 && yRef % 4)
    y_int--;

  dst = cur + y * stride + x;
  src = refn + y_int * stride + x_int;

  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy(dst, src, stride);
      transfer8x8_copy(dst+8, src+8, stride);
      break;
    case 1:
      Ops->H_Pass_Avrg(dst, src, 8, stride, rounding);
      break;
    case 2:
      Ops->H_Pass(dst, src, 8, stride, rounding);
      break;
    case 3:
      Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding);
      break;
    case 4:
      Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding);
      break;
    case 5:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
      break;
    case 6:
      Ops->H_Pass(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
      break;
    case 7:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
      break;
    case 8:
      Ops->V_Pass_8(dst, src, 16, stride, rounding);
      break;
    case 9:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
      break;
    case 10:
      Ops->H_Pass(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
      break;
    case 11:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
      break;
    case 12:
      Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding);
      break;
    case 13:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
      break;
    case 14:
      Ops->H_Pass(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
      break;
    case 15:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
      break;
  }
}

static __inline void new_interpolate8x8_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
    uint8_t * const refh,
    uint8_t * const refv,
    uint8_t * const refhv,
    const uint32_t x, const uint32_t y,
    const int32_t dx, const int dy,
    const uint32_t stride,
    const uint32_t rounding)
{
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  int32_t x_int, y_int;

  const int32_t xRef = x*4 + dx;
  const int32_t yRef = y*4 + dy;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);

  x_int = xRef/4;
  if (xRef < 0 && xRef % 4)
    x_int--;

  y_int = yRef/4;
  if (yRef < 0 && yRef % 4)
    y_int--;

  dst = cur + y * stride + x;
  src = refn + y_int * stride + x_int;

  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy(dst, src, stride);
      break;
    case 1:
      Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
      break;
    case 2:
      Ops->H_Pass_8(dst, src, 8, stride, rounding);
      break;
    case 3:
      Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
      break;
    case 4:
      Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
      break;
    case 5:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
      break;
    case 6:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
      break;
    case 7:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
      break;
    case 8:
      Ops->V_Pass_8(dst, src, 8, stride, rounding);
      break;
    case 9:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
      break;
    case 10:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
      break;
    case 11:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
      break;
    case 12:
      Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
      break;
    case 13:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
      break;
    case 14:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
      break;
    case 15:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
      break;
  }
}
/*****************************************************************************/

#endif  /* _XVID_QPEL_H_ */
