; ****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - RGB colorspace conversions -
; *
; *  Copyright(C) 2002-2008 Michael Militzer <michael@xvid.org>
; *               2002-2003 Peter Ross <pross@xvid.org>
; *
; *  This program is free software ; you can redistribute it and/or modify
; *
; ****************************************************************************/
24 |
|
|
25 |
BITS 32 |
%include "nasm.inc" |
|
|
|
|
%macro cglobal 1 |
|
|
%ifdef PREFIX |
|
|
%ifdef MARK_FUNCS |
|
|
global _%1:function |
|
|
%define %1 _%1:function |
|
|
%else |
|
|
global _%1 |
|
|
%define %1 _%1 |
|
|
%endif |
|
|
%else |
|
|
%ifdef MARK_FUNCS |
|
|
global %1:function |
|
|
%else |
|
|
global %1 |
|
|
%endif |
|
|
%endif |
|
|
%endmacro |
|
26 |
|
|
27 |
;============================================================================= |
;============================================================================= |
28 |
; Some constants |
; Some constants |
48 |
%define V_ADD 128 |
%define V_ADD 128 |
49 |
|
|
50 |
; Scaling used during conversion |
; Scaling used during conversion |
51 |
%define SCALEBITS 6 |
%define SCALEBITS_OUT 6 |
52 |
|
%define SCALEBITS_IN 13 |
53 |
|
|
54 |
|
%define FIX_ROUND (1<<(SCALEBITS_IN-1)) |
55 |
|
|
56 |
;============================================================================= |
;============================================================================= |
57 |
; Read only data |
; Read only data |
58 |
;============================================================================= |
;============================================================================= |
59 |
|
|
60 |
%ifdef FORMAT_COFF |
DATA |
61 |
SECTION .rodata |
|
62 |
%else |
ALIGN SECTION_ALIGN |
|
SECTION .rodata align=16 |
|
|
%endif |
|
|
ALIGN 16 |
|
63 |
|
|
64 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
65 |
; RGB->YV12 multiplication matrices |
; RGB->YV12 multiplication matrices |
66 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
67 |
; FIX(Y_B) FIX(Y_G) FIX(Y_R) Ignored |
; FIX(Y_B) FIX(Y_G) FIX(Y_R) Ignored |
68 |
|
|
69 |
y_mul: dw 25, 129, 66, 0 |
bgr_y_mul: dw 803, 4129, 2105, 0 |
70 |
u_mul: dw 112, -74, -38, 0 |
bgr_u_mul: dw 3596, -2384, -1212, 0 |
71 |
v_mul: dw -18, -94, 112, 0 |
bgr_v_mul: dw -582, -3015, 3596, 0 |
72 |
|
|
73 |
|
;----------------------------------------------------------------------------- |
74 |
|
; BGR->YV12 multiplication matrices |
75 |
|
;----------------------------------------------------------------------------- |
76 |
|
; FIX(Y_R) FIX(Y_G) FIX(Y_B) Ignored |
77 |
|
|
78 |
|
rgb_y_mul: dw 2105, 4129, 803, 0 |
79 |
|
rgb_u_mul: dw -1212, -2384, 3596, 0 |
80 |
|
rgb_v_mul: dw 3596, -3015, -582, 0 |
81 |
|
|
;-----------------------------------------------------------------------------
; YV12->RGB data
;------------------------------------------------------------------------------
112 |
%macro BGR_TO_YV12_INIT 2 |
%macro BGR_TO_YV12_INIT 2 |
113 |
movq mm7, [y_mul] |
movq mm7, [bgr_y_mul] |
114 |
%endmacro |
%endmacro |
115 |
|
|
116 |
|
|
117 |
%macro BGR_TO_YV12 2 |
%macro BGR_TO_YV12 2 |
118 |
; y_out |
; y_out |
119 |
|
|
120 |
|
pxor mm4, mm4 |
121 |
|
pxor mm5, mm5 |
122 |
|
movd mm0, [x_ptr] ; x_ptr[0...] |
123 |
|
movd mm2, [x_ptr+x_stride] ; x_ptr[x_stride...] |
124 |
|
punpcklbw mm0, mm4 ; [ |b |g |r ] |
125 |
|
punpcklbw mm2, mm5 ; [ |b |g |r ] |
126 |
|
movq mm6, mm0 ; = [ |b4|g4|r4] |
127 |
|
paddw mm6, mm2 ; +[ |b4|g4|r4] |
128 |
|
pmaddwd mm0, mm7 ; *= Y_MUL |
129 |
|
pmaddwd mm2, mm7 ; *= Y_MUL |
130 |
|
movq mm4, mm0 ; [r] |
131 |
|
movq mm5, mm2 ; [r] |
132 |
|
psrlq mm4, 32 ; +[g] |
133 |
|
psrlq mm5, 32 ; +[g] |
134 |
|
paddd mm0, mm4 ; +[b] |
135 |
|
paddd mm2, mm5 ; +[b] |
136 |
|
|
137 |
|
pxor mm4, mm4 |
138 |
|
pxor mm5, mm5 |
139 |
|
movd mm1, [x_ptr+%1] ; src[%1...] |
140 |
|
movd mm3, [x_ptr+x_stride+%1] ; src[x_stride+%1...] |
141 |
|
punpcklbw mm1, mm4 ; [ |b |g |r ] |
142 |
|
punpcklbw mm3, mm5 ; [ |b |g |r ] |
143 |
|
paddw mm6, mm1 ; +[ |b4|g4|r4] |
144 |
|
paddw mm6, mm3 ; +[ |b4|g4|r4] |
145 |
|
pmaddwd mm1, mm7 ; *= Y_MUL |
146 |
|
pmaddwd mm3, mm7 ; *= Y_MUL |
147 |
|
movq mm4, mm1 ; [r] |
148 |
|
movq mm5, mm3 ; [r] |
149 |
|
psrlq mm4, 32 ; +[g] |
150 |
|
psrlq mm5, 32 ; +[g] |
151 |
|
paddd mm1, mm4 ; +[b] |
152 |
|
paddd mm3, mm5 ; +[b] |
153 |
|
|
154 |
|
push x_stride |
155 |
|
|
156 |
|
movd x_stride_d, mm0 |
157 |
|
add x_stride, FIX_ROUND |
158 |
|
shr x_stride, SCALEBITS_IN |
159 |
|
add x_stride, Y_ADD |
160 |
|
mov [y_ptr], dl ; y_ptr[0] |
161 |
|
|
162 |
|
movd x_stride_d, mm1 |
163 |
|
add x_stride, FIX_ROUND |
164 |
|
shr x_stride, SCALEBITS_IN |
165 |
|
add x_stride, Y_ADD |
166 |
|
mov [y_ptr + 1], dl ; y_ptr[1] |
167 |
|
|
168 |
|
movd x_stride_d, mm2 |
169 |
|
add x_stride, FIX_ROUND |
170 |
|
shr x_stride, SCALEBITS_IN |
171 |
|
add x_stride, Y_ADD |
172 |
|
mov [y_ptr + y_stride + 0], dl ; y_ptr[y_stride + 0] |
173 |
|
|
174 |
|
movd x_stride_d, mm3 |
175 |
|
add x_stride, FIX_ROUND |
176 |
|
shr x_stride, SCALEBITS_IN |
177 |
|
add x_stride, Y_ADD |
178 |
|
mov [y_ptr + y_stride + 1], dl ; y_ptr[y_stride + 1] |
179 |
|
|
180 |
|
; u_ptr, v_ptr |
181 |
|
movq mm0, mm6 ; = [ |b4|g4|r4] |
182 |
|
pmaddwd mm6, [bgr_v_mul] ; *= V_MUL |
183 |
|
pmaddwd mm0, [bgr_u_mul] ; *= U_MUL |
184 |
|
movq mm1, mm0 |
185 |
|
movq mm2, mm6 |
186 |
|
psrlq mm1, 32 |
187 |
|
psrlq mm2, 32 |
188 |
|
paddd mm0, mm1 |
189 |
|
paddd mm2, mm6 |
190 |
|
|
191 |
|
movd x_stride_d, mm0 |
192 |
|
add x_stride, 4*FIX_ROUND |
193 |
|
shr x_stride, (SCALEBITS_IN+2) |
194 |
|
add x_stride, U_ADD |
195 |
|
mov [u_ptr], dl |
196 |
|
|
197 |
|
movd x_stride_d, mm2 |
198 |
|
add x_stride, 4*FIX_ROUND |
199 |
|
shr x_stride, (SCALEBITS_IN+2) |
200 |
|
add x_stride, V_ADD |
201 |
|
mov [v_ptr], dl |
202 |
|
|
203 |
|
pop x_stride |
204 |
|
%endmacro |
205 |
|
|
206 |
|
;------------------------------------------------------------------------------ |
207 |
|
; RGB_TO_YV12( BYTES ) |
208 |
|
; |
209 |
|
; BYTES 3=rgb(24bit), 4=rgba(32-bit) |
210 |
|
; |
211 |
|
; bytes=3/4, pixels = 2, vpixels=2 |
212 |
|
;------------------------------------------------------------------------------ |
213 |
|
|
214 |
|
%macro RGB_TO_YV12_INIT 2 |
215 |
|
movq mm7, [rgb_y_mul] |
216 |
|
%endmacro |
217 |
|
|
218 |
|
|
219 |
|
%macro RGB_TO_YV12 2 |
220 |
|
; y_out |
221 |
pxor mm4, mm4 |
pxor mm4, mm4 |
222 |
pxor mm5, mm5 |
pxor mm5, mm5 |
223 |
movd mm0, [edi] ; x_ptr[0...] |
movd mm0, [x_ptr] ; x_ptr[0...] |
224 |
movd mm2, [edi+edx] ; x_ptr[x_stride...] |
movd mm2, [x_ptr+x_stride] ; x_ptr[x_stride...] |
225 |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
226 |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
227 |
movq mm6, mm0 ; = [ |b4|g4|r4] |
movq mm6, mm0 ; = [ |b4|g4|r4] |
237 |
|
|
238 |
pxor mm4, mm4 |
pxor mm4, mm4 |
239 |
pxor mm5, mm5 |
pxor mm5, mm5 |
240 |
movd mm1, [edi+%1] ; src[%1...] |
movd mm1, [x_ptr+%1] ; src[%1...] |
241 |
movd mm3, [edi+edx+%1] ; src[x_stride+%1...] |
movd mm3, [x_ptr+x_stride+%1] ; src[x_stride+%1...] |
242 |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
243 |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
244 |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
252 |
paddd mm1, mm4 ; +[b] |
paddd mm1, mm4 ; +[b] |
253 |
paddd mm3, mm5 ; +[b] |
paddd mm3, mm5 ; +[b] |
254 |
|
|
255 |
push edx |
push x_stride |
256 |
|
|
257 |
movd edx, mm0 |
movd x_stride_d, mm0 |
258 |
shr edx, 8 |
add x_stride, FIX_ROUND |
259 |
add edx, Y_ADD |
shr x_stride, SCALEBITS_IN |
260 |
mov [esi], dl ; y_ptr[0] |
add x_stride, Y_ADD |
261 |
|
mov [y_ptr], dl ; y_ptr[0] |
262 |
movd edx, mm1 |
|
263 |
shr edx, 8 |
movd x_stride_d, mm1 |
264 |
add edx, Y_ADD |
add x_stride, FIX_ROUND |
265 |
mov [esi + 1], dl ; y_ptr[1] |
shr x_stride, SCALEBITS_IN |
266 |
|
add x_stride, Y_ADD |
267 |
movd edx, mm2 |
mov [y_ptr + 1], dl ; y_ptr[1] |
268 |
shr edx, 8 |
|
269 |
add edx, Y_ADD |
movd x_stride_d, mm2 |
270 |
mov [esi + eax + 0], dl ; y_ptr[y_stride + 0] |
add x_stride, FIX_ROUND |
271 |
|
shr x_stride, SCALEBITS_IN |
272 |
movd edx, mm3 |
add x_stride, Y_ADD |
273 |
shr edx, 8 |
mov [y_ptr + y_stride + 0], dl ; y_ptr[y_stride + 0] |
274 |
add edx, Y_ADD |
|
275 |
mov [esi + eax + 1], dl ; y_ptr[y_stride + 1] |
movd x_stride_d, mm3 |
276 |
|
add x_stride, FIX_ROUND |
277 |
|
shr x_stride, SCALEBITS_IN |
278 |
|
add x_stride, Y_ADD |
279 |
|
mov [y_ptr + y_stride + 1], dl ; y_ptr[y_stride + 1] |
280 |
|
|
281 |
; u_ptr, v_ptr |
; u_ptr, v_ptr |
282 |
movq mm0, mm6 ; = [ |b4|g4|r4] |
movq mm0, mm6 ; = [ |b4|g4|r4] |
283 |
pmaddwd mm6, [v_mul] ; *= V_MUL |
pmaddwd mm6, [rgb_v_mul] ; *= V_MUL |
284 |
pmaddwd mm0, [u_mul] ; *= U_MUL |
pmaddwd mm0, [rgb_u_mul] ; *= U_MUL |
285 |
movq mm1, mm0 |
movq mm1, mm0 |
286 |
movq mm2, mm6 |
movq mm2, mm6 |
287 |
psrlq mm1, 32 |
psrlq mm1, 32 |
289 |
paddd mm0, mm1 |
paddd mm0, mm1 |
290 |
paddd mm2, mm6 |
paddd mm2, mm6 |
291 |
|
|
292 |
movd edx, mm0 |
movd x_stride_d, mm0 |
293 |
shr edx, 10 |
add x_stride, 4*FIX_ROUND |
294 |
add edx, U_ADD |
shr x_stride, (SCALEBITS_IN+2) |
295 |
mov [ebx], dl |
add x_stride, U_ADD |
296 |
|
mov [u_ptr], dl |
297 |
movd edx, mm2 |
|
298 |
shr edx, 10 |
movd x_stride_d, mm2 |
299 |
add edx, V_ADD |
add x_stride, 4*FIX_ROUND |
300 |
mov [ecx], dl |
shr x_stride, (SCALEBITS_IN+2) |
301 |
|
add x_stride, V_ADD |
302 |
|
mov [v_ptr], dl |
303 |
|
|
304 |
pop edx |
pop x_stride |
305 |
%endmacro |
%endmacro |
306 |
|
|
307 |
;------------------------------------------------------------------------------ |
;------------------------------------------------------------------------------ |
317 |
%endmacro |
%endmacro |
318 |
|
|
319 |
%macro YV12_TO_BGR 2 |
%macro YV12_TO_BGR 2 |
320 |
%define TEMP_Y1 esp |
%define TEMP_Y1 _ESP |
321 |
%define TEMP_Y2 esp + 8 |
%define TEMP_Y2 _ESP + 8 |
322 |
%define TEMP_G1 esp + 16 |
%define TEMP_G1 _ESP + 16 |
323 |
%define TEMP_G2 esp + 24 |
%define TEMP_G2 _ESP + 24 |
324 |
%define TEMP_B1 esp + 32 |
%define TEMP_B1 _ESP + 32 |
325 |
%define TEMP_B2 esp + 40 |
%define TEMP_B2 _ESP + 40 |
326 |
|
|
327 |
movd mm2, [ebx] ; u_ptr[0] |
movd mm2, [u_ptr] ; u_ptr[0] |
328 |
movd mm3, [ecx] ; v_ptr[0] |
movd mm3, [v_ptr] ; v_ptr[0] |
329 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
330 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
331 |
psubsw mm2, [U_SUB] ; U - 128 |
psubsw mm2, [U_SUB] ; U - 128 |
344 |
paddsw mm2, mm3 |
paddsw mm2, mm3 |
345 |
paddsw mm6, mm0 |
paddsw mm6, mm0 |
346 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
347 |
movq mm0, [esi] ; y7y6y5y4y3y2y1y0 -> mm0 |
movq mm0, [y_ptr] ; y7y6y5y4y3y2y1y0 -> mm0 |
348 |
movq mm1, mm0 |
movq mm1, mm0 |
349 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
350 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
356 |
movq [TEMP_Y1], mm0 ; y3y2y1y0 -> mm7 |
movq [TEMP_Y1], mm0 ; y3y2y1y0 -> mm7 |
357 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
358 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
359 |
psraw mm1, SCALEBITS |
psraw mm1, SCALEBITS_OUT |
360 |
psraw mm0, SCALEBITS |
psraw mm0, SCALEBITS_OUT |
361 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
362 |
movq [TEMP_G1], mm0 |
movq [TEMP_G1], mm0 |
363 |
movq mm0, [esi+eax] ; y7y6y5y4y3y2y1y0 -> mm0 |
movq mm0, [y_ptr+y_stride] ; y7y6y5y4y3y2y1y0 -> mm0 |
364 |
movq mm1, mm0 |
movq mm1, mm0 |
365 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
366 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
372 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
373 |
movq mm2, mm0 |
movq mm2, mm0 |
374 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
375 |
psraw mm1, SCALEBITS |
psraw mm1, SCALEBITS_OUT |
376 |
psraw mm0, SCALEBITS |
psraw mm0, SCALEBITS_OUT |
377 |
packuswb mm0, mm1 ; g7g6g5g4g3g2g1g0 -> mm0 |
packuswb mm0, mm1 ; g7g6g5g4g3g2g1g0 -> mm0 |
378 |
movq [TEMP_G2], mm0 |
movq [TEMP_G2], mm0 |
379 |
movq mm0, mm4 |
movq mm0, mm4 |
383 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
384 |
movq mm7, mm2 ; y3y2y1y0 -> mm7 |
movq mm7, mm2 ; y3y2y1y0 -> mm7 |
385 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
386 |
psraw mm3, SCALEBITS |
psraw mm3, SCALEBITS_OUT |
387 |
psraw mm2, SCALEBITS |
psraw mm2, SCALEBITS_OUT |
388 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
389 |
movq [TEMP_B2], mm2 |
movq [TEMP_B2], mm2 |
390 |
movq mm3, [TEMP_Y2] |
movq mm3, [TEMP_Y2] |
393 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
394 |
movq mm4, mm2 ; TEMP_Y1 -> mm4 |
movq mm4, mm2 ; TEMP_Y1 -> mm4 |
395 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
396 |
psraw mm3, SCALEBITS |
psraw mm3, SCALEBITS_OUT |
397 |
psraw mm2, SCALEBITS |
psraw mm2, SCALEBITS_OUT |
398 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
399 |
movq [TEMP_B1], mm2 |
movq [TEMP_B1], mm2 |
400 |
movq mm0, mm5 |
movq mm0, mm5 |
402 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm0 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm0 |
403 |
paddsw mm1, mm5 ; r7r6r5r4 -> mm1 |
paddsw mm1, mm5 ; r7r6r5r4 -> mm1 |
404 |
paddsw mm7, mm0 ; r3r2r1r0 -> mm7 |
paddsw mm7, mm0 ; r3r2r1r0 -> mm7 |
405 |
psraw mm1, SCALEBITS |
psraw mm1, SCALEBITS_OUT |
406 |
psraw mm7, SCALEBITS |
psraw mm7, SCALEBITS_OUT |
407 |
packuswb mm7, mm1 ; r7r6r5r4r3r2r1r0 -> mm7 (TEMP_R2) |
packuswb mm7, mm1 ; r7r6r5r4r3r2r1r0 -> mm7 (TEMP_R2) |
408 |
paddsw mm6, mm5 ; r7r6r5r4 -> mm6 |
paddsw mm6, mm5 ; r7r6r5r4 -> mm6 |
409 |
paddsw mm4, mm0 ; r3r2r1r0 -> mm4 |
paddsw mm4, mm0 ; r3r2r1r0 -> mm4 |
410 |
psraw mm6, SCALEBITS |
psraw mm6, SCALEBITS_OUT |
411 |
psraw mm4, SCALEBITS |
psraw mm4, SCALEBITS_OUT |
412 |
packuswb mm4, mm6 ; r7r6r5r4r3r2r1r0 -> mm4 (TEMP_R1) |
packuswb mm4, mm6 ; r7r6r5r4r3r2r1r0 -> mm4 (TEMP_R1) |
413 |
movq mm0, [TEMP_B1] |
movq mm0, [TEMP_B1] |
414 |
movq mm1, [TEMP_G1] |
movq mm1, [TEMP_G1] |
427 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
428 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
429 |
%if %1 == 3 ; BGR (24-bit) |
%if %1 == 3 ; BGR (24-bit) |
430 |
movd [edi], mm2 |
movd [x_ptr], mm2 |
431 |
psrlq mm2, 32 |
psrlq mm2, 32 |
432 |
movd [edi + 3], mm2 |
movd [x_ptr + 3], mm2 |
433 |
movd [edi + 6], mm4 |
movd [x_ptr + 6], mm4 |
434 |
psrlq mm4, 32 |
psrlq mm4, 32 |
435 |
movd [edi + 9], mm4 |
movd [x_ptr + 9], mm4 |
436 |
movd [edi + 12], mm0 |
movd [x_ptr + 12], mm0 |
437 |
psrlq mm0, 32 |
psrlq mm0, 32 |
438 |
movd [edi + 15], mm0 |
movd [x_ptr + 15], mm0 |
439 |
movq mm2, mm5 |
movq mm2, mm5 |
440 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
441 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
444 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
445 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
446 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
447 |
movq [edi + 16], mm5 |
movq [x_ptr + 16], mm5 |
448 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
449 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
450 |
movq mm2, mm0 |
movq mm2, mm0 |
459 |
movq mm5, mm0 |
movq mm5, mm0 |
460 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
461 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
462 |
movd [edi+edx], mm2 |
movd [x_ptr+x_stride], mm2 |
463 |
psrlq mm2, 32 |
psrlq mm2, 32 |
464 |
movd [edi+edx + 3], mm2 |
movd [x_ptr+x_stride + 3], mm2 |
465 |
movd [edi+edx + 6], mm4 |
movd [x_ptr+x_stride + 6], mm4 |
466 |
psrlq mm4, 32 |
psrlq mm4, 32 |
467 |
movd [edi+edx + 9], mm4 |
movd [x_ptr+x_stride + 9], mm4 |
468 |
movd [edi+edx + 12], mm0 |
movd [x_ptr+x_stride + 12], mm0 |
469 |
psrlq mm0, 32 |
psrlq mm0, 32 |
470 |
movd [edi+edx + 15], mm0 |
movd [x_ptr+x_stride + 15], mm0 |
471 |
movq mm2, mm5 |
movq mm2, mm5 |
472 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
473 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
476 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
477 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
478 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
479 |
movq [edi + edx + 16], mm5 |
movq [x_ptr + x_stride + 16], mm5 |
480 |
|
|
481 |
%else ; BGRA (32-bit) |
%else ; BGRA (32-bit) |
482 |
movq [edi], mm2 |
movq [x_ptr], mm2 |
483 |
movq [edi + 8], mm4 |
movq [x_ptr + 8], mm4 |
484 |
movq [edi + 16], mm0 |
movq [x_ptr + 16], mm0 |
485 |
movq [edi + 24], mm5 |
movq [x_ptr + 24], mm5 |
486 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
487 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
488 |
movq mm2, mm0 |
movq mm2, mm0 |
497 |
movq mm5, mm0 |
movq mm5, mm0 |
498 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
499 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
500 |
movq [edi + edx], mm2 |
movq [x_ptr + x_stride], mm2 |
501 |
movq [edi + edx + 8], mm4 |
movq [x_ptr + x_stride + 8], mm4 |
502 |
movq [edi + edx + 16], mm0 |
movq [x_ptr + x_stride + 16], mm0 |
503 |
movq [edi + edx + 24], mm5 |
movq [x_ptr + x_stride + 24], mm5 |
504 |
%endif |
%endif |
505 |
|
|
506 |
%undef TEMP_Y1 |
%undef TEMP_Y1 |
515 |
; Code |
; Code |
516 |
;============================================================================= |
;============================================================================= |
517 |
|
|
518 |
SECTION .text |
TEXT |
519 |
|
|
520 |
%include "colorspace_mmx.inc" |
%include "colorspace_mmx.inc" |
521 |
|
|
522 |
; input |
; input |
523 |
MAKE_COLORSPACE bgr_to_yv12_mmx,0, 3,2,2, BGR_TO_YV12, 3, -1 |
MAKE_COLORSPACE bgr_to_yv12_mmx,0, 3,2,2, BGR_TO_YV12, 3, -1 |
524 |
MAKE_COLORSPACE bgra_to_yv12_mmx,0, 4,2,2, BGR_TO_YV12, 4, -1 |
MAKE_COLORSPACE bgra_to_yv12_mmx,0, 4,2,2, BGR_TO_YV12, 4, -1 |
525 |
|
MAKE_COLORSPACE rgb_to_yv12_mmx,0, 3,2,2, RGB_TO_YV12, 3, -1 |
526 |
|
MAKE_COLORSPACE rgba_to_yv12_mmx,0, 4,2,2, RGB_TO_YV12, 4, -1 |
527 |
|
|
528 |
; output |
; output |
529 |
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1 |
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1 |
530 |
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1 |
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1 |
531 |
|
|
532 |
|
NON_EXEC_STACK |