1 |
;/************************************************************************** |
;/***************************************************************************** |
2 |
; * |
; * |
3 |
; * XVID MPEG-4 VIDEO CODEC |
; * XVID MPEG-4 VIDEO CODEC |
4 |
; * colorspace |
; * - RGB colorspace conversions - |
5 |
|
; * |
6 |
|
; * Copyright(C) 2002-2008 Michael Militzer <michael@xvid.org> |
7 |
|
; * 2002-2003 Peter Ross <pross@xvid.org> |
8 |
; * |
; * |
9 |
; * This program is free software; you can redistribute it and/or modify |
; * This program is free software; you can redistribute it and/or modify |
10 |
; * it under the terms of the GNU General Public License as published by |
; * it under the terms of the GNU General Public License as published by |
18 |
; * |
; * |
19 |
; * You should have received a copy of the GNU General Public License |
; * You should have received a copy of the GNU General Public License |
20 |
; * along with this program; if not, write to the Free Software |
; * along with this program; if not, write to the Free Software |
21 |
; * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
|
; * |
|
|
; *************************************************************************/ |
|
|
|
|
|
;/************************************************************************** |
|
22 |
; * |
; * |
23 |
; * History: |
; ****************************************************************************/ |
|
; * |
|
|
; * 10.10.2001 initial version; (c)2002 peter ross <pross@xvid.org> |
|
|
; * |
|
|
; *************************************************************************/ |
|
24 |
|
|
25 |
|
%include "nasm.inc" |
26 |
|
|
27 |
bits 32 |
;============================================================================= |
28 |
|
; Some constants |
29 |
|
;============================================================================= |
30 |
|
|
31 |
; cglobal: declare symbol %1 as global, prepending an underscore when PREFIX
; is defined (ABIs that mangle C names, e.g. Mach-O / some Win32 targets),
; and aliasing the bare name onto the prefixed one so callers in this file
; can keep using %1.
; NOTE(review): this file also includes "nasm.inc", which normally supplies
; cglobal in this codebase — confirm this local copy is intentional and not
; a duplicate definition.
%macro cglobal 1

%ifdef PREFIX


global _%1


%define %1 _%1


%else


global %1


%endif


%endmacro
|
|
|
|
|
|
|
|
section .data |
|
|
align 16 |
|
|
|
|
|
;=========================================================================== |
|
32 |
; RGB->YV12 yuv constants |
; RGB->YV12 yuv constants |
33 |
;=========================================================================== |
;----------------------------------------------------------------------------- |
34 |
|
|
35 |
%define Y_R 0.257 |
%define Y_R 0.257 |
36 |
%define Y_G 0.504 |
%define Y_G 0.504 |
37 |
%define Y_B 0.098 |
%define Y_B 0.098 |
47 |
%define V_B 0.071 |
%define V_B 0.071 |
48 |
%define V_ADD 128 |
%define V_ADD 128 |
49 |
|
|
50 |
;=========================================================================== |
; Scaling used during conversion |
51 |
; RGB->YV12 multiplication matrices |
%define SCALEBITS_OUT 6 |
52 |
;=========================================================================== |
%define SCALEBITS_IN 13 |
|
; FIX(Y_B) FIX(Y_G) FIX(Y_R) |
|
|
y_mul dw 25, 129, 66, 0 |
|
|
u_mul dw 112, -74, -38, 0 |
|
|
v_mul dw -18, -94, 112, 0 |
|
53 |
|
|
54 |
|
%define FIX_ROUND (1<<(SCALEBITS_IN-1)) |
55 |
|
|
56 |
;=========================================================================== |
;============================================================================= |
57 |
; YV12->RGB data |
; Read only data |
58 |
;=========================================================================== |
;============================================================================= |
|
%define SCALEBITS 6 |
|
|
Y_SUB dw 16, 16, 16, 16 |
|
|
U_SUB dw 128, 128, 128, 128 |
|
|
V_SUB dw 128, 128, 128, 128 |
|
59 |
|
|
60 |
Y_MUL dw 74, 74, 74, 74 |
DATA |
61 |
|
|
62 |
UG_MUL dw 25, 25, 25, 25 |
ALIGN SECTION_ALIGN |
|
VG_MUL dw 52, 52, 52, 52 |
|
63 |
|
|
64 |
UB_MUL dw 129, 129, 129, 129 |
;----------------------------------------------------------------------------- |
65 |
VR_MUL dw 102, 102, 102, 102 |
; RGB->YV12 multiplication matrices |
66 |
|
;----------------------------------------------------------------------------- |
67 |
|
; FIX(Y_B) FIX(Y_G) FIX(Y_R) Ignored |
68 |
|
|
69 |
|
bgr_y_mul: dw 803, 4129, 2105, 0 |
70 |
|
bgr_u_mul: dw 3596, -2384, -1212, 0 |
71 |
|
bgr_v_mul: dw -582, -3015, 3596, 0 |
72 |
|
|
73 |
|
;----------------------------------------------------------------------------- |
74 |
|
; BGR->YV12 multiplication matrices |
75 |
|
;----------------------------------------------------------------------------- |
76 |
|
; FIX(Y_R) FIX(Y_G) FIX(Y_B) Ignored |
77 |
|
|
78 |
|
rgb_y_mul: dw 2105, 4129, 803, 0 |
79 |
|
rgb_u_mul: dw -1212, -2384, 3596, 0 |
80 |
|
rgb_v_mul: dw 3596, -3015, -582, 0 |
81 |
|
|
82 |
|
;----------------------------------------------------------------------------- |
83 |
|
; YV12->RGB data |
84 |
|
;----------------------------------------------------------------------------- |
85 |
|
|
86 |
section .text |
Y_SUB: dw 16, 16, 16, 16 |
87 |
|
U_SUB: dw 128, 128, 128, 128 |
88 |
|
V_SUB: dw 128, 128, 128, 128 |
89 |
|
|
90 |
%include "colorspace_mmx.inc" |
Y_MUL: dw 74, 74, 74, 74 |
91 |
|
|
92 |
|
UG_MUL: dw 25, 25, 25, 25 |
93 |
|
VG_MUL: dw 52, 52, 52, 52 |
94 |
|
|
95 |
|
UB_MUL: dw 129, 129, 129, 129 |
96 |
|
VR_MUL: dw 102, 102, 102, 102 |
97 |
|
|
98 |
|
BRIGHT: db 128, 128, 128, 128, 128, 128, 128, 128 |
99 |
|
|
100 |
|
;============================================================================= |
101 |
|
; Helper macros used with the colorspace_mmx.inc file |
102 |
|
;============================================================================= |
103 |
|
|
104 |
;------------------------------------------------------------------------------
; BGR_TO_YV12( BYTES )
;
; BYTES 3=bgr(24bit), 4=bgra(32-bit)
;
; bytes=3/4, pixels = 2, vpixels=2
;------------------------------------------------------------------------------

; Load the BGR-ordered luma coefficients once per call; the BGR_TO_YV12
; per-pixel macro expects them pre-loaded in mm7.
%macro BGR_TO_YV12_INIT 2
  movq mm7, [bgr_y_mul]
%endmacro
115 |
|
|
116 |
|
|
117 |
%macro RGB_TO_YV12 2 |
%macro BGR_TO_YV12 2 |
118 |
; y_out |
; y_out |
119 |
|
|
120 |
pxor mm4, mm4 |
pxor mm4, mm4 |
121 |
pxor mm5, mm5 |
pxor mm5, mm5 |
122 |
movd mm0, [edi] ; x_ptr[0...] |
movd mm0, [x_ptr] ; x_ptr[0...] |
123 |
movd mm2, [edi+edx] ; x_ptr[x_stride...] |
movd mm2, [x_ptr+x_stride] ; x_ptr[x_stride...] |
124 |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
125 |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
126 |
movq mm6, mm0 ; = [ |b4|g4|r4] |
movq mm6, mm0 ; = [ |b4|g4|r4] |
136 |
|
|
137 |
pxor mm4, mm4 |
pxor mm4, mm4 |
138 |
pxor mm5, mm5 |
pxor mm5, mm5 |
139 |
movd mm1, [edi+%1] ; src[%1...] |
movd mm1, [x_ptr+%1] ; src[%1...] |
140 |
movd mm3, [edi+edx+%1] ; src[x_stride+%1...] |
movd mm3, [x_ptr+x_stride+%1] ; src[x_stride+%1...] |
141 |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
142 |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
143 |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
151 |
paddd mm1, mm4 ; +[b] |
paddd mm1, mm4 ; +[b] |
152 |
paddd mm3, mm5 ; +[b] |
paddd mm3, mm5 ; +[b] |
153 |
|
|
154 |
push edx |
push x_stride |
155 |
|
|
156 |
movd edx, mm0 |
movd x_stride_d, mm0 |
157 |
shr edx, 8 |
add x_stride, FIX_ROUND |
158 |
add edx, Y_ADD |
shr x_stride, SCALEBITS_IN |
159 |
mov [esi], dl ; y_ptr[0] |
add x_stride, Y_ADD |
160 |
|
mov [y_ptr], dl ; y_ptr[0] |
161 |
movd edx, mm1 |
|
162 |
shr edx, 8 |
movd x_stride_d, mm1 |
163 |
add edx, Y_ADD |
add x_stride, FIX_ROUND |
164 |
mov [esi + 1], dl ; y_ptr[1] |
shr x_stride, SCALEBITS_IN |
165 |
|
add x_stride, Y_ADD |
166 |
movd edx, mm2 |
mov [y_ptr + 1], dl ; y_ptr[1] |
167 |
shr edx, 8 |
|
168 |
add edx, Y_ADD |
movd x_stride_d, mm2 |
169 |
mov [esi + eax + 0], dl ; y_ptr[y_stride + 0] |
add x_stride, FIX_ROUND |
170 |
|
shr x_stride, SCALEBITS_IN |
171 |
movd edx, mm3 |
add x_stride, Y_ADD |
172 |
shr edx, 8 |
mov [y_ptr + y_stride + 0], dl ; y_ptr[y_stride + 0] |
173 |
add edx, Y_ADD |
|
174 |
mov [esi + eax + 1], dl ; y_ptr[y_stride + 1] |
movd x_stride_d, mm3 |
175 |
|
add x_stride, FIX_ROUND |
176 |
|
shr x_stride, SCALEBITS_IN |
177 |
|
add x_stride, Y_ADD |
178 |
|
mov [y_ptr + y_stride + 1], dl ; y_ptr[y_stride + 1] |
179 |
|
|
180 |
; u_ptr, v_ptr |
; u_ptr, v_ptr |
|
|
|
181 |
movq mm0, mm6 ; = [ |b4|g4|r4] |
movq mm0, mm6 ; = [ |b4|g4|r4] |
182 |
pmaddwd mm6, [v_mul] ; *= V_MUL |
pmaddwd mm6, [bgr_v_mul] ; *= V_MUL |
183 |
pmaddwd mm0, [u_mul] ; *= U_MUL |
pmaddwd mm0, [bgr_u_mul] ; *= U_MUL |
184 |
movq mm1, mm0 |
movq mm1, mm0 |
185 |
movq mm2, mm6 |
movq mm2, mm6 |
186 |
psrlq mm1, 32 |
psrlq mm1, 32 |
188 |
paddd mm0, mm1 |
paddd mm0, mm1 |
189 |
paddd mm2, mm6 |
paddd mm2, mm6 |
190 |
|
|
191 |
movd edx, mm0 |
movd x_stride_d, mm0 |
192 |
shr edx, 10 |
add x_stride, 4*FIX_ROUND |
193 |
add edx, U_ADD |
shr x_stride, (SCALEBITS_IN+2) |
194 |
mov [ebx], dl |
add x_stride, U_ADD |
195 |
|
mov [u_ptr], dl |
196 |
movd edx, mm2 |
|
197 |
shr edx, 10 |
movd x_stride_d, mm2 |
198 |
add edx, V_ADD |
add x_stride, 4*FIX_ROUND |
199 |
mov [ecx], dl |
shr x_stride, (SCALEBITS_IN+2) |
200 |
|
add x_stride, V_ADD |
201 |
|
mov [v_ptr], dl |
202 |
|
|
203 |
pop edx |
pop x_stride |
204 |
%endmacro |
%endmacro |
205 |
|
|
206 |
|
;------------------------------------------------------------------------------
; RGB_TO_YV12( BYTES )
;
; BYTES 3=rgb(24bit), 4=rgba(32-bit)
;
; bytes=3/4, pixels = 2, vpixels=2
;------------------------------------------------------------------------------

; Load the RGB-ordered luma coefficients once per call; the RGB_TO_YV12
; per-pixel macro expects them pre-loaded in mm7.
%macro RGB_TO_YV12_INIT 2
  movq mm7, [rgb_y_mul]
%endmacro
217 |
|
|
218 |
|
|
219 |
|
; Convert a 2x2 block of packed RGB/RGBA pixels to one Y 2x2 block plus one
; averaged U and V sample (YV12 chroma is subsampled 2x2).
; %1 = bytes per pixel (3 or 4).
; Register roles (named via nasm.inc aliases): x_ptr/x_stride = RGB source,
; y_ptr/y_stride = luma destination; u_ptr/v_ptr = chroma destinations.
; x_stride (edx alias) is pushed and reused as a scratch register whose low
; byte is dl; mm7 must hold the luma coefficients (see RGB_TO_YV12_INIT).
%macro RGB_TO_YV12 2
  ; y_out: weigh r/g/b of all four pixels with mm7 (pmaddwd pairs b*Yb+g*Yg,
  ; r*Yr+0), then fold the two dwords together to finish the dot product.

  pxor mm4, mm4
  pxor mm5, mm5
  movd mm0, [x_ptr]                   ; x_ptr[0...]
  movd mm2, [x_ptr+x_stride]          ; x_ptr[x_stride...]
  punpcklbw mm0, mm4                  ; [ |b |g |r ]
  punpcklbw mm2, mm5                  ; [ |b |g |r ]
  movq mm6, mm0                       ; = [ |b4|g4|r4]
  paddw mm6, mm2                      ; +[ |b4|g4|r4] accumulate for chroma
  pmaddwd mm0, mm7                    ; *= Y_MUL
  pmaddwd mm2, mm7                    ; *= Y_MUL
  movq mm4, mm0                       ; [r]
  movq mm5, mm2                       ; [r]
  psrlq mm4, 32                       ; +[g]
  psrlq mm5, 32                       ; +[g]
  paddd mm0, mm4                      ; +[b]
  paddd mm2, mm5                      ; +[b]

  pxor mm4, mm4
  pxor mm5, mm5
  movd mm1, [x_ptr+%1]                ; src[%1...] (right-hand column)
  movd mm3, [x_ptr+x_stride+%1]       ; src[x_stride+%1...]
  punpcklbw mm1, mm4                  ; [ |b |g |r ]
  punpcklbw mm3, mm5                  ; [ |b |g |r ]
  paddw mm6, mm1                      ; +[ |b4|g4|r4]
  paddw mm6, mm3                      ; +[ |b4|g4|r4] mm6 = sum of 4 pixels
  pmaddwd mm1, mm7                    ; *= Y_MUL
  pmaddwd mm3, mm7                    ; *= Y_MUL
  movq mm4, mm1                       ; [r]
  movq mm5, mm3                       ; [r]
  psrlq mm4, 32                       ; +[g]
  psrlq mm5, 32                       ; +[g]
  paddd mm1, mm4                      ; +[b]
  paddd mm3, mm5                      ; +[b]

  push x_stride                       ; free edx (x_stride) as scratch

  ; store the four luma bytes: round, rescale, bias by Y_ADD
  movd x_stride_d, mm0
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr], dl                     ; y_ptr[0]

  movd x_stride_d, mm1
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + 1], dl                 ; y_ptr[1]

  movd x_stride_d, mm2
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 0], dl      ; y_ptr[y_stride + 0]

  movd x_stride_d, mm3
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 1], dl      ; y_ptr[y_stride + 1]

  ; u_ptr, v_ptr: chroma from the 4-pixel sum (hence the extra /4 below)
  movq mm0, mm6                       ; = [ |b4|g4|r4]
  pmaddwd mm6, [rgb_v_mul]            ; *= V_MUL
  pmaddwd mm0, [rgb_u_mul]            ; *= U_MUL
  movq mm1, mm0
  movq mm2, mm6
  psrlq mm1, 32
  psrlq mm2, 32
  paddd mm0, mm1
  paddd mm2, mm6

  movd x_stride_d, mm0
  add x_stride, 4*FIX_ROUND           ; round; scale includes the /4 average
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, U_ADD
  mov [u_ptr], dl

  movd x_stride_d, mm2
  add x_stride, 4*FIX_ROUND
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, V_ADD
  mov [v_ptr], dl

  pop x_stride                        ; restore the source stride
%endmacro
306 |
|
|
307 |
;------------------------------------------------------------------------------
; YV12_TO_BGR( BYTES )
;
; BYTES 3=bgr(24-bit), 4=bgra(32-bit)
;
; bytes=3/4, pixels = 8, vpixels=2
;------------------------------------------------------------------------------

; Per-call setup for YV12_TO_BGR: mm7 is kept as an all-zero register for the
; byte/word unpacking done throughout the conversion.
%macro YV12_TO_BGR_INIT 2
  pxor mm7, mm7                       ; clear mm7
%endmacro
318 |
|
|
319 |
|
%macro YV12_TO_BGR 2 |
320 |
|
%define TEMP_Y1 _ESP |
321 |
|
%define TEMP_Y2 _ESP + 8 |
322 |
|
%define TEMP_G1 _ESP + 16 |
323 |
|
%define TEMP_G2 _ESP + 24 |
324 |
|
%define TEMP_B1 _ESP + 32 |
325 |
|
%define TEMP_B2 _ESP + 40 |
326 |
|
|
327 |
%macro YV12_TO_RGB 2 |
movd mm2, [u_ptr] ; u_ptr[0] |
328 |
%define TEMP_Y1 esp |
movd mm3, [v_ptr] ; v_ptr[0] |
|
%define TEMP_Y2 esp + 8 |
|
|
%define TEMP_G1 esp + 16 |
|
|
%define TEMP_G2 esp + 24 |
|
|
%define TEMP_B1 esp + 32 |
|
|
%define TEMP_B2 esp + 40 |
|
|
movd mm2, [ebx] ; u_ptr[0] |
|
|
movd mm3, [ecx] ; v_ptr[0] |
|
|
|
|
329 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
330 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
|
|
|
331 |
psubsw mm2, [U_SUB] ; U - 128 |
psubsw mm2, [U_SUB] ; U - 128 |
332 |
psubsw mm3, [V_SUB] ; V - 128 |
psubsw mm3, [V_SUB] ; V - 128 |
|
|
|
333 |
movq mm4, mm2 |
movq mm4, mm2 |
334 |
movq mm5, mm3 |
movq mm5, mm3 |
|
|
|
335 |
pmullw mm2, [UG_MUL] |
pmullw mm2, [UG_MUL] |
336 |
pmullw mm3, [VG_MUL] |
pmullw mm3, [VG_MUL] |
|
|
|
337 |
movq mm6, mm2 ; u3u2u1u0 -> mm6 |
movq mm6, mm2 ; u3u2u1u0 -> mm6 |
338 |
punpckhwd mm2, mm2 ; u3u3u2u2 -> mm2 |
punpckhwd mm2, mm2 ; u3u3u2u2 -> mm2 |
339 |
punpcklwd mm6, mm6 ; u1u1u0u0 -> mm6 |
punpcklwd mm6, mm6 ; u1u1u0u0 -> mm6 |
|
|
|
340 |
pmullw mm4, [UB_MUL] ; B_ADD -> mm4 |
pmullw mm4, [UB_MUL] ; B_ADD -> mm4 |
|
|
|
341 |
movq mm0, mm3 |
movq mm0, mm3 |
342 |
punpckhwd mm3, mm3 ; v3v3v2v2 -> mm2 |
punpckhwd mm3, mm3 ; v3v3v2v2 -> mm2 |
343 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm6 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm6 |
|
|
|
344 |
paddsw mm2, mm3 |
paddsw mm2, mm3 |
345 |
paddsw mm6, mm0 |
paddsw mm6, mm0 |
|
|
|
346 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
347 |
|
movq mm0, [y_ptr] ; y7y6y5y4y3y2y1y0 -> mm0 |
|
movq mm0, [esi] ; y7y6y5y4y3y2y1y0 -> mm0 |
|
|
|
|
348 |
movq mm1, mm0 |
movq mm1, mm0 |
349 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
350 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
|
|
|
351 |
psubsw mm0, [Y_SUB] ; Y - Y_SUB |
psubsw mm0, [Y_SUB] ; Y - Y_SUB |
352 |
psubsw mm1, [Y_SUB] ; Y - Y_SUB |
psubsw mm1, [Y_SUB] ; Y - Y_SUB |
|
|
|
353 |
pmullw mm1, [Y_MUL] |
pmullw mm1, [Y_MUL] |
354 |
pmullw mm0, [Y_MUL] |
pmullw mm0, [Y_MUL] |
|
|
|
355 |
movq [TEMP_Y2], mm1 ; y7y6y5y4 -> mm3 |
movq [TEMP_Y2], mm1 ; y7y6y5y4 -> mm3 |
356 |
movq [TEMP_Y1], mm0 ; y3y2y1y0 -> mm7 |
movq [TEMP_Y1], mm0 ; y3y2y1y0 -> mm7 |
|
|
|
357 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
358 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
359 |
|
psraw mm1, SCALEBITS_OUT |
360 |
psraw mm1, SCALEBITS |
psraw mm0, SCALEBITS_OUT |
|
psraw mm0, SCALEBITS |
|
|
|
|
361 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
|
|
|
362 |
movq [TEMP_G1], mm0 |
movq [TEMP_G1], mm0 |
363 |
|
movq mm0, [y_ptr+y_stride] ; y7y6y5y4y3y2y1y0 -> mm0 |
|
movq mm0, [esi+eax] ; y7y6y5y4y3y2y1y0 -> mm0 |
|
|
|
|
364 |
movq mm1, mm0 |
movq mm1, mm0 |
|
|
|
365 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
366 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
|
|
|
367 |
psubsw mm0, [Y_SUB] ; Y - Y_SUB |
psubsw mm0, [Y_SUB] ; Y - Y_SUB |
368 |
psubsw mm1, [Y_SUB] ; Y - Y_SUB |
psubsw mm1, [Y_SUB] ; Y - Y_SUB |
|
|
|
369 |
pmullw mm1, [Y_MUL] |
pmullw mm1, [Y_MUL] |
370 |
pmullw mm0, [Y_MUL] |
pmullw mm0, [Y_MUL] |
|
|
|
371 |
movq mm3, mm1 |
movq mm3, mm1 |
372 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
psubsw mm1, mm2 ; g7g6g5g4 -> mm1 |
|
|
|
373 |
movq mm2, mm0 |
movq mm2, mm0 |
374 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
psubsw mm0, mm6 ; g3g2g1g0 -> mm0 |
375 |
|
psraw mm1, SCALEBITS_OUT |
376 |
psraw mm1, SCALEBITS |
psraw mm0, SCALEBITS_OUT |
|
psraw mm0, SCALEBITS |
|
|
|
|
377 |
packuswb mm0, mm1 ; g7g6g5g4g3g2g1g0 -> mm0 |
packuswb mm0, mm1 ; g7g6g5g4g3g2g1g0 -> mm0 |
|
|
|
378 |
movq [TEMP_G2], mm0 |
movq [TEMP_G2], mm0 |
|
|
|
379 |
movq mm0, mm4 |
movq mm0, mm4 |
380 |
punpckhwd mm4, mm4 ; u3u3u2u2 -> mm2 |
punpckhwd mm4, mm4 ; u3u3u2u2 -> mm2 |
381 |
punpcklwd mm0, mm0 ; u1u1u0u0 -> mm6 |
punpcklwd mm0, mm0 ; u1u1u0u0 -> mm6 |
|
|
|
382 |
movq mm1, mm3 ; y7y6y5y4 -> mm1 |
movq mm1, mm3 ; y7y6y5y4 -> mm1 |
383 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
|
|
|
384 |
movq mm7, mm2 ; y3y2y1y0 -> mm7 |
movq mm7, mm2 ; y3y2y1y0 -> mm7 |
|
|
|
385 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
386 |
|
psraw mm3, SCALEBITS_OUT |
387 |
psraw mm3, SCALEBITS |
psraw mm2, SCALEBITS_OUT |
|
psraw mm2, SCALEBITS |
|
|
|
|
388 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
|
|
|
389 |
movq [TEMP_B2], mm2 |
movq [TEMP_B2], mm2 |
|
|
|
390 |
movq mm3, [TEMP_Y2] |
movq mm3, [TEMP_Y2] |
391 |
movq mm2, [TEMP_Y1] |
movq mm2, [TEMP_Y1] |
|
|
|
392 |
movq mm6, mm3 ; TEMP_Y2 -> mm6 |
movq mm6, mm3 ; TEMP_Y2 -> mm6 |
393 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
paddsw mm3, mm4 ; b7b6b5b4 -> mm3 |
|
|
|
394 |
movq mm4, mm2 ; TEMP_Y1 -> mm4 |
movq mm4, mm2 ; TEMP_Y1 -> mm4 |
395 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
paddsw mm2, mm0 ; b3b2b1b0 -> mm2 |
396 |
|
psraw mm3, SCALEBITS_OUT |
397 |
psraw mm3, SCALEBITS |
psraw mm2, SCALEBITS_OUT |
|
psraw mm2, SCALEBITS |
|
|
|
|
398 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
packuswb mm2, mm3 ; b7b6b5b4b3b2b1b0 -> mm2 |
|
|
|
399 |
movq [TEMP_B1], mm2 |
movq [TEMP_B1], mm2 |
|
|
|
400 |
movq mm0, mm5 |
movq mm0, mm5 |
401 |
punpckhwd mm5, mm5 ; v3v3v2v2 -> mm5 |
punpckhwd mm5, mm5 ; v3v3v2v2 -> mm5 |
402 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm0 |
punpcklwd mm0, mm0 ; v1v1v0v0 -> mm0 |
|
|
|
403 |
paddsw mm1, mm5 ; r7r6r5r4 -> mm1 |
paddsw mm1, mm5 ; r7r6r5r4 -> mm1 |
404 |
paddsw mm7, mm0 ; r3r2r1r0 -> mm7 |
paddsw mm7, mm0 ; r3r2r1r0 -> mm7 |
405 |
|
psraw mm1, SCALEBITS_OUT |
406 |
psraw mm1, SCALEBITS |
psraw mm7, SCALEBITS_OUT |
|
psraw mm7, SCALEBITS |
|
|
|
|
407 |
packuswb mm7, mm1 ; r7r6r5r4r3r2r1r0 -> mm7 (TEMP_R2) |
packuswb mm7, mm1 ; r7r6r5r4r3r2r1r0 -> mm7 (TEMP_R2) |
|
|
|
408 |
paddsw mm6, mm5 ; r7r6r5r4 -> mm6 |
paddsw mm6, mm5 ; r7r6r5r4 -> mm6 |
409 |
paddsw mm4, mm0 ; r3r2r1r0 -> mm4 |
paddsw mm4, mm0 ; r3r2r1r0 -> mm4 |
410 |
|
psraw mm6, SCALEBITS_OUT |
411 |
psraw mm6, SCALEBITS |
psraw mm4, SCALEBITS_OUT |
|
psraw mm4, SCALEBITS |
|
|
|
|
412 |
packuswb mm4, mm6 ; r7r6r5r4r3r2r1r0 -> mm4 (TEMP_R1) |
packuswb mm4, mm6 ; r7r6r5r4r3r2r1r0 -> mm4 (TEMP_R1) |
|
|
|
413 |
movq mm0, [TEMP_B1] |
movq mm0, [TEMP_B1] |
414 |
movq mm1, [TEMP_G1] |
movq mm1, [TEMP_G1] |
|
|
|
415 |
movq mm6, mm7 |
movq mm6, mm7 |
|
|
|
416 |
movq mm2, mm0 |
movq mm2, mm0 |
417 |
punpcklbw mm2, mm4 ; r3b3r2b2r1b1r0b0 -> mm2 |
punpcklbw mm2, mm4 ; r3b3r2b2r1b1r0b0 -> mm2 |
418 |
punpckhbw mm0, mm4 ; r7b7r6b6r5b5r4b4 -> mm0 |
punpckhbw mm0, mm4 ; r7b7r6b6r5b5r4b4 -> mm0 |
|
|
|
419 |
pxor mm7, mm7 |
pxor mm7, mm7 |
|
|
|
420 |
movq mm3, mm1 |
movq mm3, mm1 |
421 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
422 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
|
|
|
423 |
movq mm4, mm2 |
movq mm4, mm2 |
424 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
425 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
|
|
|
426 |
movq mm5, mm0 |
movq mm5, mm0 |
427 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
428 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
429 |
|
%if %1 == 3 ; BGR (24-bit) |
430 |
%if %1 == 3 ; RGB24 |
movd [x_ptr], mm2 |
|
movd [edi], mm2 |
|
431 |
psrlq mm2, 32 |
psrlq mm2, 32 |
432 |
|
movd [x_ptr + 3], mm2 |
433 |
movd [edi + 3], mm2 |
movd [x_ptr + 6], mm4 |
|
movd [edi + 6], mm4 |
|
|
|
|
434 |
psrlq mm4, 32 |
psrlq mm4, 32 |
435 |
|
movd [x_ptr + 9], mm4 |
436 |
movd [edi + 9], mm4 |
movd [x_ptr + 12], mm0 |
|
movd [edi + 12], mm0 |
|
|
|
|
437 |
psrlq mm0, 32 |
psrlq mm0, 32 |
438 |
|
movd [x_ptr + 15], mm0 |
439 |
movd [edi + 15], mm0 |
movq mm2, mm5 |
440 |
movd [edi + 18], mm5 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
441 |
|
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
442 |
psrlq mm5, 32 |
psrlq mm5, 32 ; 00000r7g7b7 -> mm5 |
443 |
|
psrlq mm2, 16 ; 000r6g6b600 -> mm2 |
444 |
movd [edi + 21], mm5 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
445 |
|
psllq mm5, 40 ; r7g7b700000 -> mm5 |
446 |
|
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
447 |
|
movq [x_ptr + 16], mm5 |
448 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
449 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
|
|
|
450 |
movq mm2, mm0 |
movq mm2, mm0 |
451 |
punpcklbw mm2, mm6 ; r3b3r2b2r1b1r0b0 -> mm2 |
punpcklbw mm2, mm6 ; r3b3r2b2r1b1r0b0 -> mm2 |
452 |
punpckhbw mm0, mm6 ; r7b7r6b6r5b5r4b4 -> mm0 |
punpckhbw mm0, mm6 ; r7b7r6b6r5b5r4b4 -> mm0 |
|
|
|
453 |
movq mm3, mm1 |
movq mm3, mm1 |
454 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
455 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
|
|
|
456 |
movq mm4, mm2 |
movq mm4, mm2 |
457 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
458 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
|
|
|
459 |
movq mm5, mm0 |
movq mm5, mm0 |
460 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
461 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
462 |
|
movd [x_ptr+x_stride], mm2 |
|
movd [edi+edx], mm2 |
|
463 |
psrlq mm2, 32 |
psrlq mm2, 32 |
464 |
|
movd [x_ptr+x_stride + 3], mm2 |
465 |
movd [edi+edx + 3], mm2 |
movd [x_ptr+x_stride + 6], mm4 |
|
movd [edi+edx + 6], mm4 |
|
|
|
|
466 |
psrlq mm4, 32 |
psrlq mm4, 32 |
467 |
|
movd [x_ptr+x_stride + 9], mm4 |
468 |
movd [edi+edx + 9], mm4 |
movd [x_ptr+x_stride + 12], mm0 |
|
movd [edi+edx + 12], mm0 |
|
|
|
|
469 |
psrlq mm0, 32 |
psrlq mm0, 32 |
470 |
|
movd [x_ptr+x_stride + 15], mm0 |
471 |
movd [edi+edx + 15], mm0 |
movq mm2, mm5 |
472 |
movd [edi+edx + 18], mm5 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
473 |
|
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
474 |
psrlq mm5, 32 |
psrlq mm5, 32 ; 00000r7g7b7 -> mm5 |
475 |
|
psrlq mm2, 16 ; 000r6g6b600 -> mm2 |
476 |
movd [edi+edx + 21], mm5 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
477 |
|
psllq mm5, 40 ; r7g7b700000 -> mm5 |
478 |
%else ; RGB32 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
479 |
movq [edi], mm2 |
movq [x_ptr + x_stride + 16], mm5 |
480 |
movq [edi + 8], mm4 |
|
481 |
movq [edi + 16], mm0 |
%else ; BGRA (32-bit) |
482 |
movq [edi + 24], mm5 |
movq [x_ptr], mm2 |
483 |
|
movq [x_ptr + 8], mm4 |
484 |
|
movq [x_ptr + 16], mm0 |
485 |
|
movq [x_ptr + 24], mm5 |
486 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
487 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
|
|
|
488 |
movq mm2, mm0 |
movq mm2, mm0 |
489 |
punpcklbw mm2, mm6 ; r3b3r2b2r1b1r0b0 -> mm2 |
punpcklbw mm2, mm6 ; r3b3r2b2r1b1r0b0 -> mm2 |
490 |
punpckhbw mm0, mm6 ; r7b7r6b6r5b5r4b4 -> mm0 |
punpckhbw mm0, mm6 ; r7b7r6b6r5b5r4b4 -> mm0 |
|
|
|
491 |
movq mm3, mm1 |
movq mm3, mm1 |
492 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
punpcklbw mm1, mm7 ; 0g30g20g10g0 -> mm1 |
493 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
punpckhbw mm3, mm7 ; 0g70g60g50g4 -> mm3 |
|
|
|
494 |
movq mm4, mm2 |
movq mm4, mm2 |
495 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
punpcklbw mm2, mm1 ; 0r1g1b10r0g0b0 -> mm2 |
496 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
punpckhbw mm4, mm1 ; 0r3g3b30r2g2b2 -> mm4 |
|
|
|
497 |
movq mm5, mm0 |
movq mm5, mm0 |
498 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
499 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
500 |
|
movq [x_ptr + x_stride], mm2 |
501 |
movq [edi + edx], mm2 |
movq [x_ptr + x_stride + 8], mm4 |
502 |
movq [edi + edx + 8], mm4 |
movq [x_ptr + x_stride + 16], mm0 |
503 |
movq [edi + edx + 16], mm0 |
movq [x_ptr + x_stride + 24], mm5 |
|
movq [edi + edx + 24], mm5 |
|
504 |
%endif |
%endif |
505 |
|
|
506 |
%undef TEMP_Y1 |
%undef TEMP_Y1 |
510 |
%undef TEMP_B1 |
%undef TEMP_B1 |
511 |
%undef TEMP_B2 |
%undef TEMP_B2 |
512 |
%endmacro |
%endmacro |
|
;=============================================================================
; Code
;=============================================================================

TEXT

%include "colorspace_mmx.inc"

; input (packed RGB/BGR -> planar YV12)
MAKE_COLORSPACE bgr_to_yv12_mmx,0, 3,2,2, BGR_TO_YV12, 3, -1
MAKE_COLORSPACE bgra_to_yv12_mmx,0, 4,2,2, BGR_TO_YV12, 4, -1
MAKE_COLORSPACE rgb_to_yv12_mmx,0, 3,2,2, RGB_TO_YV12, 3, -1
MAKE_COLORSPACE rgba_to_yv12_mmx,0, 4,2,2, RGB_TO_YV12, 4, -1

; output (planar YV12 -> packed BGR/BGRA; 48 bytes of stack scratch)
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1

NON_EXEC_STACK