Commit 0686f269 authored by Frank Barchard's avatar Frank Barchard

Blend: remove the 1-pixel alignment loop to reduce per-row overhead.

R=tpsiaki@google.com
BUG=none
TESTED=libyuvTest.ARGBBlend_Opt

Review URL: https://webrtc-codereview.appspot.com/50289005.
parent 553c7f85
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 1439
Version: 1440
License: BSD
License File: LICENSE
......
......@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1439
#define LIBYUV_VERSION 1440
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
......@@ -3229,41 +3229,7 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
"psllw $0x8,%%xmm5 \n"
"pcmpeqb %%xmm4,%%xmm4 \n"
"pslld $0x18,%%xmm4 \n"
"sub $0x1,%3 \n"
"je 91f \n"
"jl 99f \n"
// 1 pixel loop until destination pointer is aligned.
"10: \n"
"test $0xf,%2 \n"
"je 19f \n"
"movd " MEMACCESS(0) ",%%xmm3 \n"
"lea " MEMLEA(0x4,0) ",%0 \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movd " MEMACCESS(1) ",%%xmm2 \n"
"psrlw $0x8,%%xmm3 \n"
"pshufhw $0xf5,%%xmm3,%%xmm3 \n"
"pshuflw $0xf5,%%xmm3,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movd " MEMACCESS(1) ",%%xmm1 \n"
"lea " MEMLEA(0x4,1) ",%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"movd %%xmm0," MEMACCESS(2) " \n"
"lea " MEMLEA(0x4,2) ",%2 \n"
"sub $0x1,%3 \n"
"jge 10b \n"
"19: \n"
"add $1-4,%3 \n"
"sub $0x4,%3 \n"
"jl 49f \n"
// 4 pixel loop.
......@@ -3364,39 +3330,7 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
"psllw $0x8,%%xmm5 \n"
"pcmpeqb %%xmm4,%%xmm4 \n"
"pslld $0x18,%%xmm4 \n"
"sub $0x1,%3 \n"
"je 91f \n"
"jl 99f \n"
// 1 pixel loop until destination pointer is aligned.
"10: \n"
"test $0xf,%2 \n"
"je 19f \n"
"movd " MEMACCESS(0) ",%%xmm3 \n"
"lea " MEMLEA(0x4,0) ",%0 \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movd " MEMACCESS(1) ",%%xmm2 \n"
"pshufb %4,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movd " MEMACCESS(1) ",%%xmm1 \n"
"lea " MEMLEA(0x4,1) ",%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"movd %%xmm0," MEMACCESS(2) " \n"
"lea " MEMLEA(0x4,2) ",%2 \n"
"sub $0x1,%3 \n"
"jge 10b \n"
"19: \n"
"add $1-4,%3 \n"
"sub $0x4,%3 \n"
"jl 49f \n"
// 4 pixel loop.
......
......@@ -4020,43 +4020,8 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
psllw xmm5, 8
pcmpeqb xmm4, xmm4 // generate mask 0xff000000
pslld xmm4, 24
sub ecx, 1
je convertloop1 // only 1 pixel?
jl convertloop1b
// 1 pixel loop until destination pointer is aligned.
alignloop1:
test edx, 15 // aligned?
je alignloop1b
movd xmm3, [eax]
lea eax, [eax + 4]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
movd xmm2, [esi] // _r_b
psrlw xmm3, 8 // alpha
pshufhw xmm3, xmm3, 0F5h // 8 alpha words
pshuflw xmm3, xmm3, 0F5h
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movd xmm1, [esi] // _a_g
lea esi, [esi + 4]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
movd [edx], xmm0
lea edx, [edx + 4]
sub ecx, 1
jge alignloop1
alignloop1b:
add ecx, 1 - 4
jl convertloop4b
sub ecx, 4
jl convertloop4b // less than 4 pixels?
// 4 pixel loop.
convertloop4:
......@@ -4154,41 +4119,8 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
psllw xmm5, 8
pcmpeqb xmm4, xmm4 // generate mask 0xff000000
pslld xmm4, 24
sub ecx, 1
je convertloop1 // only 1 pixel?
jl convertloop1b
// 1 pixel loop until destination pointer is aligned.
alignloop1:
test edx, 15 // aligned?
je alignloop1b
movd xmm3, [eax]
lea eax, [eax + 4]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
movd xmm2, [esi] // _r_b
pshufb xmm3, kShuffleAlpha // alpha
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movd xmm1, [esi] // _a_g
lea esi, [esi + 4]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
movd [edx], xmm0
lea edx, [edx + 4]
sub ecx, 1
jge alignloop1
alignloop1b:
add ecx, 1 - 4
jl convertloop4b
sub ecx, 4
jl convertloop4b // less than 4 pixels?
// 4 pixel loop.
convertloop4:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment