Commit 794fe123 authored by fbarchard@google.com

Alpha blend: fix 4 pixel loop bug and make the blender C code match the SSE version, for better testability and as reference code for future optimized code.
BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/645008

git-svn-id: http://libyuv.googlecode.com/svn/trunk@287 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent ee220888
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 286
Version: 287
License: BSD
License File: LICENSE
@@ -11,7 +11,7 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 286
#define LIBYUV_VERSION 287
#endif // INCLUDE_LIBYUV_VERSION_H_
@@ -588,194 +588,78 @@ void UYVYToYRow_C(const uint8* src_yuy2, uint8* dst_y, int width) {
}
}
#define BLENDER(f, b, a) (((256 - a) * b) >> 8) + f
#define BLEND(f, b, a) (((256 - a) * b) >> 8) + f
// Blend src_argb0 over src_argb1 and store to dst_argb.
// dst_argb may be src_argb0 or src_argb1.
// This code mimics the SSSE3 version for better testability.
void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width) {
for (int x = 0; x < width - 1; x += 2) {
uint32 fb = src_argb0[0];
uint32 fg = src_argb0[1];
uint32 fr = src_argb0[2];
uint32 a = src_argb0[3];
if (a == 0) {
*reinterpret_cast<uint32*>(dst_argb) =
*reinterpret_cast<const uint32*>(src_argb1);
} else if (a == 255) {
*reinterpret_cast<uint32*>(dst_argb) =
*reinterpret_cast<const uint32*>(src_argb0);
} else {
const uint32 fb = src_argb0[0];
const uint32 fg = src_argb0[1];
const uint32 fr = src_argb0[2];
const uint32 bb = src_argb1[0];
const uint32 bg = src_argb1[1];
const uint32 br = src_argb1[2];
dst_argb[0] = BLENDER(fb, bb, a);
dst_argb[1] = BLENDER(fg, bg, a);
dst_argb[2] = BLENDER(fr, br, a);
dst_argb[3] = 255u;
}
uint32 bb = src_argb1[0];
uint32 bg = src_argb1[1];
uint32 br = src_argb1[2];
dst_argb[0] = BLEND(fb, bb, a);
dst_argb[1] = BLEND(fg, bg, a);
dst_argb[2] = BLEND(fr, br, a);
dst_argb[3] = 255u;
fb = src_argb0[4 + 0];
fg = src_argb0[4 + 1];
fr = src_argb0[4 + 2];
a = src_argb0[4 + 3];
if (a == 0) {
*reinterpret_cast<uint32*>(dst_argb + 4) =
*reinterpret_cast<const uint32*>(src_argb1 + 4);
} else if (a == 255) {
*reinterpret_cast<uint32*>(dst_argb + 4) =
*reinterpret_cast<const uint32*>(src_argb0 + 4);
} else {
const uint32 fb = src_argb0[4 + 0];
const uint32 fg = src_argb0[4 + 1];
const uint32 fr = src_argb0[4 + 2];
const uint32 bb = src_argb1[4 + 0];
const uint32 bg = src_argb1[4 + 1];
const uint32 br = src_argb1[4 + 2];
dst_argb[4 + 0] = BLENDER(fb, bb, a);
dst_argb[4 + 1] = BLENDER(fg, bg, a);
dst_argb[4 + 2] = BLENDER(fr, br, a);
dst_argb[4 + 3] = 255u;
}
bb = src_argb1[4 + 0];
bg = src_argb1[4 + 1];
br = src_argb1[4 + 2];
dst_argb[4 + 0] = BLEND(fb, bb, a);
dst_argb[4 + 1] = BLEND(fg, bg, a);
dst_argb[4 + 2] = BLEND(fr, br, a);
dst_argb[4 + 3] = 255u;
src_argb0 += 8;
src_argb1 += 8;
dst_argb += 8;
}
if (width & 1) {
uint32 fb = src_argb0[0];
uint32 fg = src_argb0[1];
uint32 fr = src_argb0[2];
uint32 a = src_argb0[3];
if (a == 0) {
*reinterpret_cast<uint32*>(dst_argb) =
*reinterpret_cast<const uint32*>(src_argb1);
} else if (a == 255) {
*reinterpret_cast<uint32*>(dst_argb) =
*reinterpret_cast<const uint32*>(src_argb0);
} else {
const uint32 fb = src_argb0[0];
const uint32 fg = src_argb0[1];
const uint32 fr = src_argb0[2];
const uint32 bb = src_argb1[0];
const uint32 bg = src_argb1[1];
const uint32 br = src_argb1[2];
dst_argb[0] = BLENDER(fb, bb, a);
dst_argb[1] = BLENDER(fg, bg, a);
dst_argb[2] = BLENDER(fr, br, a);
dst_argb[3] = 255u;
}
uint32 bb = src_argb1[0];
uint32 bg = src_argb1[1];
uint32 br = src_argb1[2];
dst_argb[0] = BLEND(fb, bb, a);
dst_argb[1] = BLEND(fg, bg, a);
dst_argb[2] = BLEND(fr, br, a);
dst_argb[3] = 255u;
}
}
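For orientation, the new BLEND macro implements a premultiplied "over" composite, approximating the divide by 255 with a shift by 8, which is what the SSE code does with pmullw/psrlw. A minimal single-channel sketch (hypothetical, not part of this change):

#include <cstdint>
#include <cstdio>

// Same per-channel formula as the BLEND macro above: scale the background
// by (256 - alpha), shift down by 8, and add the premultiplied foreground.
static uint32_t BlendChannel(uint32_t f, uint32_t b, uint32_t a) {
  return (((256 - a) * b) >> 8) + f;
}

int main() {
  // Half-opaque mid-grey foreground over a white background.
  uint32_t f = 128, b = 255, a = 128;
  printf("blended = %u\n", static_cast<unsigned>(BlendChannel(f, b, a)));  // 255
  return 0;
}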
// Wrappers to handle odd width
#define YANY(NAMEANY, I420TORGB_SSE, I420TORGB_C, UV_SHIFT) \
void NAMEANY(const uint8* y_buf, \
const uint8* u_buf, \
const uint8* v_buf, \
uint8* rgb_buf, \
int width) { \
int n = width & ~7; \
I420TORGB_SSE(y_buf, u_buf, v_buf, rgb_buf, n); \
I420TORGB_C(y_buf + n, \
u_buf + (n >> UV_SHIFT), \
v_buf + (n >> UV_SHIFT), \
rgb_buf + n * 4, width & 7); \
}
// Wrappers to handle odd width
#define Y2NY(NAMEANY, NV12TORGB_SSE, NV12TORGB_C, UV_SHIFT) \
void NAMEANY(const uint8* y_buf, \
const uint8* uv_buf, \
uint8* rgb_buf, \
int width) { \
int n = width & ~7; \
NV12TORGB_SSE(y_buf, uv_buf, rgb_buf, n); \
NV12TORGB_C(y_buf + n, \
uv_buf + (n >> UV_SHIFT), \
rgb_buf + n * 4, width & 7); \
}
#if defined(HAS_I422TOARGBROW_SSSE3)
YANY(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_Unaligned_SSSE3, I444ToARGBRow_C, 0)
YANY(I422ToARGBRow_Any_SSSE3, I422ToARGBRow_Unaligned_SSSE3, I422ToARGBRow_C, 1)
YANY(I411ToARGBRow_Any_SSSE3, I411ToARGBRow_Unaligned_SSSE3, I411ToARGBRow_C, 2)
Y2NY(NV12ToARGBRow_Any_SSSE3, NV12ToARGBRow_Unaligned_SSSE3, NV12ToARGBRow_C, 0)
Y2NY(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_Unaligned_SSSE3, NV21ToARGBRow_C, 0)
YANY(I422ToBGRARow_Any_SSSE3, I422ToBGRARow_Unaligned_SSSE3, I422ToBGRARow_C, 1)
YANY(I422ToABGRRow_Any_SSSE3, I422ToABGRRow_Unaligned_SSSE3, I422ToABGRRow_C, 1)
#endif
#if defined(HAS_I422TOARGBROW_NEON)
YANY(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, I422ToARGBRow_C, 1)
YANY(I422ToBGRARow_Any_NEON, I422ToBGRARow_NEON, I422ToBGRARow_C, 1)
YANY(I422ToABGRRow_Any_NEON, I422ToABGRRow_NEON, I422ToABGRRow_C, 1)
#endif
#undef YANY
#define RGBANY(NAMEANY, ARGBTORGB, BPP) \
void NAMEANY(const uint8* argb_buf, \
uint8* rgb_buf, \
int width) { \
SIMD_ALIGNED(uint8 row[kMaxStride]); \
ARGBTORGB(argb_buf, row, width); \
memcpy(rgb_buf, row, width * BPP); \
}
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
RGBANY(ARGBToRGB24Row_Any_SSSE3, ARGBToRGB24Row_SSSE3, 3)
RGBANY(ARGBToRAWRow_Any_SSSE3, ARGBToRAWRow_SSSE3, 3)
RGBANY(ARGBToRGB565Row_Any_SSE2, ARGBToRGB565Row_SSE2, 2)
RGBANY(ARGBToARGB1555Row_Any_SSE2, ARGBToARGB1555Row_SSE2, 2)
RGBANY(ARGBToARGB4444Row_Any_SSE2, ARGBToARGB4444Row_SSE2, 2)
#endif
#undef RGBANY
#ifdef HAS_ARGBTOYROW_SSSE3
#define YANY(NAMEANY, ARGBTOY_SSE, BPP) \
void NAMEANY(const uint8* src_argb, uint8* dst_y, int width) { \
ARGBTOY_SSE(src_argb, dst_y, width - 16); \
ARGBTOY_SSE(src_argb + (width - 16) * BPP, dst_y + (width - 16), 16); \
}
YANY(ARGBToYRow_Any_SSSE3, ARGBToYRow_Unaligned_SSSE3, 4)
YANY(BGRAToYRow_Any_SSSE3, BGRAToYRow_Unaligned_SSSE3, 4)
YANY(ABGRToYRow_Any_SSSE3, ABGRToYRow_Unaligned_SSSE3, 4)
YANY(YUY2ToYRow_Any_SSE2, YUY2ToYRow_Unaligned_SSE2, 2)
YANY(UYVYToYRow_Any_SSE2, UYVYToYRow_Unaligned_SSE2, 2)
#undef YANY
#define UVANY(NAMEANY, ARGBTOUV_SSE, ARGBTOUV_C, BPP) \
void NAMEANY(const uint8* src_argb, int src_stride_argb, \
uint8* dst_u, uint8* dst_v, int width) { \
int n = width & ~15; \
ARGBTOUV_SSE(src_argb, src_stride_argb, dst_u, dst_v, n); \
ARGBTOUV_C(src_argb + n * BPP, src_stride_argb, \
dst_u + (n >> 1), \
dst_v + (n >> 1), \
width & 15); \
}
UVANY(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_Unaligned_SSSE3, ARGBToUVRow_C, 4)
UVANY(BGRAToUVRow_Any_SSSE3, BGRAToUVRow_Unaligned_SSSE3, BGRAToUVRow_C, 4)
UVANY(ABGRToUVRow_Any_SSSE3, ABGRToUVRow_Unaligned_SSSE3, ABGRToUVRow_C, 4)
UVANY(YUY2ToUVRow_Any_SSE2, YUY2ToUVRow_Unaligned_SSE2, YUY2ToUVRow_C, 2)
UVANY(UYVYToUVRow_Any_SSE2, UYVYToUVRow_Unaligned_SSE2, UYVYToUVRow_C, 2)
#undef UVANY
#endif
#undef BLEND
#define ATTENUATE(f, a) (a | (a << 8)) * (f | (f << 8)) >> 24
// Multiply source RGB by alpha and store to destination.
// b = (b * a + 127) / 255;
// This code mimics the SSSE3 version for better testability.
void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
for (int i = 0; i < width - 1; i += 2) {
uint32 b = src_argb[0];
uint32 g = src_argb[1];
uint32 r = src_argb[2];
uint32 a = src_argb[3];
dst_argb[0] = (b * a + 255) >> 8;
dst_argb[1] = (g * a + 255) >> 8;
dst_argb[2] = (r * a + 255) >> 8;
dst_argb[0] = ATTENUATE(b, a);
dst_argb[1] = ATTENUATE(g, a);
dst_argb[2] = ATTENUATE(r, a);
dst_argb[3] = a;
b = src_argb[4];
g = src_argb[5];
r = src_argb[6];
a = src_argb[7];
dst_argb[4] = (b * a + 255) >> 8;
dst_argb[5] = (g * a + 255) >> 8;
dst_argb[6] = (r * a + 255) >> 8;
dst_argb[4] = ATTENUATE(b, a);
dst_argb[5] = ATTENUATE(g, a);
dst_argb[6] = ATTENUATE(r, a);
dst_argb[7] = a;
src_argb += 8;
dst_argb += 8;
@@ -786,12 +670,13 @@ void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
const uint32 g = src_argb[1];
const uint32 r = src_argb[2];
const uint32 a = src_argb[3];
dst_argb[0] = (b * a + 255) >> 8;
dst_argb[1] = (g * a + 255) >> 8;
dst_argb[2] = (r * a + 255) >> 8;
dst_argb[0] = ATTENUATE(b, a);
dst_argb[1] = ATTENUATE(g, a);
dst_argb[2] = ATTENUATE(r, a);
dst_argb[3] = a;
}
}
#undef ATTENUATE
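The ATTENUATE macro replaces the old (v * a + 255) >> 8 rounding with a divide-free approximation: v | (v << 8) equals v * 257, so the product shifted right by 24 is close to v * a / 255 (257 * 257 / 2^24 ≈ 1/255). An exhaustive check of the approximation error (hypothetical test, not part of this change):

#include <cstdint>
#include <cstdio>

// Same formula as the ATTENUATE macro: (a * 257) * (f * 257) >> 24.
static uint32_t Attenuate(uint32_t f, uint32_t a) {
  return ((a | (a << 8)) * (f | (f << 8))) >> 24;
}

// Exact rounded reference from the comment above: (f * a + 127) / 255.
static uint32_t AttenuateExact(uint32_t f, uint32_t a) {
  return (f * a + 127) / 255;
}

int main() {
  int max_diff = 0;
  for (uint32_t a = 0; a < 256; ++a) {
    for (uint32_t f = 0; f < 256; ++f) {
      int d = static_cast<int>(Attenuate(f, a)) -
              static_cast<int>(AttenuateExact(f, a));
      if (d < 0) d = -d;
      if (d > max_diff) max_diff = d;
    }
  }
  printf("max |approx - exact| = %d\n", max_diff);  // stays within 1
  return 0;
}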
// Divide source RGB by alpha and store to destination.
// b = (b * 255 + (a / 2)) / a;
@@ -866,6 +751,102 @@ void ARGBUnattenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
}
}
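The body of ARGBUnattenuateRow_C is elided from this hunk; per the comment above it computes b = (b * 255 + (a / 2)) / a per channel. A one-channel sketch of that formula (the a == 0 guard and the clamp are assumptions of the sketch, since the actual body is not shown):

#include <cstdint>

// Sketch of the stated formula: b = (b * 255 + (a / 2)) / a.
static uint8_t UnattenuateChannel(uint32_t b, uint32_t a) {
  if (a == 0) return 0;                            // assumed guard
  uint32_t v = (b * 255 + (a / 2)) / a;
  return v > 255 ? 255 : static_cast<uint8_t>(v);  // assumed clamp
}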
// Wrappers to handle odd width
#define YANY(NAMEANY, I420TORGB_SSE, I420TORGB_C, UV_SHIFT) \
void NAMEANY(const uint8* y_buf, \
const uint8* u_buf, \
const uint8* v_buf, \
uint8* rgb_buf, \
int width) { \
int n = width & ~7; \
I420TORGB_SSE(y_buf, u_buf, v_buf, rgb_buf, n); \
I420TORGB_C(y_buf + n, \
u_buf + (n >> UV_SHIFT), \
v_buf + (n >> UV_SHIFT), \
rgb_buf + n * 4, width & 7); \
}
// Wrappers to handle odd width
#define Y2NY(NAMEANY, NV12TORGB_SSE, NV12TORGB_C, UV_SHIFT) \
void NAMEANY(const uint8* y_buf, \
const uint8* uv_buf, \
uint8* rgb_buf, \
int width) { \
int n = width & ~7; \
NV12TORGB_SSE(y_buf, uv_buf, rgb_buf, n); \
NV12TORGB_C(y_buf + n, \
uv_buf + (n >> UV_SHIFT), \
rgb_buf + n * 4, width & 7); \
}
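All of these *ANY wrappers follow one pattern: run the SIMD row function on the largest multiple of 8 pixels, then finish the remainder with the C row function, advancing the chroma pointers by n >> UV_SHIFT to match the subsampling. As illustration, a hypothetical expansion of YANY for the I422 case (UV_SHIFT = 1), using the row functions instantiated just below (not standalone code):

void I422ToARGBRow_Any_Example(const uint8* y_buf, const uint8* u_buf,
                               const uint8* v_buf, uint8* rgb_buf, int width) {
  int n = width & ~7;                              // largest multiple of 8
  I422ToARGBRow_Unaligned_SSSE3(y_buf, u_buf, v_buf, rgb_buf, n);
  I422ToARGBRow_C(y_buf + n,
                  u_buf + (n >> 1),                // one U sample per 2 pixels
                  v_buf + (n >> 1),                // one V sample per 2 pixels
                  rgb_buf + n * 4,                 // 4 bytes per ARGB pixel
                  width & 7);                      // 0..7 leftover pixels
}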
#if defined(HAS_I422TOARGBROW_SSSE3)
YANY(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_Unaligned_SSSE3, I444ToARGBRow_C, 0)
YANY(I422ToARGBRow_Any_SSSE3, I422ToARGBRow_Unaligned_SSSE3, I422ToARGBRow_C, 1)
YANY(I411ToARGBRow_Any_SSSE3, I411ToARGBRow_Unaligned_SSSE3, I411ToARGBRow_C, 2)
Y2NY(NV12ToARGBRow_Any_SSSE3, NV12ToARGBRow_Unaligned_SSSE3, NV12ToARGBRow_C, 0)
Y2NY(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_Unaligned_SSSE3, NV21ToARGBRow_C, 0)
YANY(I422ToBGRARow_Any_SSSE3, I422ToBGRARow_Unaligned_SSSE3, I422ToBGRARow_C, 1)
YANY(I422ToABGRRow_Any_SSSE3, I422ToABGRRow_Unaligned_SSSE3, I422ToABGRRow_C, 1)
#endif
#if defined(HAS_I422TOARGBROW_NEON)
YANY(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, I422ToARGBRow_C, 1)
YANY(I422ToBGRARow_Any_NEON, I422ToBGRARow_NEON, I422ToBGRARow_C, 1)
YANY(I422ToABGRRow_Any_NEON, I422ToABGRRow_NEON, I422ToABGRRow_C, 1)
#endif
#undef YANY
#define RGBANY(NAMEANY, ARGBTORGB, BPP) \
void NAMEANY(const uint8* argb_buf, \
uint8* rgb_buf, \
int width) { \
SIMD_ALIGNED(uint8 row[kMaxStride]); \
ARGBTORGB(argb_buf, row, width); \
memcpy(rgb_buf, row, width * BPP); \
}
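RGBANY handles destinations narrower than 4 bytes per pixel differently: the converter writes a full row into an aligned scratch buffer, and only width * BPP bytes are copied out, presumably so the SIMD stores can safely run past the end of the visible row. A hypothetical expansion for the RGB24 case (using the row function instantiated below, not standalone):

void ARGBToRGB24Row_Any_Example(const uint8* argb_buf, uint8* rgb_buf,
                                int width) {
  SIMD_ALIGNED(uint8 row[kMaxStride]);         // aligned scratch row
  ARGBToRGB24Row_SSSE3(argb_buf, row, width);  // may store past width * 3
  memcpy(rgb_buf, row, width * 3);             // copy only the valid bytes
}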
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
RGBANY(ARGBToRGB24Row_Any_SSSE3, ARGBToRGB24Row_SSSE3, 3)
RGBANY(ARGBToRAWRow_Any_SSSE3, ARGBToRAWRow_SSSE3, 3)
RGBANY(ARGBToRGB565Row_Any_SSE2, ARGBToRGB565Row_SSE2, 2)
RGBANY(ARGBToARGB1555Row_Any_SSE2, ARGBToARGB1555Row_SSE2, 2)
RGBANY(ARGBToARGB4444Row_Any_SSE2, ARGBToARGB4444Row_SSE2, 2)
#endif
#undef RGBANY
#ifdef HAS_ARGBTOYROW_SSSE3
#define YANY(NAMEANY, ARGBTOY_SSE, BPP) \
void NAMEANY(const uint8* src_argb, uint8* dst_y, int width) { \
ARGBTOY_SSE(src_argb, dst_y, width - 16); \
ARGBTOY_SSE(src_argb + (width - 16) * BPP, dst_y + (width - 16), 16); \
}
YANY(ARGBToYRow_Any_SSSE3, ARGBToYRow_Unaligned_SSSE3, 4)
YANY(BGRAToYRow_Any_SSSE3, BGRAToYRow_Unaligned_SSSE3, 4)
YANY(ABGRToYRow_Any_SSSE3, ABGRToYRow_Unaligned_SSSE3, 4)
YANY(YUY2ToYRow_Any_SSE2, YUY2ToYRow_Unaligned_SSE2, 2)
YANY(UYVYToYRow_Any_SSE2, UYVYToYRow_Unaligned_SSE2, 2)
#undef YANY
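This YANY variant takes yet another approach to the tail: instead of a C fallback, it runs the SIMD function on the first width - 16 pixels and then once more on the last 16 pixels, overlapping the earlier output when width is not a multiple of 16. A hypothetical expansion for the ARGB case (BPP = 4):

void ARGBToYRow_Any_Example(const uint8* src_argb, uint8* dst_y, int width) {
  // Convert everything except the final 16 pixels...
  ARGBToYRow_Unaligned_SSSE3(src_argb, dst_y, width - 16);
  // ...then convert the final 16 pixels, overlapping the first call's
  // output when width is not a multiple of 16.
  ARGBToYRow_Unaligned_SSSE3(src_argb + (width - 16) * 4,
                             dst_y + (width - 16), 16);
}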
#define UVANY(NAMEANY, ARGBTOUV_SSE, ARGBTOUV_C, BPP) \
void NAMEANY(const uint8* src_argb, int src_stride_argb, \
uint8* dst_u, uint8* dst_v, int width) { \
int n = width & ~15; \
ARGBTOUV_SSE(src_argb, src_stride_argb, dst_u, dst_v, n); \
ARGBTOUV_C(src_argb + n * BPP, src_stride_argb, \
dst_u + (n >> 1), \
dst_v + (n >> 1), \
width & 15); \
}
UVANY(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_Unaligned_SSSE3, ARGBToUVRow_C, 4)
UVANY(BGRAToUVRow_Any_SSSE3, BGRAToUVRow_Unaligned_SSSE3, BGRAToUVRow_C, 4)
UVANY(ABGRToUVRow_Any_SSSE3, ABGRToUVRow_Unaligned_SSSE3, ABGRToUVRow_C, 4)
UVANY(YUY2ToUVRow_Any_SSE2, YUY2ToUVRow_Unaligned_SSE2, YUY2ToUVRow_C, 2)
UVANY(UYVYToUVRow_Any_SSE2, UYVYToUVRow_Unaligned_SSE2, UYVYToUVRow_C, 2)
#undef UVANY
#endif
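UVANY mirrors the YANY split but halves the destination offsets (n >> 1) because each U/V output sample covers two source pixels. A hypothetical expansion for the YUY2 case (BPP = 2, not standalone code):

void YUY2ToUVRow_Any_Example(const uint8* src_yuy2, int src_stride_yuy2,
                             uint8* dst_u, uint8* dst_v, int width) {
  int n = width & ~15;                       // largest multiple of 16
  YUY2ToUVRow_Unaligned_SSE2(src_yuy2, src_stride_yuy2, dst_u, dst_v, n);
  YUY2ToUVRow_C(src_yuy2 + n * 2,            // 2 bytes per YUY2 pixel
                src_stride_yuy2,
                dst_u + (n >> 1),            // one U per 2 pixels
                dst_v + (n >> 1),            // one V per 2 pixels
                width & 15);                 // 0..15 leftover pixels
}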
void ComputeCumulativeSumRow_C(const uint8* row, int32* cumsum,
const int32* previous_cumsum, int width) {
int32 row_sum[4] = {0, 0, 0, 0};
@@ -2375,60 +2375,39 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
"add $1-4,%3 \n"
"jl 49f \n"
// 8 pixel loop.
// 4 pixel loop.
".p2align 2 \n"
"41: \n"
"movdqu (%0),%%xmm3 \n"
"lea 0x10(%0),%0 \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movdqu (%1),%%xmm2 \n"
"psrlw $0x8,%%xmm3 \n"
"pshufhw $0xf5,%%xmm3,%%xmm3 \n"
"pshuflw $0xf5,%%xmm3,%%xmm3 \n"
"movdqu (%1),%%xmm2 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movdqu (%1),%%xmm1 \n"
"lea 0x10(%1),%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"movdqu 0x10(%0),%%xmm3 \n"
"lea 0x20(%0),%0 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,(%2) \n"
"jl 49f \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movdqu 0x10(%1),%%xmm2 \n"
"psrlw $0x8,%%xmm3 \n"
"pshufhw $0xf5,%%xmm3,%%xmm3 \n"
"pshuflw $0xf5,%%xmm3,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movdqu 0x10(%1),%%xmm1 \n"
"lea 0x20(%1),%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,0x10(%2) \n"
"lea 0x20(%2),%2 \n"
"lea 0x10(%2),%2 \n"
"jge 41b \n"
"49: \n"
"add $0x3,%3 \n"
"jl 99f \n"
// 1 pixel loop.
// 1 pixel loop.
"91: \n"
"movd (%0),%%xmm3 \n"
"lea 0x4(%0),%0 \n"
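For readers tracing the new 4 pixel loop: each iteration loads 4 ARGB pixels, broadcasts each pixel's alpha across its 4 channels as 16-bit words (pxor/psrlw/pshufhw/pshuflw, plus paddw of 1, yielding 256 - a), scales the background's even (_r_b) and odd (_a_g) byte lanes with pmullw, and adds them to the foreground, whose alpha was forced to 255, using saturating paddusb. A rough scalar model of one iteration (hypothetical, for orientation only):

#include <cstdint>

// Scalar model of one 4-pixel SSE2/SSSE3 blend iteration.
static void Blend4Pixels(const uint8_t* src0,   // foreground (premultiplied)
                         const uint8_t* src1,   // background
                         uint8_t* dst) {
  for (int i = 0; i < 4; ++i) {
    uint32_t a = src0[4 * i + 3];               // alpha, broadcast per pixel
    for (int c = 0; c < 3; ++c) {
      uint32_t f = src0[4 * i + c];             // foreground channel
      uint32_t b = src1[4 * i + c];             // background lane
      uint32_t v = f + (((256 - a) * b) >> 8);  // pmullw + shift/mask
      dst[4 * i + c] = v > 255 ? 255 : static_cast<uint8_t>(v);  // paddusb
    }
    dst[4 * i + 3] = 255;                       // por with alpha mask
  }
}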
@@ -2531,56 +2510,37 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
"add $1-4,%3 \n"
"jl 49f \n"
// 8 pixel loop.
// 4 pixel loop.
".p2align 2 \n"
"41: \n"
"movdqu (%0),%%xmm3 \n"
"lea 0x10(%0),%0 \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"pshufb %4,%%xmm3 \n"
"movdqu (%1),%%xmm2 \n"
"pshufb %4,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movdqu (%1),%%xmm1 \n"
"lea 0x10(%1),%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"movdqu 0x10(%0),%%xmm3 \n"
"lea 0x20(%0),%0 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,(%2) \n"
"jl 49f \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movdqu 0x10(%1),%%xmm2 \n"
"pshufb %4,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movdqu 0x10(%1),%%xmm1 \n"
"lea 0x20(%1),%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,0x10(%2) \n"
"lea 0x20(%2),%2 \n"
"lea 0x10(%2),%2 \n"
"jge 41b \n"
"49: \n"
"add $0x3,%3 \n"
"jl 99f \n"
// 1 pixel loop.
// 1 pixel loop.
"91: \n"
"movd (%0),%%xmm3 \n"
"lea 0x4(%0),%0 \n"
@@ -2629,7 +2589,7 @@ void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
"pcmpeqb %%xmm5,%%xmm5 \n"
"psrld $0x8,%%xmm5 \n"
// 4 pixel loop
// 4 pixel loop
".p2align 4 \n"
"1: \n"
"movdqa (%0),%%xmm0 \n"
@@ -2474,54 +2474,31 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
add ecx, 1 - 4
jl convertloop4b
// 8 pixel loop.
align 4
// 4 pixel loop.
convertloop4:
movdqu xmm3, [eax]
movdqu xmm3, [eax] // src argb
lea eax, [eax + 16]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
movdqu xmm2, [esi] // _r_b
psrlw xmm3, 8 // alpha
pshufhw xmm3, xmm3,0F5h // 8 alpha words
pshuflw xmm3, xmm3,0F5h
movdqu xmm2, [esi] // _r_b
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movdqu xmm1, [esi] // _a_g
lea esi, [esi + 16]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
movdqu xmm3, [eax + 16]
lea eax, [eax + 32]
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
sub ecx, 4
movdqa [edx], xmm0
jl convertloop4b
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
movdqu xmm2, [esi + 16] // _r_b
psrlw xmm3, 8 // alpha
pshufhw xmm3, xmm3,0F5h // 8 alpha words
pshuflw xmm3, xmm3,0F5h
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movdqu xmm1, [esi + 16] // _a_g
lea esi, [esi + 32]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
sub ecx, 4
movdqa [edx + 16], xmm0
lea edx, [edx + 32]
lea edx, [edx + 16]
jge convertloop4
convertloop4b:
@@ -2530,7 +2507,7 @@ void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
// 1 pixel loop.
convertloop1:
movd xmm3, [eax]
movd xmm3, [eax] // src argb
lea eax, [eax + 4]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
@@ -2629,50 +2606,29 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
add ecx, 1 - 4
jl convertloop4b
// 8 pixel loop.
align 4
// 4 pixel loop.
convertloop4:
movdqu xmm3, [eax]
movdqu xmm3, [eax] // src argb
lea eax, [eax + 16]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
pshufb xmm3, kShuffleAlpha // alpha
movdqu xmm2, [esi] // _r_b
pshufb xmm3, kShuffleAlpha // alpha
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movdqu xmm1, [esi] // _a_g
lea esi, [esi + 16]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
movdqu xmm3, [eax + 16]
lea eax, [eax + 32]
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
sub ecx, 4
movdqa [edx], xmm0
jl convertloop4b
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha
movdqu xmm2, [esi + 16] // _r_b
pshufb xmm3, kShuffleAlpha // alpha
pand xmm2, xmm6 // _r_b
paddw xmm3, xmm7 // 256 - alpha
pmullw xmm2, xmm3 // _r_b * alpha
movdqu xmm1, [esi + 16] // _a_g
lea esi, [esi + 32]
psrlw xmm1, 8 // _a_g
por xmm0, xmm4 // set alpha to 255
pmullw xmm1, xmm3 // _a_g * alpha
psrlw xmm2, 8 // _r_b convert to 8 bits again
paddusb xmm0, xmm2 // + src argb
pand xmm1, xmm5 // a_g_ convert to 8 bits again
paddusb xmm0, xmm1 // + src argb
sub ecx, 4
movdqa [edx + 16], xmm0
lea edx, [edx + 32]
lea edx, [edx + 16]
jge convertloop4
convertloop4b:
@@ -2681,7 +2637,7 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
// 1 pixel loop.
convertloop1:
movd xmm3, [eax]
movd xmm3, [eax] // src argb
lea eax, [eax + 4]
movdqa xmm0, xmm3 // src argb
pxor xmm3, xmm4 // ~alpha