Commit 96af870c authored by fbarchard@google.com's avatar fbarchard@google.com

Port Blend SSSE3 to gcc

BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/491001

git-svn-id: http://libyuv.googlecode.com/svn/trunk@239 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 4d3bd834
Name: libyuv Name: libyuv
URL: http://code.google.com/p/libyuv/ URL: http://code.google.com/p/libyuv/
Version: 238 Version: 239
License: BSD License: BSD
License File: LICENSE License File: LICENSE
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ #ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 238 #define LIBYUV_VERSION 239
#endif // INCLUDE_LIBYUV_VERSION_H_ #endif // INCLUDE_LIBYUV_VERSION_H_
...@@ -65,6 +65,7 @@ extern "C" { ...@@ -65,6 +65,7 @@ extern "C" {
#define HAS_YUY2TOUVROW_SSE2 #define HAS_YUY2TOUVROW_SSE2
#define HAS_UYVYTOUVROW_SSE2 #define HAS_UYVYTOUVROW_SSE2
#define HAS_ARGBBLENDROW_SSE2 #define HAS_ARGBBLENDROW_SSE2
#define HAS_ARGBBLENDROW_SSSE3
#endif #endif
// The following are available on Neon platforms // The following are available on Neon platforms
...@@ -78,12 +79,6 @@ extern "C" { ...@@ -78,12 +79,6 @@ extern "C" {
#define HAS_I420TOABGRROW_NEON #define HAS_I420TOABGRROW_NEON
#endif #endif
// The following are only available on Win32
// TODO(fbarchard): Port to GCC
#if !defined(YUV_DISABLE_ASM) && defined(_M_IX86)
#define HAS_ARGBBLENDROW_SSSE3
#endif
#if defined(_MSC_VER) #if defined(_MSC_VER)
#define SIMD_ALIGNED(var) __declspec(align(16)) var #define SIMD_ALIGNED(var) __declspec(align(16)) var
typedef __declspec(align(16)) int8 vec8[16]; typedef __declspec(align(16)) int8 vec8[16];
......
...@@ -526,6 +526,66 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, ...@@ -526,6 +526,66 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
} }
} }
#ifdef HAS_ARGBBLENDROW_SSE2
// Blend a row of any width and alignment with SSE2.
// Splits the row into a scalar lead-in (until dst_argb is 16-byte
// aligned), an aligned SIMD middle section in multiples of 4 pixels,
// and a scalar tail of up to 3 pixels.
void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
  // Scalar lead-in: advance dst_argb to a 16-byte boundary.
  if (((uintptr_t)(dst_argb) & 15) != 0) {
    int lead = width;  // Fallback: blend the whole row one pixel at a time.
    if (width > 4 && ((uintptr_t)(dst_argb) & 3) == 0) {
      // dst_argb is pixel (4-byte) aligned: 1 to 3 pixels reach 16 bytes.
      lead = (int)((-(uintptr_t)(dst_argb) >> 2) & 3);
    }
    ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, lead);
    src_argb0 += lead * 4;
    src_argb1 += lead * 4;
    dst_argb += lead * 4;
    width -= lead;
  }
  // Aligned middle: largest multiple of 4 pixels.
  {
    int mid = width & ~3;
    if (mid > 0) {
      ARGBBlendRow_Aligned_SSE2(src_argb0, src_argb1, dst_argb, mid);
      src_argb0 += mid * 4;
      src_argb1 += mid * 4;
      dst_argb += mid * 4;
      width -= mid;
    }
  }
  // Scalar tail: remaining 1 to 3 pixels.
  if (width > 0) {
    ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width);
  }
}
#endif // HAS_ARGBBLENDROW_SSE2
#ifdef HAS_ARGBBLENDROW_SSSE3
// Blend a row of any width and alignment with SSSE3.
// Same strategy as the SSE2 variant: a scalar lead-in (until dst_argb is
// 16-byte aligned), an aligned SSSE3 middle section in multiples of 4
// pixels, and a scalar tail of up to 3 pixels. Edge pixels reuse the
// scalar SSE2 single-pixel blender.
void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                            uint8* dst_argb, int width) {
  // Scalar lead-in: advance dst_argb to a 16-byte boundary.
  if (((uintptr_t)(dst_argb) & 15) != 0) {
    int lead = width;  // Fallback: blend the whole row one pixel at a time.
    if (width > 4 && ((uintptr_t)(dst_argb) & 3) == 0) {
      // dst_argb is pixel (4-byte) aligned: 1 to 3 pixels reach 16 bytes.
      lead = (int)((-(uintptr_t)(dst_argb) >> 2) & 3);
    }
    ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, lead);
    src_argb0 += lead * 4;
    src_argb1 += lead * 4;
    dst_argb += lead * 4;
    width -= lead;
  }
  // Aligned middle: largest multiple of 4 pixels.
  {
    int mid = width & ~3;
    if (mid > 0) {
      ARGBBlendRow_Aligned_SSSE3(src_argb0, src_argb1, dst_argb, mid);
      src_argb0 += mid * 4;
      src_argb1 += mid * 4;
      dst_argb += mid * 4;
      width -= mid;
    }
  }
  // Scalar tail: remaining 1 to 3 pixels.
  if (width > 0) {
    ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width);
  }
}
#endif // HAS_ARGBBLENDROW_SSSE3
// Wrappers to handle odd sizes/alignments // Wrappers to handle odd sizes/alignments
#define YUVANY(NAMEANY, I420TORGB_SSE, I420TORGB_C) \ #define YUVANY(NAMEANY, I420TORGB_SSE, I420TORGB_C) \
void NAMEANY(const uint8* y_buf, \ void NAMEANY(const uint8* y_buf, \
......
...@@ -2029,8 +2029,10 @@ void UYVYToUVRow_Unaligned_SSE2(const uint8* src_uyvy, int stride_uyvy, ...@@ -2029,8 +2029,10 @@ void UYVYToUVRow_Unaligned_SSE2(const uint8* src_uyvy, int stride_uyvy,
#endif // HAS_YUY2TOYROW_SSE2 #endif // HAS_YUY2TOYROW_SSE2
#ifdef HAS_ARGBBLENDROW_SSE2 #ifdef HAS_ARGBBLENDROW_SSE2
// Blend 8 pixels at a time // Blend 8 pixels at a time.
// Destination aligned to 16 bytes, multiple of 4 pixels // src_argb0 unaligned.
// src_argb1 and dst_argb aligned to 16 bytes.
// width must be multiple of 4 pixels.
void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width) { uint8* dst_argb, int width) {
asm volatile ( asm volatile (
...@@ -2045,7 +2047,7 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -2045,7 +2047,7 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1,
// 8 pixel loop // 8 pixel loop
"1: \n" "1: \n"
"movdqu (%0),%%xmm3 \n" // first 4 pixels "movdqu (%0),%%xmm3 \n"
"movdqa %%xmm3,%%xmm0 \n" "movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n" "pxor %%xmm4,%%xmm3 \n"
"movdqu (%1),%%xmm2 \n" "movdqu (%1),%%xmm2 \n"
...@@ -2068,7 +2070,7 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -2068,7 +2070,7 @@ void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1,
"sub $0x4,%3 \n" "sub $0x4,%3 \n"
"movdqa %%xmm0,(%2) \n" "movdqa %%xmm0,(%2) \n"
"jle 9f \n" "jle 9f \n"
"movdqa %%xmm3,%%xmm0 \n" // next 4 pixels "movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n" "pxor %%xmm4,%%xmm3 \n"
"movdqu 0x10(%1),%%xmm2 \n" "movdqu 0x10(%1),%%xmm2 \n"
"psrlw $0x8,%%xmm3 \n" "psrlw $0x8,%%xmm3 \n"
...@@ -2153,35 +2155,82 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -2153,35 +2155,82 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1,
#endif #endif
); );
} }
#endif // HAS_ARGBBLENDROW_SSE2
void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1, #ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for reversing the bytes.
CONST uvec8 kShuffleAlpha = {
3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
};
void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width) { uint8* dst_argb, int width) {
// Do 1 to 3 pixels to get destination aligned. asm volatile (
if ((uintptr_t)(dst_argb) & 15) { "pcmpeqb %%xmm7,%%xmm7 \n"
int count = width; "psrlw $0xf,%%xmm7 \n"
if (count > 4 && ((intptr_t)(dst_argb) & 3) == 0) { "pcmpeqb %%xmm6,%%xmm6 \n"
count = (-(intptr_t)(dst_argb) >> 2) & 3; "psrlw $0x8,%%xmm6 \n"
} "pcmpeqb %%xmm5,%%xmm5 \n"
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, count); "psllw $0x8,%%xmm5 \n"
src_argb0 += count * 4; "pcmpeqb %%xmm4,%%xmm4 \n"
src_argb1 += count * 4; "pslld $0x18,%%xmm4 \n"
dst_argb += count * 4;
width -= count; // 8 pixel loop
} "1: \n"
// Do multiple of 4 pixels "movdqu (%0),%%xmm3 \n"
if (width & ~3) { "movdqa %%xmm3,%%xmm0 \n"
ARGBBlendRow_Aligned_SSE2(src_argb0, src_argb1, dst_argb, width & ~3); "pxor %%xmm4,%%xmm3 \n"
} "pshufb %4,%%xmm3 \n"
// Do remaining 1 to 3 pixels "movdqu (%1),%%xmm2 \n"
if (width & 3) { "pand %%xmm6,%%xmm2 \n"
src_argb0 += (width & ~3) * 4; "paddw %%xmm7,%%xmm3 \n"
src_argb1 += (width & ~3) * 4; "pmullw %%xmm3,%%xmm2 \n"
dst_argb += (width & ~3) * 4; "movdqu (%1),%%xmm1 \n"
width &= 3; "psrlw $0x8,%%xmm1 \n"
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width); "por %%xmm4,%%xmm0 \n"
} "pmullw %%xmm3,%%xmm1 \n"
"movdqu 0x10(%0),%%xmm3 \n"
"lea 0x20(%0),%0 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,(%2) \n"
"jle 9f \n"
"movdqa %%xmm3,%%xmm0 \n"
"pxor %%xmm4,%%xmm3 \n"
"movdqu 0x10(%1),%%xmm2 \n"
"pshufb %4,%%xmm3 \n"
"pand %%xmm6,%%xmm2 \n"
"paddw %%xmm7,%%xmm3 \n"
"pmullw %%xmm3,%%xmm2 \n"
"movdqu 0x10(%1),%%xmm1 \n"
"lea 0x20(%1),%1 \n"
"psrlw $0x8,%%xmm1 \n"
"por %%xmm4,%%xmm0 \n"
"pmullw %%xmm3,%%xmm1 \n"
"psrlw $0x8,%%xmm2 \n"
"paddusb %%xmm2,%%xmm0 \n"
"pand %%xmm5,%%xmm1 \n"
"paddusb %%xmm1,%%xmm0 \n"
"sub $0x4,%3 \n"
"movdqa %%xmm0,0x10(%2) \n"
"lea 0x20(%2),%2 \n"
"jg 1b \n"
"9: \n"
: "+r"(src_argb0), // %0
"+r"(src_argb1), // %1
"+r"(dst_argb), // %2
"+r"(width) // %3
: "m"(kShuffleAlpha) // %4
: "memory", "cc"
#if defined(__SSE2__)
, "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
#endif
);
} }
#endif // HAS_ARGBBLENDROW_SSE2 #endif // HAS_ARGBBLENDROW_SSSE3
#endif // defined(__x86_64__) || defined(__i386__) #endif // defined(__x86_64__) || defined(__i386__)
......
...@@ -2203,35 +2203,7 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -2203,35 +2203,7 @@ void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1,
} }
} }
void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width) {
// Do 1 to 3 pixels to get destination aligned.
if ((uintptr_t)(dst_argb) & 15) {
int count = width;
if (count > 4 && ((intptr_t)(dst_argb) & 3) == 0) {
count = (-(intptr_t)(dst_argb) >> 2) & 3;
}
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, count);
src_argb0 += count * 4;
src_argb1 += count * 4;
dst_argb += count * 4;
width -= count;
}
// Do multiple of 4 pixels
if (width & ~3) {
ARGBBlendRow_Aligned_SSE2(src_argb0, src_argb1, dst_argb, width & ~3);
}
// Do remaining 1 to 3 pixels
if (width & 3) {
src_argb0 += (width & ~3) * 4;
src_argb1 += (width & ~3) * 4;
dst_argb += (width & ~3) * 4;
width &= 3;
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width);
}
}
#endif // HAS_ARGBBLENDROW_SSE2 #endif // HAS_ARGBBLENDROW_SSE2
#ifdef HAS_ARGBBLENDROW_SSSE3 #ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for reversing the bytes. // Shuffle table for reversing the bytes.
static const uvec8 kShuffleAlpha = { static const uvec8 kShuffleAlpha = {
...@@ -2318,34 +2290,6 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, ...@@ -2318,34 +2290,6 @@ void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
ret ret
} }
} }
void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width) {
// Do 1 to 3 pixels to get destination aligned.
if ((uintptr_t)(dst_argb) & 15) {
int count = width;
if (count > 4 && ((intptr_t)(dst_argb) & 3) == 0) {
count = (-(intptr_t)(dst_argb) >> 2) & 3;
}
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, count);
src_argb0 += count * 4;
src_argb1 += count * 4;
dst_argb += count * 4;
width -= count;
}
// Do multiple of 4 pixels.
if (width & ~3) {
ARGBBlendRow_Aligned_SSSE3(src_argb0, src_argb1, dst_argb, width & ~3);
}
// Do remaining 1 to 3 pixels
if (width & 3) {
src_argb0 += (width & ~3) * 4;
src_argb1 += (width & ~3) * 4;
dst_argb += (width & ~3) * 4;
width &= 3;
ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, width);
}
}
#endif // HAS_ARGBBLENDROW_SSSE3 #endif // HAS_ARGBBLENDROW_SSSE3
#endif // _M_IX86 #endif // _M_IX86
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.