Commit bac5f2c3 authored by fbarchard@google.com's avatar fbarchard@google.com

ARGBBlend functions that handle any alignment and number of pixels

BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/635011

git-svn-id: http://libyuv.googlecode.com/svn/trunk@285 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 133adc46
Name: libyuv Name: libyuv
URL: http://code.google.com/p/libyuv/ URL: http://code.google.com/p/libyuv/
Version: 284 Version: 285
License: BSD License: BSD
License File: LICENSE License File: LICENSE
......
...@@ -231,7 +231,7 @@ typedef void (*ARGBBlendRow)(const uint8* src_argb0, ...@@ -231,7 +231,7 @@ typedef void (*ARGBBlendRow)(const uint8* src_argb0,
uint8* dst_argb, int width); uint8* dst_argb, int width);
// Get function to Alpha Blend ARGB pixels and store to destination. // Get function to Alpha Blend ARGB pixels and store to destination.
ARGBBlendRow GetARGBBlend(uint8* dst_argb, int dst_stride_argb, int width); ARGBBlendRow GetARGBBlend();
// Alpha Blend ARGB images and store to destination. // Alpha Blend ARGB images and store to destination.
int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, int ARGBBlend(const uint8* src_argb0, int src_stride_argb0,
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ #ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 284 #define LIBYUV_VERSION 285
#endif // INCLUDE_LIBYUV_VERSION_H_ #endif // INCLUDE_LIBYUV_VERSION_H_
...@@ -163,36 +163,18 @@ int ARGBCopy(const uint8* src_argb, int src_stride_argb, ...@@ -163,36 +163,18 @@ int ARGBCopy(const uint8* src_argb, int src_stride_argb,
// Get a blender that optimized for the CPU, alignment and pixel count. // Get a blender that optimized for the CPU, alignment and pixel count.
// As there are 6 blenders to choose from, the caller should try to use // As there are 6 blenders to choose from, the caller should try to use
// the same blend function for all pixels if possible. // the same blend function for all pixels if possible.
ARGBBlendRow GetARGBBlend(uint8* dst_argb, int dst_stride_argb, int width) { ARGBBlendRow GetARGBBlend() {
void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1, void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
uint8* dst_argb, int width) = ARGBBlendRow_C; uint8* dst_argb, int width) = ARGBBlendRow_C;
#if defined(HAS_ARGBBLENDROW1_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
ARGBBlendRow = ARGBBlendRow1_SSSE3;
#if defined(HAS_ARGBBLENDROW_SSSE3) #if defined(HAS_ARGBBLENDROW_SSSE3)
if (width >= 4) { if (TestCpuFlag(kCpuHasSSSE3)) {
ARGBBlendRow = ARGBBlendRow_Any_SSSE3; ARGBBlendRow = ARGBBlendRow_SSSE3;
if (IS_ALIGNED(width, 4) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBBlendRow = ARGBBlendRow_Aligned_SSSE3;
}
}
#endif
return ARGBBlendRow; return ARGBBlendRow;
} }
#endif #endif
#if defined(HAS_ARGBBLENDROW1_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ARGBBlendRow = ARGBBlendRow1_SSE2;
#if defined(HAS_ARGBBLENDROW_SSE2) #if defined(HAS_ARGBBLENDROW_SSE2)
if (width >= 4) { if (TestCpuFlag(kCpuHasSSE2)) {
ARGBBlendRow = ARGBBlendRow_Any_SSE2; ARGBBlendRow = ARGBBlendRow_SSE2;
if (IS_ALIGNED(width, 4) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBBlendRow = ARGBBlendRow_Aligned_SSE2;
}
}
#endif
} }
#endif #endif
return ARGBBlendRow; return ARGBBlendRow;
...@@ -213,8 +195,7 @@ int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, ...@@ -213,8 +195,7 @@ int ARGBBlend(const uint8* src_argb0, int src_stride_argb0,
dst_stride_argb = -dst_stride_argb; dst_stride_argb = -dst_stride_argb;
} }
void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1, void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
uint8* dst_argb, int width) = uint8* dst_argb, int width) = GetARGBBlend();
GetARGBBlend(dst_argb, dst_stride_argb, width);
for (int y = 0; y < height; ++y) { for (int y = 0; y < height; ++y) {
ARGBBlendRow(src_argb0, src_argb1, dst_argb, width); ARGBBlendRow(src_argb0, src_argb1, dst_argb, width);
...@@ -626,8 +607,8 @@ int ARGB1555ToARGB(const uint8* src_argb1555, int src_stride_argb1555, ...@@ -626,8 +607,8 @@ int ARGB1555ToARGB(const uint8* src_argb1555, int src_stride_argb1555,
src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555; src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555;
src_stride_argb1555 = -src_stride_argb1555; src_stride_argb1555 = -src_stride_argb1555;
} }
void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* dst_argb, int pix) = void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* dst_argb,
ARGB1555ToARGBRow_C; int pix) = ARGB1555ToARGBRow_C;
#if defined(HAS_ARGB1555TOARGBROW_SSE2) #if defined(HAS_ARGB1555TOARGBROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2) && if (TestCpuFlag(kCpuHasSSE2) &&
IS_ALIGNED(width, 8) && IS_ALIGNED(width, 8) &&
...@@ -653,8 +634,8 @@ int ARGB4444ToARGB(const uint8* src_argb4444, int src_stride_argb4444, ...@@ -653,8 +634,8 @@ int ARGB4444ToARGB(const uint8* src_argb4444, int src_stride_argb4444,
src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444; src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444;
src_stride_argb4444 = -src_stride_argb4444; src_stride_argb4444 = -src_stride_argb4444;
} }
void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb, int pix) = void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb,
ARGB4444ToARGBRow_C; int pix) = ARGB4444ToARGBRow_C;
#if defined(HAS_ARGB4444TOARGBROW_SSE2) #if defined(HAS_ARGB4444TOARGBROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2) && if (TestCpuFlag(kCpuHasSSE2) &&
IS_ALIGNED(width, 8) && IS_ALIGNED(width, 8) &&
...@@ -1691,7 +1672,7 @@ int ARGBComputeCumulativeSum(const uint8* src_argb, int src_stride_argb, ...@@ -1691,7 +1672,7 @@ int ARGBComputeCumulativeSum(const uint8* src_argb, int src_stride_argb,
ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2; ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2;
} }
#endif #endif
memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 ints per pixel. memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 int per pixel.
int32* previous_cumsum = dst_cumsum; int32* previous_cumsum = dst_cumsum;
for (int y = 0; y < height; ++y) { for (int y = 0; y < height; ++y) {
ComputeCumulativeSumRow(src_argb, dst_cumsum, previous_cumsum, width); ComputeCumulativeSumRow(src_argb, dst_cumsum, previous_cumsum, width);
......
...@@ -41,7 +41,6 @@ extern "C" { ...@@ -41,7 +41,6 @@ extern "C" {
#define HAS_ARGB4444TOARGBROW_SSE2 #define HAS_ARGB4444TOARGBROW_SSE2
#define HAS_ARGBATTENUATE_SSSE3 #define HAS_ARGBATTENUATE_SSSE3
#define HAS_ARGBBLENDROW_SSSE3 #define HAS_ARGBBLENDROW_SSSE3
#define HAS_ARGBBLENDROW1_SSSE3
#define HAS_ARGBTOARGB1555ROW_SSE2 #define HAS_ARGBTOARGB1555ROW_SSE2
#define HAS_ARGBTOARGB4444ROW_SSE2 #define HAS_ARGBTOARGB4444ROW_SSE2
#define HAS_ARGBTORAWROW_SSSE3 #define HAS_ARGBTORAWROW_SSSE3
...@@ -88,7 +87,6 @@ extern "C" { ...@@ -88,7 +87,6 @@ extern "C" {
#define HAS_MIRRORROW_SSE2 #define HAS_MIRRORROW_SSE2
#define HAS_ARGBATTENUATE_SSE2 #define HAS_ARGBATTENUATE_SSE2
#define HAS_ARGBBLENDROW_SSE2 #define HAS_ARGBBLENDROW_SSE2
#define HAS_ARGBBLENDROW1_SSE2
#endif #endif
// The following are available on Neon platforms // The following are available on Neon platforms
...@@ -404,17 +402,9 @@ void YToARGBRow_SSE2(const uint8* y_buf, ...@@ -404,17 +402,9 @@ void YToARGBRow_SSE2(const uint8* y_buf,
int width); int width);
// ARGB preattenuated alpha blend. // ARGB preattenuated alpha blend.
void ARGBBlendRow_Aligned_SSSE3(const uint8* src_argb0, const uint8* src_argb1, void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width); uint8* dst_argb, int width);
void ARGBBlendRow_Aligned_SSE2(const uint8* src_argb0, const uint8* src_argb1, void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width);
void ARGBBlendRow1_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width);
void ARGBBlendRow1_SSE2(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width);
void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width);
void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width); uint8* dst_argb, int width);
void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
uint8* dst_argb, int width); uint8* dst_argb, int width);
......
...@@ -685,66 +685,6 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, ...@@ -685,66 +685,6 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
} }
} }
#ifdef HAS_ARGBBLENDROW_SSE2
// Alpha blend a row of ARGB pixels of arbitrary width and destination
// alignment: an unaligned scalar head brings dst_argb to a 16-byte
// boundary, the aligned middle runs 4 pixels at a time, and a scalar
// tail finishes the last 0..3 pixels.
void ARGBBlendRow_Any_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
  // Head: peel pixels until the destination is 16-byte aligned.
  if ((uintptr_t)(dst_argb) & 15) {
    int head = width;
    // Only a 4-byte-aligned destination can ever reach 16-byte alignment;
    // otherwise (or for rows of <= 4 pixels) the scalar path takes it all.
    if (head > 4 && ((intptr_t)(dst_argb) & 3) == 0) {
      head = (-(intptr_t)(dst_argb) >> 2) & 3;  // 1..3 pixels to alignment.
    }
    ARGBBlendRow1_SSE2(src_argb0, src_argb1, dst_argb, head);
    src_argb0 += head * 4;  // 4 bytes per ARGB pixel.
    src_argb1 += head * 4;
    dst_argb += head * 4;
    width -= head;
  }
  // Middle: the largest multiple of 4 pixels, on an aligned destination.
  int aligned = width & ~3;
  if (aligned) {
    ARGBBlendRow_Aligned_SSE2(src_argb0, src_argb1, dst_argb, aligned);
  }
  // Tail: remaining 0..3 pixels.
  int tail = width & 3;
  if (tail) {
    ARGBBlendRow1_SSE2(src_argb0 + aligned * 4, src_argb1 + aligned * 4,
                       dst_argb + aligned * 4, tail);
  }
}
#endif  // HAS_ARGBBLENDROW_SSE2
#ifdef HAS_ARGBBLENDROW_SSSE3
// Alpha blend a row of ARGB pixels of arbitrary width and destination
// alignment (SSSE3 variant): an unaligned scalar head brings dst_argb to
// a 16-byte boundary, the aligned middle runs 4 pixels at a time, and a
// scalar tail finishes the last 0..3 pixels.
void ARGBBlendRow_Any_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
                            uint8* dst_argb, int width) {
  // Head: peel pixels until the destination is 16-byte aligned.
  if ((uintptr_t)(dst_argb) & 15) {
    int head = width;
    // Only a 4-byte-aligned destination can ever reach 16-byte alignment;
    // otherwise (or for rows of <= 4 pixels) the scalar path takes it all.
    if (head > 4 && ((intptr_t)(dst_argb) & 3) == 0) {
      head = (-(intptr_t)(dst_argb) >> 2) & 3;  // 1..3 pixels to alignment.
    }
    ARGBBlendRow1_SSSE3(src_argb0, src_argb1, dst_argb, head);
    src_argb0 += head * 4;  // 4 bytes per ARGB pixel.
    src_argb1 += head * 4;
    dst_argb += head * 4;
    width -= head;
  }
  // Middle: the largest multiple of 4 pixels, on an aligned destination.
  int aligned = width & ~3;
  if (aligned) {
    ARGBBlendRow_Aligned_SSSE3(src_argb0, src_argb1, dst_argb, aligned);
  }
  // Tail: remaining 0..3 pixels.
  int tail = width & 3;
  if (tail) {
    ARGBBlendRow1_SSSE3(src_argb0 + aligned * 4, src_argb1 + aligned * 4,
                        dst_argb + aligned * 4, tail);
  }
}
#endif  // HAS_ARGBBLENDROW_SSSE3
// Wrappers to handle odd width // Wrappers to handle odd width
#define YANY(NAMEANY, I420TORGB_SSE, I420TORGB_C, UV_SHIFT) \ #define YANY(NAMEANY, I420TORGB_SSE, I420TORGB_C, UV_SHIFT) \
void NAMEANY(const uint8* y_buf, \ void NAMEANY(const uint8* y_buf, \
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment