Commit 120b8d7e authored by fbarchard@google.com

ARGBRect optimized for Visual C so that it will do rep stosd. Fix x/y calculation.

BUG=none
TEST=none
Review URL: http://webrtc-codereview.appspot.com/292008

git-svn-id: http://libyuv.googlecode.com/svn/trunk@94 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 3e8a577b
Name: libyuv
URL: http://code.google.com/p/libyuv/
-Version: 93
+Version: 94
License: BSD
License File: LICENSE
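For reference, "rep stosd" is the x86 string-store instruction that writes a 32 bit register value to memory ECX times; Visual C exposes it through the __stosd intrinsic and its optimizer can also emit it for a simple word-fill loop like the SetRow32_C added in this change. A minimal stand-alone sketch of such a fill follows; it is illustrative only and not part of this patch (the FillRow32 name and the fallback path are assumptions):

// Illustrative sketch only -- not from this patch. FillRow32 is a
// hypothetical helper doing the same job as SetRow32_C below:
// store 'count' copies of a 32 bit value.
#include <stdint.h>
#include <stddef.h>
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>  // __stosd
#endif

static void FillRow32(uint32_t* dst, uint32_t v32, int count) {
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
  // MSVC lowers __stosd directly to rep stosd.
  __stosd((unsigned long*)dst, (unsigned long)v32, (size_t)count);
#else
  // Portable fallback; optimizing compilers may turn this into a
  // block fill (memset-style or rep stosd) as well.
  for (int x = 0; x < count; ++x) {
    dst[x] = v32;
  }
#endif
}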
@@ -1654,17 +1654,32 @@ int BG24ToARGB(const uint8* src_bg24, int src_stride_bg24,
 }
-// SetRows32 writes 'count' bytes using a 32 bit value repeated
+// SetRow8 writes 'count' bytes using a 32 bit value repeated
+// SetRow32 writes 'count' words using a 32 bit value repeated
 #if defined(__ARM_NEON__) && !defined(YUV_DISABLE_ASM)
 #define HAS_SETROW_NEON
+static void SetRow8_NEON(uint8* dst, uint32 v32, int count) {
+  asm volatile (
+    "vdup.u32  q0, %2            \n"  // duplicate 4 ints
+    "1:                          \n"
+    "vst1.u32  {q0}, [%0]!       \n"  // store
+    "subs      %1, %1, #16       \n"  // 16 bytes per loop
+    "bhi       1b                \n"
+  : "+r"(dst),   // %0
+    "+r"(count)  // %1
+  : "r"(v32)     // %2
+  : "q0", "memory", "cc"
+  );
+}
 static void SetRow32_NEON(uint8* dst, uint32 v32, int count) {
   asm volatile (
     "vdup.u32  q0, %2            \n"  // duplicate 4 ints
     "1:                          \n"
     "vst1.u32  {q0}, [%0]!       \n"  // store
-    "subs      %1, %1, #16       \n"  // 16 processed per loop
+    "subs      %1, %1, #4        \n"  // 4 pixels per loop
     "bhi       1b                \n"
   : "+r"(dst),   // %0
     "+r"(count)  // %1
   : "r"(v32)     // %2
@@ -1675,7 +1690,7 @@ static void SetRow32_NEON(uint8* dst, uint32 v32, int count) {
 #elif defined(_M_IX86) && !defined(YUV_DISABLE_ASM)
 #define HAS_SETROW_SSE2
 __declspec(naked)
-static void SetRow32_SSE2(uint8* dst, uint32 v32, int count) {
+static void SetRow8_SSE2(uint8* dst, uint32 v32, int count) {
   __asm {
     mov        eax, [esp + 4]   // dst
     movd       xmm5, [esp + 8]  // v32
@@ -1691,18 +1706,54 @@ static void SetRow32_SSE2(uint8* dst, uint32 v32, int count) {
   }
 }
+__declspec(naked)
+static void SetRow32_SSE2(uint8* dst, uint32 v32, int count) {
+  __asm {
+    mov        eax, [esp + 4]   // dst
+    movd       xmm5, [esp + 8]  // v32
+    mov        ecx, [esp + 12]  // count
+    pshufd     xmm5, xmm5, 0
+  convertloop:
+    movdqa     [eax], xmm5
+    lea        eax, [eax + 16]
+    sub        ecx, 4
+    ja         convertloop
+    ret
+  }
+}
 #elif (defined(__x86_64__) || defined(__i386__)) && !defined(YUV_DISABLE_ASM)
 #define HAS_SETROW_SSE2
+static void SetRow8_SSE2(uint8* dst, uint32 v32, int count) {
+  asm volatile (
+    "movd      %2, %%xmm5         \n"
+    "pshufd    $0x0,%%xmm5,%%xmm5 \n"
+    "1:                           \n"
+    "movdqa    %%xmm5,(%0)        \n"
+    "lea       0x10(%0),%0        \n"
+    "sub       $0x10,%1           \n"
+    "ja        1b                 \n"
+  : "+r"(dst),  // %0
+    "+r"(count) // %1
+  : "r"(v32)    // %2
+  : "memory", "cc"
+#if defined(__SSE2__)
+    , "xmm5"
+#endif
+  );
+}
 static void SetRow32_SSE2(uint8* dst, uint32 v32, int count) {
   asm volatile (
     "movd      %2, %%xmm5         \n"
     "pshufd    $0x0,%%xmm5,%%xmm5 \n"
     "1:                           \n"
     "movdqa    %%xmm5,(%0)        \n"
     "lea       0x10(%0),%0        \n"
-    "sub       $0x10,%1           \n"
+    "sub       $0x4,%1            \n"
     "ja        1b                 \n"
   : "+r"(dst),  // %0
     "+r"(count) // %1
   : "r"(v32)    // %2
@@ -1710,7 +1761,7 @@ static void SetRow32_SSE2(uint8* dst, uint32 v32, int count) {
 #if defined(__SSE2__)
     , "xmm5"
 #endif
   );
 }
 #endif
@@ -1718,6 +1769,14 @@ static void SetRow8_C(uint8* dst, uint32 v8, int count) {
   memset(dst, v8, count);
 }
+// count measured in bytes
+static void SetRow32_C(uint8* dst, uint32 v32, int count) {
+  uint32* d = reinterpret_cast<uint32*>(dst);
+  for (int x = 0; x < count; ++x) {
+    d[x] = v32;
+  }
+}
 static void SetPlane(uint8* dst_y, int dst_stride_y,
                      int width, int height,
                      uint32 value) {
@@ -1726,13 +1785,13 @@ static void SetPlane(uint8* dst_y, int dst_stride_y,
   if (TestCpuFlag(kCpuHasNEON) &&
       IS_ALIGNED(width, 16) &&
       IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
-    SetRow = SetRow32_NEON;
+    SetRow = SetRow8_NEON;
   } else
 #elif defined(HAS_SETROW_SSE2)
   if (TestCpuFlag(kCpuHasSSE2) &&
       IS_ALIGNED(width, 16) &&
       IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
-    SetRow = SetRow32_SSE2;
+    SetRow = SetRow8_SSE2;
   } else
 #endif
   {
@@ -1774,45 +1833,37 @@ int I420Rect(uint8* dst_y, int dst_stride_y,
   return 0;
 }
-// count measured in bytes
-static void SetRow32_C(uint8* dst, uint32 v8, int count) {
-  uint32* d = reinterpret_cast<uint32*>(dst);
-  for (int x = 0; x < count; x += 4) {
-    *d++ = v8;
-  }
-}
 // Draw a rectangle into ARGB
 int ARGBRect(uint8* dst_argb, int dst_stride_argb,
-             int x, int y,
+             int dst_x, int dst_y,
              int width, int height,
             uint32 value) {
   if (!dst_argb ||
       width <= 0 || height <= 0 ||
-      x < 0 || y < 0) {
+      dst_x < 0 || dst_y < 0) {
     return -1;
   }
+  uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
   void (*SetRow)(uint8* dst, uint32 value, int count);
 #if defined(HAS_SETROW_NEON)
   if (TestCpuFlag(kCpuHasNEON) &&
       IS_ALIGNED(width, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
     SetRow = SetRow32_NEON;
   } else
 #elif defined(HAS_SETROW_SSE2)
   if (TestCpuFlag(kCpuHasSSE2) &&
       IS_ALIGNED(width, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
     SetRow = SetRow32_SSE2;
   } else
 #endif
   {
     SetRow = SetRow32_C;
   }
-  int w32 = width << 2;
   for (int y = 0; y < height; ++y) {
-    SetRow(dst_argb, value, w32);
-    dst_argb += dst_stride_argb;
+    SetRow(dst, value, width);
+    dst += dst_stride_argb;
   }
   return 0;
 }
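With the corrected calculation, drawing at (dst_x, dst_y) now starts at dst_argb + dst_y * dst_stride_argb + dst_x * 4 instead of ignoring the coordinates. A usage sketch follows; it is illustrative only and not part of the patch (the buffer size, the color value, and the assumption that ARGBRect is declared in libyuv/planar_functions.h are mine):

// Illustrative usage only -- not part of this change.
// Draws a 16x8 opaque red rectangle at pixel (32, 10) of a 640x480 ARGB frame.
#include <stdlib.h>
#include "libyuv/planar_functions.h"  // assumed location of ARGBRect

int DrawRedBox(void) {
  const int width = 640;
  const int height = 480;
  const int stride = width * 4;        // 4 bytes per ARGB pixel
  uint8* frame = (uint8*)malloc(stride * height);
  if (!frame) {
    return -1;
  }
  // 0xFFFF0000 is A=FF R=FF G=00 B=00 when treated as a 32 bit ARGB word.
  int ret = ARGBRect(frame, stride,
                     32, 10,           // dst_x, dst_y -- now honored
                     16, 8,            // rectangle width and height
                     0xFFFF0000u);
  free(frame);
  return ret;                          // 0 on success, -1 on bad arguments
}

Note that the NEON and SSE2 row paths are only taken when width is a multiple of 16 and the start address and stride are 16 byte aligned; otherwise the call falls back to SetRow32_C.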