Commit bb92acad authored by fbarchard@google.com

Unaligned math effects: remove the 16-byte alignment requirements from the SSE2 ARGB multiply, add, and subtract row functions (movdqa -> movdqu), make the C fallback clamps branchless, and give the Windows add row a tail loop so it handles any width.

BUG=none
TEST=Add*
Review URL: https://webrtc-codereview.appspot.com/1345004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@670 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 0f726731
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 669
+Version: 670
 License File: LICENSE
 Description:
......
...@@ -11,6 +11,6 @@ ...@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT #ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_ #define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 669 #define LIBYUV_VERSION 670
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT #endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
@@ -558,10 +558,7 @@ int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBMultiplyRow)(const uint8* src0, const uint8* src1, uint8* dst,
                           int width) = ARGBMultiplyRow_C;
 #if defined(HAS_ARGBMULTIPLYROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
     ARGBMultiplyRow = ARGBMultiplyRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBMultiplyRow = ARGBMultiplyRow_SSE2;
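Note: the guard above now keys only on CPU support and pixel width; pointer and stride alignment no longer matter because the row kernels below switch to unaligned loads and stores. For readers unfamiliar with the *_Any_* convention, here is a minimal sketch of what such a wrapper does (hypothetical helper name; the real wrapper is macro-generated, and the two row prototypes are assumed in scope):

#include <stdint.h>

// Sketch only: run the SSE2 row over the largest multiple of 4 pixels,
// then let the C row finish the 1-3 leftover pixels (4 bytes per pixel).
static void ARGBMultiplyRow_Any_SSE2_sketch(const uint8_t* src0,
                                            const uint8_t* src1,
                                            uint8_t* dst, int width) {
  int n = width & ~3;  // width rounded down to a multiple of 4 pixels
  ARGBMultiplyRow_SSE2(src0, src1, dst, n);
  ARGBMultiplyRow_C(src0 + n * 4, src1 + n * 4, dst + n * 4, width & 3);
}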
@@ -622,11 +619,13 @@ int ARGBAdd(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBAddRow)(const uint8* src0, const uint8* src1, uint8* dst,
                      int width) = ARGBAddRow_C;
-#if defined(HAS_ARGBADDROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+#if defined(HAS_ARGBADDROW_SSE2) && defined(_MSC_VER)
+  if (TestCpuFlag(kCpuHasSSE2)) {
+    ARGBAddRow = ARGBAddRow_SSE2;
+  }
+#endif
+#if defined(HAS_ARGBADDROW_SSE2) && !defined(_MSC_VER)
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
     ARGBAddRow = ARGBAddRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBAddRow = ARGBAddRow_SSE2;
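Note: the compiler split above exists because only the Windows (MSVC inline asm) version of ARGBAddRow_SSE2 gains a 1-pixel tail loop in this change, letting the dispatcher call it directly for any width. The posix version still processes exactly 4 pixels per iteration, so non-MSVC builds keep the Any wrapper and the width >= 4 check (see the TODO in the Windows section below).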
@@ -688,10 +687,7 @@ int ARGBSubtract(const uint8* src_argb0, int src_stride_argb0,
   void (*ARGBSubtractRow)(const uint8* src0, const uint8* src1, uint8* dst,
                           int width) = ARGBSubtractRow_C;
 #if defined(HAS_ARGBSUBTRACTROW_SSE2)
-  if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
-      IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
-      IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
-      IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
+  if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
     ARGBSubtractRow = ARGBSubtractRow_Any_SSE2;
     if (IS_ALIGNED(width, 4)) {
       ARGBSubtractRow = ARGBSubtractRow_SSE2;
......
@@ -732,7 +732,14 @@ void ARGBMultiplyRow_C(const uint8* src_argb0, const uint8* src_argb1,
 #undef REPEAT8
 #undef SHADE
-#define SHADE(f, v) ((v + f) > 255) ? 255 : (v + f)
+#ifdef __llvm__
+#define min0(v) ((-(v) >> 31) & (v))
+#define max255(v) (((256 - (v)) >> 31) | (v))
+#else
+#define min0(v) (((v) < 0) ? 0 : v)
+#define max255(v) (((v) > 255) ? 255 : (v))
+#endif
+#define SHADE(f, v) max255(v + f)
 void ARGBAddRow_C(const uint8* src_argb0, const uint8* src_argb1,
                   uint8* dst_argb, int width) {
@@ -756,7 +763,7 @@ void ARGBAddRow_C(const uint8* src_argb0, const uint8* src_argb1,
 }
 #undef SHADE
-#define SHADE(f, v) ((f - v) < 0) ? 0 : (f - v)
+#define SHADE(f, v) min0(f - v)
 void ARGBSubtractRow_C(const uint8* src_argb0, const uint8* src_argb1,
                        uint8* dst_argb, int width) {
@@ -779,6 +786,8 @@ void ARGBSubtractRow_C(const uint8* src_argb0, const uint8* src_argb1,
   }
 }
 #undef SHADE
+#undef min0
+#undef max255
 // Sobel functions which mimics SSSE3.
 void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
......
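Note: the __llvm__ branch above replaces the conditional clamps with branchless bit tricks. A standalone check of the arithmetic, a sketch assuming 32-bit int and an arithmetic (sign-propagating) right shift of negative values, which gcc, clang, and MSVC all provide:

#include <stdio.h>

#define MIN0(v)   ((-(v) >> 31) & (v))          /* clamp below at 0 */
#define MAX255(v) (((256 - (v)) >> 31) | (v))   /* saturate above 255 */

int main(void) {
  /* -(v) >> 31 is all-ones for v > 0 (the AND keeps v) and 0 for v <= 0. */
  printf("%d %d %d\n", MIN0(-5), MIN0(0), MIN0(200));    /* 0 0 200 */
  /* (256 - v) >> 31 is all-ones for v >= 257; OR-ing sets every bit,
     which truncates to 255 when the result is stored into a uint8. */
  printf("%d\n", (unsigned char)MAX255(300));            /* 255 */
  /* Boundary: v == 256 leaves the mask 0, escapes the clamp, and
     truncates to 0; the conventional form of the trick uses (255 - (v)). */
  printf("%d\n", (unsigned char)MAX255(256));            /* 0 */
  return 0;
}

Since ARGBAddRow_C feeds byte sums up to 510 through SHADE, a sum of exactly 256 (e.g. 128 + 128) is reachable; the ternary versions in the #else branch clamp that case correctly.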
@@ -4201,7 +4201,6 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
 #ifdef HAS_ARGBMULTIPLYROW_SSE2
 // Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
   asm volatile (
@@ -4212,10 +4211,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm2                  \n"
-    "movdqa    %%xmm0,%%xmm1                   \n"
-    "movdqa    %%xmm2,%%xmm3                   \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm2                  \n"
+    "movdqu    %%xmm0,%%xmm1                   \n"
+    "movdqu    %%xmm2,%%xmm3                   \n"
     "punpcklbw %%xmm0,%%xmm0                   \n"
     "punpckhbw %%xmm1,%%xmm1                   \n"
     "punpcklbw %%xmm5,%%xmm2                   \n"
@@ -4224,7 +4223,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     "pmulhuw   %%xmm3,%%xmm1                   \n"
     "packuswb  %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),  // %0
@@ -4242,7 +4241,6 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 #ifdef HAS_ARGBADDROW_SSE2
 // Add 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                      uint8* dst_argb, int width) {
   asm volatile (
@@ -4252,11 +4250,11 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm1                  \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm1                  \n"
     "paddusb   %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),  // %0
@@ -4274,7 +4272,6 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 #ifdef HAS_ARGBSUBTRACTROW_SSE2
 // Subtract 2 rows of ARGB pixels, 4 pixels at a time.
-// Aligned to 16 bytes.
 void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
   asm volatile (
@@ -4284,11 +4281,11 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     // 4 pixel loop.
     ".p2align  4                               \n"
   "1:                                          \n"
-    "movdqa    (%0),%%xmm0                     \n"
-    "movdqa    (%0,%1),%%xmm1                  \n"
+    "movdqu    (%0),%%xmm0                     \n"
+    "movdqu    (%0,%1),%%xmm1                  \n"
     "psubusb   %%xmm1,%%xmm0                   \n"
     "sub       $0x4,%3                         \n"
-    "movdqa    %%xmm0,(%0,%2,1)                \n"
+    "movdqu    %%xmm0,(%0,%2,1)                \n"
     "lea       0x10(%0),%0                     \n"
     "jg        1b                              \n"
   : "+r"(src_argb0),  // %0
......
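Note: all three posix loops above change only their memory operations: movdqa (aligned, faults on a misaligned address) becomes movdqu (unaligned). In intrinsics terms, a sketch of the resulting add loop (hypothetical function name, assuming width is a multiple of 4 pixels):

#include <emmintrin.h>  // SSE2
#include <stdint.h>

// Unaligned 16-byte loads/stores (_mm_loadu_si128/_mm_storeu_si128 map to
// movdqu) with a saturating byte add (_mm_adds_epu8 maps to paddusb).
static void AddRow_SSE2_sketch(const uint8_t* src0, const uint8_t* src1,
                               uint8_t* dst, int width) {
  for (int i = 0; i < width; i += 4) {  // 4 ARGB pixels = 16 bytes
    __m128i a = _mm_loadu_si128((const __m128i*)(src0 + i * 4));
    __m128i b = _mm_loadu_si128((const __m128i*)(src1 + i * 4));
    _mm_storeu_si128((__m128i*)(dst + i * 4), _mm_adds_epu8(a, b));
  }
}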
@@ -5223,7 +5223,6 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
 #ifdef HAS_ARGBMULTIPLYROW_SSE2
 // Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 __declspec(naked) __declspec(align(16))
 void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
@@ -5239,10 +5238,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     align      16
  convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm2, [eax + esi]  // read 4 pixels from src_argb1
-    movdqa     xmm1, xmm0
-    movdqa     xmm3, xmm2
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm2, [eax + esi]  // read 4 pixels from src_argb1
+    movdqu     xmm1, xmm0
+    movdqu     xmm3, xmm2
     punpcklbw  xmm0, xmm0         // first 2
     punpckhbw  xmm1, xmm1         // next 2
     punpcklbw  xmm2, xmm5         // first 2
@@ -5251,7 +5250,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     pmulhuw    xmm1, xmm3         // src_argb0 * src_argb1 next 2
     packuswb   xmm0, xmm1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
     jg         convertloop
@@ -5263,7 +5262,7 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 #ifdef HAS_ARGBADDROW_SSE2
 // Add 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
+// TODO(fbarchard): Port this to posix, neon and other math functions.
 __declspec(naked) __declspec(align(16))
 void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                      uint8* dst_argb, int width) {
@@ -5273,20 +5272,36 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     mov        esi, [esp + 4 + 8]   // src_argb1
     mov        edx, [esp + 4 + 12]  // dst_argb
     mov        ecx, [esp + 4 + 16]  // width
-    pxor       xmm5, xmm5  // constant 0
     sub        esi, eax
     sub        edx, eax
+    sub        ecx, 4
+    jl         convertloop49
     align      16
- convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm1, [eax + esi]  // read 4 pixels from src_argb1
+ convertloop4:
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm1, [eax + esi]  // read 4 pixels from src_argb1
     paddusb    xmm0, xmm1         // src_argb0 + src_argb1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
-    jg         convertloop
+    jge        convertloop4
+ convertloop49:
+    add        ecx, 4 - 1
+    jl         convertloop19
+ convertloop1:
+    movd       xmm0, [eax]        // read 1 pixel from src_argb0
+    movd       xmm1, [eax + esi]  // read 1 pixel from src_argb1
+    paddusb    xmm0, xmm1         // src_argb0 + src_argb1
+    sub        ecx, 1
+    movd       [eax + edx], xmm0
+    lea        eax, [eax + 4]
+    jge        convertloop1
+ convertloop19:
     pop        esi
     ret
   }
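Note: the rewritten Windows ARGBAddRow_SSE2 above handles any width. The counter trick: ecx is pre-biased by -4 so a single jge both tests and loops while at least 4 pixels remain, then add ecx, 4 - 1 re-biases it by -1 for the movd-based single-pixel tail. The same control flow in C, a sketch rather than the shipped code:

#include <stdint.h>

static uint8_t addus8(uint8_t a, uint8_t b) {  /* paddusb, one byte */
  int s = a + b;
  return (uint8_t)(s > 255 ? 255 : s);
}

/* 4-pixel main loop (convertloop4) plus 1-pixel tail (convertloop1). */
static void AddRow_any_width(const uint8_t* src0, const uint8_t* src1,
                             uint8_t* dst, int width) {
  int i = 0;
  for (; i + 4 <= width; i += 4) {   /* 16 bytes per pass */
    for (int b = 0; b < 16; ++b) {
      dst[i * 4 + b] = addus8(src0[i * 4 + b], src1[i * 4 + b]);
    }
  }
  for (; i < width; ++i) {           /* 4 bytes per pass */
    for (int b = 0; b < 4; ++b) {
      dst[i * 4 + b] = addus8(src0[i * 4 + b], src1[i * 4 + b]);
    }
  }
}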
@@ -5295,7 +5310,6 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
 #ifdef HAS_ARGBSUBTRACTROW_SSE2
 // Subtract 2 rows of ARGB pixels together, 4 pixels at a time.
-// Aligned to 16 bytes.
 __declspec(naked) __declspec(align(16))
 void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
                           uint8* dst_argb, int width) {
@@ -5310,11 +5324,11 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
     align      16
  convertloop:
-    movdqa     xmm0, [eax]        // read 4 pixels from src_argb0
-    movdqa     xmm1, [eax + esi]  // read 4 pixels from src_argb1
+    movdqu     xmm0, [eax]        // read 4 pixels from src_argb0
+    movdqu     xmm1, [eax + esi]  // read 4 pixels from src_argb1
     psubusb    xmm0, xmm1         // src_argb0 - src_argb1
     sub        ecx, 4
-    movdqa     [eax + edx], xmm0
+    movdqu     [eax + edx], xmm0
     lea        eax, [eax + 16]
     jg         convertloop
@@ -5373,7 +5387,6 @@ void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
     mov        esi, [esp + 4 + 8]   // src_argb1
     mov        edx, [esp + 4 + 12]  // dst_argb
     mov        ecx, [esp + 4 + 16]  // width
-    vpxor      ymm5, ymm5, ymm5  // constant 0
     sub        esi, eax
     sub        edx, eax
......
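Note: the dropped pxor xmm5 (in the SSE2 add) and vpxor ymm5 (here, in ARGBAddRow_AVX2) removed a zero constant the add kernels never use: paddusb and psubusb operate on packed bytes directly, whereas multiply needs the zero register to unpack bytes to words before pmulhuw. The constant was presumably carried over from the multiply routine.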