Commit 3028e1bd authored by Frank Barchard

clang-format row_gcc.cc with some functions disabled

BUG=libyuv:654
TEST=try bots build
R=kjellander@chromium.org

Review URL: https://codereview.chromium.org/2484083003 .
parent c2bc1561
@@ -16,8 +16,6 @@ namespace libyuv {
extern "C" {
#endif

// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER)))
@@ -25,165 +23,133 @@ extern "C" {
#if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)

// Constants for ARGB
static vec8 kARGBToY = {13, 65, 33, 0, 13, 65, 33, 0,
                        13, 65, 33, 0, 13, 65, 33, 0};

// JPeg full range.
static vec8 kARGBToYJ = {15, 75, 38, 0, 15, 75, 38, 0,
                         15, 75, 38, 0, 15, 75, 38, 0};
#endif  // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)

#if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)

static vec8 kARGBToU = {112, -74, -38, 0, 112, -74, -38, 0,
                        112, -74, -38, 0, 112, -74, -38, 0};

static vec8 kARGBToUJ = {127, -84, -43, 0, 127, -84, -43, 0,
                         127, -84, -43, 0, 127, -84, -43, 0};

static vec8 kARGBToV = {
    -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
};

static vec8 kARGBToVJ = {-20, -107, 127, 0, -20, -107, 127, 0,
                         -20, -107, 127, 0, -20, -107, 127, 0};

// Constants for BGRA
static vec8 kBGRAToY = {0, 33, 65, 13, 0, 33, 65, 13,
                        0, 33, 65, 13, 0, 33, 65, 13};

static vec8 kBGRAToU = {0, -38, -74, 112, 0, -38, -74, 112,
                        0, -38, -74, 112, 0, -38, -74, 112};

static vec8 kBGRAToV = {0, 112, -94, -18, 0, 112, -94, -18,
                        0, 112, -94, -18, 0, 112, -94, -18};

// Constants for ABGR
static vec8 kABGRToY = {33, 65, 13, 0, 33, 65, 13, 0,
                        33, 65, 13, 0, 33, 65, 13, 0};

static vec8 kABGRToU = {-38, -74, 112, 0, -38, -74, 112, 0,
                        -38, -74, 112, 0, -38, -74, 112, 0};

static vec8 kABGRToV = {112, -94, -18, 0, 112, -94, -18, 0,
                        112, -94, -18, 0, 112, -94, -18, 0};

// Constants for RGBA.
static vec8 kRGBAToY = {0, 13, 65, 33, 0, 13, 65, 33,
                        0, 13, 65, 33, 0, 13, 65, 33};

static vec8 kRGBAToU = {0, 112, -74, -38, 0, 112, -74, -38,
                        0, 112, -74, -38, 0, 112, -74, -38};

static vec8 kRGBAToV = {0, -18, -94, 112, 0, -18, -94, 112,
                        0, -18, -94, 112, 0, -18, -94, 112};

static uvec8 kAddY16 = {16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u,
                        16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u};

// 7 bit fixed point 0.5.
static vec16 kAddYJ64 = {64, 64, 64, 64, 64, 64, 64, 64};

static uvec8 kAddUV128 = {128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
                          128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};

static uvec16 kAddUVJ128 = {0x8080u, 0x8080u, 0x8080u, 0x8080u,
                            0x8080u, 0x8080u, 0x8080u, 0x8080u};
#endif  // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)
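In scalar terms these coefficients work out to the following sketch (illustrative helper names, not libyuv API; the SSSE3 rows apply the same coefficients 16 pixels at a time with pmaddubsw and then shift and bias the sums):

#include <stdint.h>

// Scalar sketch of the SIMD path. ARGB is stored B,G,R,A in memory, which
// matches the coefficient order in kARGBToY/kARGBToU/kARGBToV above.
static uint8_t ScalarARGBToY(uint8_t b, uint8_t g, uint8_t r) {
  return (uint8_t)(((13 * b + 65 * g + 33 * r) >> 7) + 16);  // + kAddY16
}

static uint8_t ScalarARGBToU(uint8_t b, uint8_t g, uint8_t r) {
  return (uint8_t)((112 * b - 74 * g - 38 * r + 0x8080) >> 8);  // 0x8080 bias
}

static uint8_t ScalarARGBToV(uint8_t b, uint8_t g, uint8_t r) {
  return (uint8_t)((-18 * b - 94 * g + 112 * r + 0x8080) >> 8);
}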
#ifdef HAS_RGB24TOARGBROW_SSSE3

// Shuffle table for converting RGB24 to ARGB.
static uvec8 kShuffleMaskRGB24ToARGB = {0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u,
                                        6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u};

// Shuffle table for converting RAW to ARGB.
static uvec8 kShuffleMaskRAWToARGB = {2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u,
                                      8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u};

// Shuffle table for converting RAW to RGB24. First 8.
static const uvec8 kShuffleMaskRAWToRGB24_0 = {
    2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u,
    128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};

// Shuffle table for converting RAW to RGB24. Middle 8.
static const uvec8 kShuffleMaskRAWToRGB24_1 = {
    2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u,
    128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};

// Shuffle table for converting RAW to RGB24. Last 8.
static const uvec8 kShuffleMaskRAWToRGB24_2 = {
    8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u,
    128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u};

// Shuffle table for converting ARGB to RGB24.
static uvec8 kShuffleMaskARGBToRGB24 = {
    0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u};

// Shuffle table for converting ARGB to RAW.
static uvec8 kShuffleMaskARGBToRAW = {
    2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u};

// Shuffle table for converting ARGBToRGB24 for I422ToRGB24. First 8 + next 4
static uvec8 kShuffleMaskARGBToRGB24_0 = {
    0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u};

// YUY2 shuf 16 Y to 32 Y.
static const lvec8 kShuffleYUY2Y = {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10,
                                    10, 12, 12, 14, 14, 0, 0, 2, 2, 4, 4,
                                    6, 6, 8, 8, 10, 10, 12, 12, 14, 14};

// YUY2 shuf 8 UV to 16 UV.
static const lvec8 kShuffleYUY2UV = {1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9,
                                     11, 13, 15, 13, 15, 1, 3, 1, 3, 5, 7,
                                     5, 7, 9, 11, 9, 11, 13, 15, 13, 15};

// UYVY shuf 16 Y to 32 Y.
static const lvec8 kShuffleUYVYY = {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11,
                                    11, 13, 13, 15, 15, 1, 1, 3, 3, 5, 5,
                                    7, 7, 9, 9, 11, 11, 13, 13, 15, 15};

// UYVY shuf 8 UV to 16 UV.
static const lvec8 kShuffleUYVYUV = {0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8,
                                     10, 12, 14, 12, 14, 0, 2, 0, 2, 4, 6,
                                     4, 6, 8, 10, 8, 10, 12, 14, 12, 14};

// NV21 shuf 8 VU to 16 UV.
static const lvec8 kShuffleNV21 = {
    1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
    1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6,
};
#endif  // HAS_RGB24TOARGBROW_SSSE3
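Each of these tables is a pshufb control vector: output byte i takes the source byte at index table[i], and any index with the high bit set (128u) yields zero. A small self-contained sketch with SSSE3 intrinsics, simplified to zero the alpha slots and OR in opaque alpha rather than preloading bytes 12..15 the way the row functions do:

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>  // SSSE3: _mm_shuffle_epi8

int main(void) {
  // 4 RGB24 pixels (bytes are B,G,R in memory) plus 4 bytes of padding.
  const uint8_t rgb24[16] = {10, 20, 30, 11, 21, 31, 12, 22,
                             32, 13, 23, 33, 0,  0,  0,  0};
  // Like kShuffleMaskRGB24ToARGB, but with 0x80 in the alpha slots so the
  // shuffle writes zero there instead of copying preloaded alpha bytes.
  const __m128i shuf = _mm_setr_epi8(0, 1, 2, -128, 3, 4, 5, -128,
                                     6, 7, 8, -128, 9, 10, 11, -128);
  __m128i v = _mm_loadu_si128((const __m128i*)rgb24);
  v = _mm_shuffle_epi8(v, shuf);                          // B,G,R,0 per pixel
  v = _mm_or_si128(v, _mm_set1_epi32((int)0xff000000u));  // alpha = 255
  uint8_t argb[16];
  _mm_storeu_si128((__m128i*)argb, v);
  for (int i = 0; i < 16; ++i) {
    printf("%d ", argb[i]);
  }
  printf("\n");
  return 0;
}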
@@ -571,98 +537,99 @@ void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int width) {
  );
}
void ARGBToRGB565DitherRow_SSE2(const uint8* src,
                                uint8* dst,
                                const uint32 dither4,
                                int width) {
  asm volatile(
      "movd %3,%%xmm6 \n"
      "punpcklbw %%xmm6,%%xmm6 \n"
      "movdqa %%xmm6,%%xmm7 \n"
      "punpcklwd %%xmm6,%%xmm6 \n"
      "punpckhwd %%xmm7,%%xmm7 \n"
      "pcmpeqb %%xmm3,%%xmm3 \n"
      "psrld $0x1b,%%xmm3 \n"
      "pcmpeqb %%xmm4,%%xmm4 \n"
      "psrld $0x1a,%%xmm4 \n"
      "pslld $0x5,%%xmm4 \n"
      "pcmpeqb %%xmm5,%%xmm5 \n"
      "pslld $0xb,%%xmm5 \n"

      LABELALIGN
      "1: \n"
      "movdqu (%0),%%xmm0 \n"
      "paddusb %%xmm6,%%xmm0 \n"
      "movdqa %%xmm0,%%xmm1 \n"
      "movdqa %%xmm0,%%xmm2 \n"
      "pslld $0x8,%%xmm0 \n"
      "psrld $0x3,%%xmm1 \n"
      "psrld $0x5,%%xmm2 \n"
      "psrad $0x10,%%xmm0 \n"
      "pand %%xmm3,%%xmm1 \n"
      "pand %%xmm4,%%xmm2 \n"
      "pand %%xmm5,%%xmm0 \n"
      "por %%xmm2,%%xmm1 \n"
      "por %%xmm1,%%xmm0 \n"
      "packssdw %%xmm0,%%xmm0 \n"
      "lea 0x10(%0),%0 \n"
      "movq %%xmm0,(%1) \n"
      "lea 0x8(%1),%1 \n"
      "sub $0x4,%2 \n"
      "jg 1b \n"
      : "+r"(src),    // %0
        "+r"(dst),    // %1
        "+r"(width)   // %2
      : "m"(dither4)  // %3
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7");
}
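Per pixel, the loop body above reduces to this scalar sketch (dither4 packs four per-column dither bytes; paddusb is a saturating unsigned add):

#include <stdint.h>

// Scalar sketch of one ARGB -> RGB565 pixel with dither; the SSE2 loop does
// four pixels at a time, giving column x the byte (dither4 >> ((x & 3) * 8)).
static uint16_t ARGBPixelToRGB565Dither(uint8_t b, uint8_t g, uint8_t r,
                                        uint8_t dither) {
  int b8 = b + dither, g8 = g + dither, r8 = r + dither;
  if (b8 > 255) b8 = 255;  // paddusb saturates at 255
  if (g8 > 255) g8 = 255;
  if (r8 > 255) r8 = 255;
  return (uint16_t)((b8 >> 3) | ((g8 >> 2) << 5) | ((r8 >> 3) << 11));
}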
#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
void ARGBToRGB565DitherRow_AVX2(const uint8* src,
                                uint8* dst,
                                const uint32 dither4,
                                int width) {
  asm volatile(
      "vbroadcastss %3,%%xmm6 \n"
      "vpunpcklbw %%xmm6,%%xmm6,%%xmm6 \n"
      "vpermq $0xd8,%%ymm6,%%ymm6 \n"
      "vpunpcklwd %%ymm6,%%ymm6,%%ymm6 \n"
      "vpcmpeqb %%ymm3,%%ymm3,%%ymm3 \n"
      "vpsrld $0x1b,%%ymm3,%%ymm3 \n"
      "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n"
      "vpsrld $0x1a,%%ymm4,%%ymm4 \n"
      "vpslld $0x5,%%ymm4,%%ymm4 \n"
      "vpslld $0xb,%%ymm3,%%ymm5 \n"

      LABELALIGN
      "1: \n"
      "vmovdqu (%0),%%ymm0 \n"
      "vpaddusb %%ymm6,%%ymm0,%%ymm0 \n"
      "vpsrld $0x5,%%ymm0,%%ymm2 \n"
      "vpsrld $0x3,%%ymm0,%%ymm1 \n"
      "vpsrld $0x8,%%ymm0,%%ymm0 \n"
      "vpand %%ymm4,%%ymm2,%%ymm2 \n"
      "vpand %%ymm3,%%ymm1,%%ymm1 \n"
      "vpand %%ymm5,%%ymm0,%%ymm0 \n"
      "vpor %%ymm2,%%ymm1,%%ymm1 \n"
      "vpor %%ymm1,%%ymm0,%%ymm0 \n"
      "vpackusdw %%ymm0,%%ymm0,%%ymm0 \n"
      "vpermq $0xd8,%%ymm0,%%ymm0 \n"
      "lea 0x20(%0),%0 \n"
      "vmovdqu %%xmm0,(%1) \n"
      "lea 0x10(%1),%1 \n"
      "sub $0x8,%2 \n"
      "jg 1b \n"
      "vzeroupper \n"
      : "+r"(src),    // %0
        "+r"(dst),    // %1
        "+r"(width)   // %2
      : "m"(dither4)  // %3
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7");
}
#endif  // HAS_ARGBTORGB565DITHERROW_AVX2

void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int width) {
  asm volatile (
    "pcmpeqb %%xmm4,%%xmm4 \n"
@@ -811,9 +778,7 @@ void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) {
#ifdef HAS_ARGBTOYROW_AVX2
// vpermd for vphaddw + vpackuswb vpermd.
static const lvec32 kPermdARGBToY_AVX = {0, 4, 1, 5, 2, 6, 3, 7};
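// (A note on the permute: vphaddw and vpackuswb operate within each 128-bit
// lane of a ymm register, so the final vpermd with this 0,4,1,5,... pattern
// is what puts the packed Y bytes back into pixel order across lanes.)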
// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
@@ -898,8 +863,11 @@ void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) {
#endif  // HAS_ARGBTOYJROW_AVX2

#ifdef HAS_ARGBTOUVROW_SSSE3
void ARGBToUVRow_SSSE3(const uint8* src_argb0,
                       int src_stride_argb,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  asm volatile (
    "movdqa %5,%%xmm3 \n"
    "movdqa %6,%%xmm4 \n"
@@ -963,11 +931,13 @@ void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
#ifdef HAS_ARGBTOUVROW_AVX2
// vpshufb for vphaddw + vpackuswb packed to shorts.
static const lvec8 kShufARGBToUV_AVX = {
    0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
    0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
void ARGBToUVRow_AVX2(const uint8* src_argb0,
                      int src_stride_argb,
                      uint8* dst_u,
                      uint8* dst_v,
                      int width) {
  asm volatile (
    "vbroadcastf128 %5,%%ymm5 \n"
    "vbroadcastf128 %6,%%ymm6 \n"
@@ -1026,8 +996,11 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
#endif  // HAS_ARGBTOUVROW_AVX2

#ifdef HAS_ARGBTOUVJROW_AVX2
void ARGBToUVJRow_AVX2(const uint8* src_argb0,
                       int src_stride_argb,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  asm volatile (
    "vbroadcastf128 %5,%%ymm5 \n"
    "vbroadcastf128 %6,%%ymm6 \n"
@@ -1087,8 +1060,11 @@ void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb,
#endif  // HAS_ARGBTOUVJROW_AVX2

#ifdef HAS_ARGBTOUVJROW_SSSE3
void ARGBToUVJRow_SSSE3(const uint8* src_argb0,
                        int src_stride_argb,
                        uint8* dst_u,
                        uint8* dst_v,
                        int width) {
  asm volatile (
    "movdqa %5,%%xmm3 \n"
    "movdqa %6,%%xmm4 \n"
@@ -1151,7 +1127,9 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
#endif  // HAS_ARGBTOUVJROW_SSSE3

#ifdef HAS_ARGBTOUV444ROW_SSSE3
void ARGBToUV444Row_SSSE3(const uint8* src_argb,
                          uint8* dst_u,
                          uint8* dst_v,
                          int width) {
  asm volatile (
    "movdqa %4,%%xmm3 \n"
@@ -1241,8 +1219,11 @@ void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int width) {
  );
}

void BGRAToUVRow_SSSE3(const uint8* src_bgra0,
                       int src_stride_bgra,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  asm volatile (
    "movdqa %5,%%xmm3 \n"
    "movdqa %6,%%xmm4 \n"
@@ -1370,8 +1351,11 @@ void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int width) {
  );
}

void ABGRToUVRow_SSSE3(const uint8* src_abgr0,
                       int src_stride_abgr,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  asm volatile (
    "movdqa %5,%%xmm3 \n"
    "movdqa %6,%%xmm4 \n"
@@ -1431,8 +1415,11 @@ void ABGRToUVRow_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
  );
}

void RGBAToUVRow_SSSE3(const uint8* src_rgba0,
                       int src_stride_rgba,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  asm volatile (
    "movdqa %5,%%xmm3 \n"
    "movdqa %6,%%xmm4 \n"
@@ -1495,8 +1482,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
#if defined(HAS_I422TOARGBROW_SSSE3) || defined(HAS_I422TOARGBROW_AVX2)

// Read 8 UV from 444
#define READYUV444 \
    "movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(movq, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
    "punpcklbw %%xmm1,%%xmm0 \n" \
@@ -1505,8 +1492,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"

// Read 4 UV from 422, upsample to 8 UV
#define READYUV422 \
    "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
    "punpcklbw %%xmm1,%%xmm0 \n" \
@@ -1516,8 +1503,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"

// Read 4 UV from 422, upsample to 8 UV. With 8 Alpha.
#define READYUVA422 \
    "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
    "punpcklbw %%xmm1,%%xmm0 \n" \
@@ -1529,8 +1516,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x8, [a_buf]) ",%[a_buf] \n"

// Read 4 UV from NV12, upsample to 8 UV
#define READNV12 \
    "movq " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
    "lea " MEMLEA(0x8, [uv_buf]) ",%[uv_buf] \n" \
    "punpcklwd %%xmm0,%%xmm0 \n" \
    "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \
@@ -1538,8 +1525,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"

// Read 4 VU from NV21, upsample to 8 UV
#define READNV21 \
    "movq " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
    "lea " MEMLEA(0x8, [vu_buf]) ",%[vu_buf] \n" \
    "pshufb %[kShuffleNV21], %%xmm0 \n" \
    "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \
@@ -1547,24 +1534,24 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n"

// Read 4 YUY2 with 8 Y and update 4 UV to 8 UV.
#define READYUY2 \
    "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm4 \n" \
    "pshufb %[kShuffleYUY2Y], %%xmm4 \n" \
    "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm0 \n" \
    "pshufb %[kShuffleYUY2UV], %%xmm0 \n" \
    "lea " MEMLEA(0x10, [yuy2_buf]) ",%[yuy2_buf] \n"

// Read 4 UYVY with 8 Y and update 4 UV to 8 UV.
#define READUYVY \
    "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm4 \n" \
    "pshufb %[kShuffleUYVYY], %%xmm4 \n" \
    "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm0 \n" \
    "pshufb %[kShuffleUYVYUV], %%xmm0 \n" \
    "lea " MEMLEA(0x10, [uyvy_buf]) ",%[uyvy_buf] \n"
#if defined(__x86_64__)
#define YUVTORGB_SETUP(yuvconstants) \
    "movdqa " MEMACCESS([yuvconstants]) ",%%xmm8 \n" \
    "movdqa " MEMACCESS2(32, [yuvconstants]) ",%%xmm9 \n" \
    "movdqa " MEMACCESS2(64, [yuvconstants]) ",%%xmm10 \n" \
    "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm11 \n" \
@@ -1572,37 +1559,37 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "movdqa " MEMACCESS2(160, [yuvconstants]) ",%%xmm13 \n" \
    "movdqa " MEMACCESS2(192, [yuvconstants]) ",%%xmm14 \n"

// Convert 8 pixels: 8 UV and 8 Y
#define YUVTORGB(yuvconstants) \
    "movdqa %%xmm0,%%xmm1 \n" \
    "movdqa %%xmm0,%%xmm2 \n" \
    "movdqa %%xmm0,%%xmm3 \n" \
    "movdqa %%xmm11,%%xmm0 \n" \
    "pmaddubsw %%xmm8,%%xmm1 \n" \
    "psubw %%xmm1,%%xmm0 \n" \
    "movdqa %%xmm12,%%xmm1 \n" \
    "pmaddubsw %%xmm9,%%xmm2 \n" \
    "psubw %%xmm2,%%xmm1 \n" \
    "movdqa %%xmm13,%%xmm2 \n" \
    "pmaddubsw %%xmm10,%%xmm3 \n" \
    "psubw %%xmm3,%%xmm2 \n" \
    "pmulhuw %%xmm14,%%xmm4 \n" \
    "paddsw %%xmm4,%%xmm0 \n" \
    "paddsw %%xmm4,%%xmm1 \n" \
    "paddsw %%xmm4,%%xmm2 \n" \
    "psraw $0x6,%%xmm0 \n" \
    "psraw $0x6,%%xmm1 \n" \
    "psraw $0x6,%%xmm2 \n" \
    "packuswb %%xmm0,%%xmm0 \n" \
    "packuswb %%xmm1,%%xmm1 \n" \
    "packuswb %%xmm2,%%xmm2 \n"
#define YUVTORGB_REGS \
    "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
#else
#define YUVTORGB_SETUP(yuvconstants)
// Convert 8 pixels: 8 UV and 8 Y
#define YUVTORGB(yuvconstants) \
    "movdqa %%xmm0,%%xmm1 \n" \
    "movdqa %%xmm0,%%xmm2 \n" \
    "movdqa %%xmm0,%%xmm3 \n" \
    "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm0 \n" \
@@ -1628,8 +1615,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
#endif
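The transform these macros implement, written out as a scalar sketch with the familiar BT.601 8.8 fixed-point constants (the vector path loads equivalent per-channel gains and biases from YuvConstants and works via pmaddubsw/pmulhuw, so the exact rounding differs slightly):

#include <stdint.h>

static uint8_t Clamp255(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

// One pixel of studio-swing BT.601 YUV -> RGB in 8.8 fixed point.
static void YuvPixelToRGB(uint8_t y, uint8_t u, uint8_t v,
                          uint8_t* b, uint8_t* g, uint8_t* r) {
  int y1 = (y - 16) * 298;  // 1.164 * 256
  *r = Clamp255((y1 + 409 * (v - 128) + 128) >> 8);  // 1.596 * V
  *g = Clamp255((y1 - 100 * (u - 128) - 208 * (v - 128) + 128) >> 8);
  *b = Clamp255((y1 + 516 * (u - 128) + 128) >> 8);  // 2.018 * U
}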
// Store 8 ARGB values.
#define STOREARGB \
    "punpcklbw %%xmm1,%%xmm0 \n" \
    "punpcklbw %%xmm5,%%xmm2 \n" \
    "movdqa %%xmm0,%%xmm1 \n" \
    "punpcklwd %%xmm2,%%xmm0 \n" \
@@ -1639,8 +1626,8 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
    "lea " MEMLEA(0x20, [dst_argb]) ", %[dst_argb] \n"

// Store 8 RGBA values.
#define STORERGBA \
    "pcmpeqb %%xmm5,%%xmm5 \n" \
    "punpcklbw %%xmm2,%%xmm1 \n" \
    "punpcklbw %%xmm0,%%xmm5 \n" \
    "movdqa %%xmm5,%%xmm0 \n" \
@@ -1759,6 +1746,7 @@ void OMITFP I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
                                     uint8* dst_argb,
                                     const struct YuvConstants* yuvconstants,
                                     int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP(yuvconstants)
    "sub %[u_buf],%[v_buf] \n"
@@ -1783,6 +1771,7 @@ void OMITFP I422AlphaToARGBRow_SSSE3(const uint8* y_buf,
    : "memory", "cc", NACL_R14 YUVTORGB_REGS
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_I422ALPHATOARGBROW_SSSE3

@@ -1791,6 +1780,7 @@ void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf,
                                uint8* dst_argb,
                                const struct YuvConstants* yuvconstants,
                                int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP(yuvconstants)
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -1809,6 +1799,7 @@ void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf,
    : "memory", "cc", YUVTORGB_REGS  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}

void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
@@ -1816,6 +1807,7 @@ void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
                                uint8* dst_argb,
                                const struct YuvConstants* yuvconstants,
                                int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP(yuvconstants)
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -1835,12 +1827,14 @@ void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
    : "memory", "cc", YUVTORGB_REGS  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
void OMITFP YUY2ToARGBRow_SSSE3(const uint8* yuy2_buf,
                                uint8* dst_argb,
                                const struct YuvConstants* yuvconstants,
                                int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP(yuvconstants)
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -1860,12 +1854,14 @@ void OMITFP YUY2ToARGBRow_SSSE3(const uint8* yuy2_buf,
    : "memory", "cc", YUVTORGB_REGS  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}

void OMITFP UYVYToARGBRow_SSSE3(const uint8* uyvy_buf,
                                uint8* dst_argb,
                                const struct YuvConstants* yuvconstants,
                                int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP(yuvconstants)
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -1885,6 +1881,7 @@ void OMITFP UYVYToARGBRow_SSSE3(const uint8* uyvy_buf,
    : "memory", "cc", YUVTORGB_REGS  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}

void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
@@ -1918,8 +1915,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
#endif  // HAS_I422TOARGBROW_SSSE3
// Read 16 UV from 444
#define READYUV444_AVX2 \
    "vmovdqu " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(vmovdqu, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x10, [u_buf]) ",%[u_buf] \n" \
    "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
@@ -1931,8 +1928,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
    "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"

// Read 8 UV from 422, upsample to 16 UV.
#define READYUV422_AVX2 \
    "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
    "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
@@ -1944,8 +1941,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
    "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"

// Read 8 UV from 422, upsample to 16 UV. With 16 Alpha.
#define READYUVA422_AVX2 \
    "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
    MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \
    "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
    "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
@@ -1960,8 +1957,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
    "lea " MEMLEA(0x10, [a_buf]) ",%[a_buf] \n"

// Read 8 UV from NV12, upsample to 16 UV.
#define READNV12_AVX2 \
    "vmovdqu " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
    "lea " MEMLEA(0x10, [uv_buf]) ",%[uv_buf] \n" \
    "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
    "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \
@@ -1971,8 +1968,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
    "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"

// Read 8 VU from NV21, upsample to 16 UV.
#define READNV21_AVX2 \
    "vmovdqu " MEMACCESS([vu_buf]) ",%%xmm0 \n" \
    "lea " MEMLEA(0x10, [vu_buf]) ",%[vu_buf] \n" \
    "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
    "vpshufb %[kShuffleNV21], %%ymm0, %%ymm0 \n" \
@@ -1982,53 +1979,53 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
    "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n"

// Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV.
#define READYUY2_AVX2 \
    "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm4 \n" \
    "vpshufb %[kShuffleYUY2Y], %%ymm4, %%ymm4 \n" \
    "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm0 \n" \
    "vpshufb %[kShuffleYUY2UV], %%ymm0, %%ymm0 \n" \
    "lea " MEMLEA(0x20, [yuy2_buf]) ",%[yuy2_buf] \n"

// Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV.
#define READUYVY_AVX2 \
    "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm4 \n" \
    "vpshufb %[kShuffleUYVYY], %%ymm4, %%ymm4 \n" \
    "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm0 \n" \
    "vpshufb %[kShuffleUYVYUV], %%ymm0, %%ymm0 \n" \
    "lea " MEMLEA(0x20, [uyvy_buf]) ",%[uyvy_buf] \n"
#if defined(__x86_64__)
#define YUVTORGB_SETUP_AVX2(yuvconstants) \
    "vmovdqa " MEMACCESS([yuvconstants]) ",%%ymm8 \n" \
    "vmovdqa " MEMACCESS2(32, [yuvconstants]) ",%%ymm9 \n" \
    "vmovdqa " MEMACCESS2(64, [yuvconstants]) ",%%ymm10 \n" \
    "vmovdqa " MEMACCESS2(96, [yuvconstants]) ",%%ymm11 \n" \
    "vmovdqa " MEMACCESS2(128, [yuvconstants]) ",%%ymm12 \n" \
    "vmovdqa " MEMACCESS2(160, [yuvconstants]) ",%%ymm13 \n" \
    "vmovdqa " MEMACCESS2(192, [yuvconstants]) ",%%ymm14 \n"

#define YUVTORGB_AVX2(yuvconstants) \
    "vpmaddubsw %%ymm10,%%ymm0,%%ymm2 \n" \
    "vpmaddubsw %%ymm9,%%ymm0,%%ymm1 \n" \
    "vpmaddubsw %%ymm8,%%ymm0,%%ymm0 \n" \
    "vpsubw %%ymm2,%%ymm13,%%ymm2 \n" \
    "vpsubw %%ymm1,%%ymm12,%%ymm1 \n" \
    "vpsubw %%ymm0,%%ymm11,%%ymm0 \n" \
    "vpmulhuw %%ymm14,%%ymm4,%%ymm4 \n" \
    "vpaddsw %%ymm4,%%ymm0,%%ymm0 \n" \
    "vpaddsw %%ymm4,%%ymm1,%%ymm1 \n" \
    "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" \
    "vpsraw $0x6,%%ymm0,%%ymm0 \n" \
    "vpsraw $0x6,%%ymm1,%%ymm1 \n" \
    "vpsraw $0x6,%%ymm2,%%ymm2 \n" \
    "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \
    "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \
    "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n"
#define YUVTORGB_REGS_AVX2 \
    "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
#else  // Convert 16 pixels: 16 UV and 16 Y.
#define YUVTORGB_SETUP_AVX2(yuvconstants)
#define YUVTORGB_AVX2(yuvconstants) \
    "vpmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%ymm0,%%ymm2 \n" \
    "vpmaddubsw " MEMACCESS2(32, [yuvconstants]) ",%%ymm0,%%ymm1 \n" \
    "vpmaddubsw " MEMACCESS([yuvconstants]) ",%%ymm0,%%ymm0 \n" \
    "vmovdqu " MEMACCESS2(160, [yuvconstants]) ",%%ymm3 \n" \
@@ -2051,8 +2048,8 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
#endif
// Store 16 ARGB values.
#define STOREARGB_AVX2 \
    "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
    "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
    "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" \
    "vpermq $0xd8,%%ymm2,%%ymm2 \n" \
@@ -2132,12 +2129,13 @@ void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf,
// 16 pixels
// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB.
void OMITFP I422AlphaToARGBRow_AVX2(const uint8* y_buf,
                                    const uint8* u_buf,
                                    const uint8* v_buf,
                                    const uint8* a_buf,
                                    uint8* dst_argb,
                                    const struct YuvConstants* yuvconstants,
                                    int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP_AVX2(yuvconstants)
    "sub %[u_buf],%[v_buf] \n"
@@ -2163,6 +2161,7 @@ void OMITFP I422AlphaToARGBRow_AVX2(const uint8* y_buf,
    : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_I422ALPHATOARGBROW_AVX2

@@ -2217,6 +2216,7 @@ void OMITFP NV12ToARGBRow_AVX2(const uint8* y_buf,
                               uint8* dst_argb,
                               const struct YuvConstants* yuvconstants,
                               int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP_AVX2(yuvconstants)
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
@@ -2236,6 +2236,7 @@ void OMITFP NV12ToARGBRow_AVX2(const uint8* y_buf,
    : "memory", "cc", YUVTORGB_REGS_AVX2  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_NV12TOARGBROW_AVX2

@@ -2247,6 +2248,7 @@ void OMITFP NV21ToARGBRow_AVX2(const uint8* y_buf,
                               uint8* dst_argb,
                               const struct YuvConstants* yuvconstants,
                               int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP_AVX2(yuvconstants)
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
@@ -2267,6 +2269,7 @@ void OMITFP NV21ToARGBRow_AVX2(const uint8* y_buf,
    : "memory", "cc", YUVTORGB_REGS_AVX2  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_NV21TOARGBROW_AVX2

@@ -2277,6 +2280,7 @@ void OMITFP YUY2ToARGBRow_AVX2(const uint8* yuy2_buf,
                               uint8* dst_argb,
                               const struct YuvConstants* yuvconstants,
                               int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP_AVX2(yuvconstants)
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
@@ -2297,6 +2301,7 @@ void OMITFP YUY2ToARGBRow_AVX2(const uint8* yuy2_buf,
    : "memory", "cc", YUVTORGB_REGS_AVX2  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_YUY2TOARGBROW_AVX2

@@ -2307,6 +2312,7 @@ void OMITFP UYVYToARGBRow_AVX2(const uint8* uyvy_buf,
                               uint8* dst_argb,
                               const struct YuvConstants* yuvconstants,
                               int width) {
  // clang-format off
  asm volatile (
    YUVTORGB_SETUP_AVX2(yuvconstants)
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
@@ -2327,6 +2333,7 @@ void OMITFP UYVYToARGBRow_AVX2(const uint8* uyvy_buf,
    : "memory", "cc", YUVTORGB_REGS_AVX2  // Does not use r14.
      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
  );
  // clang-format on
}
#endif  // HAS_UYVYTOARGBROW_AVX2

@@ -2424,9 +2431,8 @@ void I400ToARGBRow_AVX2(const uint8* y_buf, uint8* dst_argb, int width) {
#ifdef HAS_MIRRORROW_SSSE3
// Shuffle table for reversing the bytes.
static uvec8 kShuffleMirror = {15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u,
                               7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u};

void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
  intptr_t temp_width = (intptr_t)(width);
@@ -2477,10 +2483,11 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
#ifdef HAS_MIRRORUVROW_SSSE3
// Shuffle table for reversing the bytes of UV channels.
static uvec8 kShuffleMirrorUV = {14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u,
                                 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u};
void MirrorUVRow_SSSE3(const uint8* src,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  intptr_t temp_width = (intptr_t)(width);
  asm volatile (
@@ -2535,9 +2542,7 @@ void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
#ifdef HAS_ARGBMIRRORROW_AVX2
// Shuffle table for reversing the bytes.
static const ulvec32 kARGBShuffleMirror_AVX2 = {7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u};

void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
  intptr_t temp_width = (intptr_t)(width);
  asm volatile (
@@ -2561,7 +2566,9 @@ void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
#endif  // HAS_ARGBMIRRORROW_AVX2

#ifdef HAS_SPLITUVROW_AVX2
void SplitUVRow_AVX2(const uint8* src_uv,
                     uint8* dst_u,
                     uint8* dst_v,
                     int width) {
  asm volatile (
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
@@ -2598,7 +2605,9 @@ void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
#endif  // HAS_SPLITUVROW_AVX2

#ifdef HAS_SPLITUVROW_SSE2
void SplitUVRow_SSE2(const uint8* src_uv,
                     uint8* dst_u,
                     uint8* dst_v,
                     int width) {
  asm volatile (
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -2634,7 +2643,9 @@ void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
#endif  // HAS_SPLITUVROW_SSE2

#ifdef HAS_MERGEUVROW_AVX2
void MergeUVRow_AVX2(const uint8* src_u,
                     const uint8* src_v,
                     uint8* dst_uv,
                     int width) {
  asm volatile (
    "sub %0,%1 \n"
@@ -2665,7 +2676,9 @@ void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
#endif  // HAS_MERGEUVROW_AVX2

#ifdef HAS_MERGEUVROW_SSE2
void MergeUVRow_SSE2(const uint8* src_u,
                     const uint8* src_v,
                     uint8* dst_uv,
                     int width) {
  asm volatile (
    "sub %0,%1 \n"
@@ -2759,14 +2772,12 @@ void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
// Multiple of 1.
void CopyRow_ERMS(const uint8* src, uint8* dst, int width) {
  size_t width_tmp = (size_t)(width);
  asm volatile("rep movsb " MEMMOVESTRING(0, 1) " \n"
               : "+S"(src),       // %0
                 "+D"(dst),       // %1
                 "+c"(width_tmp)  // %2
               :
               : "memory", "cc");
}
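// (For context: "rep movsb" copies %ecx bytes from (%esi) to (%edi). On CPUs
// that advertise the ERMS feature, the microcoded string move is fast enough
// to compete with the SIMD copies above, hence this variant beside
// CopyRow_SSE2/CopyRow_AVX.)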
#endif  // HAS_COPYROW_ERMS
@@ -2838,7 +2849,7 @@ void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
#ifdef HAS_ARGBEXTRACTALPHAROW_SSE2
// width in pixels
void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) {
  asm volatile (
    LABELALIGN
    "1: \n"
    "movdqu " MEMACCESS(0) ", %%xmm0 \n"
@@ -2864,12 +2875,11 @@ void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) {
#ifdef HAS_ARGBEXTRACTALPHAROW_AVX2
static const uvec8 kShuffleAlphaShort_AVX2 = {
    3u, 128u, 128u, 128u, 7u, 128u, 128u, 128u,
    11u, 128u, 128u, 128u, 15u, 128u, 128u, 128u};

void ARGBExtractAlphaRow_AVX2(const uint8* src_argb, uint8* dst_a, int width) {
  asm volatile (
    "vmovdqa %3,%%ymm4 \n"
    "vbroadcastf128 %4,%%ymm5 \n"
    LABELALIGN
@@ -2976,32 +2986,29 @@ void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
void SetRow_X86(uint8* dst, uint8 v8, int width) {
  size_t width_tmp = (size_t)(width >> 2);
  const uint32 v32 = v8 * 0x01010101u;  // Duplicate byte to all bytes.
  asm volatile("rep stosl " MEMSTORESTRING(eax, 0) " \n"
               : "+D"(dst),       // %0
                 "+c"(width_tmp)  // %1
               : "a"(v32)         // %2
               : "memory", "cc");
}
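The multiply on v32 is a byte splat, so each 4-byte rep stosl store writes four copies of v8; a quick self-contained check:

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint8_t v8 = 0x5a;
  uint32_t v32 = v8 * 0x01010101u;  // 0x5a -> 0x5a5a5a5a
  assert(v32 == 0x5a5a5a5au);
  return 0;
}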
void SetRow_ERMS(uint8* dst, uint8 v8, int width) {
  size_t width_tmp = (size_t)(width);
  asm volatile("rep stosb " MEMSTORESTRING(al, 0) " \n"
               : "+D"(dst),       // %0
                 "+c"(width_tmp)  // %1
               : "a"(v8)          // %2
               : "memory", "cc");
}

void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int width) {
  size_t width_tmp = (size_t)(width);
  asm volatile("rep stosl " MEMSTORESTRING(eax, 0) " \n"
               : "+D"(dst_argb),  // %0
                 "+c"(width_tmp)  // %1
               : "a"(v32)         // %2
               : "memory", "cc");
}
#endif  // HAS_SETROW_X86
...@@ -3031,8 +3038,11 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int width) { ...@@ -3031,8 +3038,11 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int width) {
  );
}

void YUY2ToUVRow_SSE2(const uint8* src_yuy2,
                      int stride_yuy2,
                      uint8* dst_u,
                      uint8* dst_v,
                      int width) {
  asm volatile (
    "pcmpeqb %%xmm5,%%xmm5 \n"
    "psrlw $0x8,%%xmm5 \n"
@@ -3070,7 +3080,9 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
}

void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
                         uint8* dst_u,
                         uint8* dst_v,
                         int width) {
  asm volatile (
    "pcmpeqb %%xmm5,%%xmm5 \n"
    "psrlw $0x8,%%xmm5 \n"
@@ -3126,8 +3138,11 @@ void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int width) {
  );
}

void UYVYToUVRow_SSE2(const uint8* src_uyvy,
                      int stride_uyvy,
                      uint8* dst_u,
                      uint8* dst_v,
                      int width) {
  asm volatile (
    "pcmpeqb %%xmm5,%%xmm5 \n"
    "psrlw $0x8,%%xmm5 \n"
@@ -3165,7 +3180,9 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
}

void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
                         uint8* dst_u,
                         uint8* dst_v,
                         int width) {
  asm volatile (
    "pcmpeqb %%xmm5,%%xmm5 \n"
    "psrlw $0x8,%%xmm5 \n"
@@ -3227,8 +3244,11 @@ void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width) {
  );
}

void YUY2ToUVRow_AVX2(const uint8* src_yuy2,
                      int stride_yuy2,
                      uint8* dst_u,
                      uint8* dst_v,
                      int width) {
  asm volatile (
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
    "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
@@ -3267,7 +3287,9 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
}

void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
                         uint8* dst_u,
                         uint8* dst_v,
                         int width) {
  asm volatile (
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
    "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
@@ -3327,8 +3349,11 @@ void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int width) {
, "xmm0", "xmm1", "xmm5" , "xmm0", "xmm1", "xmm5"
); );
} }
void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, void UYVYToUVRow_AVX2(const uint8* src_uyvy,
uint8* dst_u, uint8* dst_v, int width) { int stride_uyvy,
uint8* dst_u,
uint8* dst_v,
int width) {
asm volatile ( asm volatile (
"vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
"vpsrlw $0x8,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
@@ -3368,7 +3393,9 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
}

void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
                         uint8* dst_u,
                         uint8* dst_v,
                         int width) {
  asm volatile (
    "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
    "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
@@ -3407,14 +3434,14 @@ void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
#ifdef HAS_ARGBBLENDROW_SSSE3
// Shuffle table for isolating alpha.
static uvec8 kShuffleAlpha = {3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
                              11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80};
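The 0x80 entries in kShuffleAlpha are what make the isolation work: pshufb zeroes any destination byte whose control byte has its high bit set, so each alpha byte lands in a 16-bit lane with a zero high byte. A scalar model of that rule, for illustration only:

#include <stdint.h>

// Scalar model of SSSE3 pshufb: an index with the high bit set yields 0;
// otherwise the low 4 bits select one of the 16 source bytes.
static void PshufbModel(const uint8_t src[16], const uint8_t ctl[16],
                        uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    dst[i] = (ctl[i] & 0x80) ? 0 : src[ctl[i] & 0x0f];
  }
}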
// Blend 8 pixels at a time
void ARGBBlendRow_SSSE3(const uint8* src_argb0,
                        const uint8* src_argb1,
                        uint8* dst_argb,
                        int width) {
  asm volatile (
    "pcmpeqb %%xmm7,%%xmm7 \n"
    "psrlw $0xf,%%xmm7 \n"
@@ -3499,46 +3526,49 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
// =((A2*C2)+(B2*(255-C2))+255)/256
// signed version of math
// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
void BlendPlaneRow_SSSE3(const uint8* src0,
                         const uint8* src1,
                         const uint8* alpha,
                         uint8* dst,
                         int width) {
  asm volatile(
      "pcmpeqb %%xmm5,%%xmm5 \n"
      "psllw $0x8,%%xmm5 \n"
      "mov $0x80808080,%%eax \n"
      "movd %%eax,%%xmm6 \n"
      "pshufd $0x0,%%xmm6,%%xmm6 \n"
      "mov $0x807f807f,%%eax \n"
      "movd %%eax,%%xmm7 \n"
      "pshufd $0x0,%%xmm7,%%xmm7 \n"
      "sub %2,%0 \n"
      "sub %2,%1 \n"
      "sub %2,%3 \n"

      // 8 pixel loop.
      LABELALIGN
      "1: \n"
      "movq (%2),%%xmm0 \n"
      "punpcklbw %%xmm0,%%xmm0 \n"
      "pxor %%xmm5,%%xmm0 \n"
      "movq (%0,%2,1),%%xmm1 \n"
      "movq (%1,%2,1),%%xmm2 \n"
      "punpcklbw %%xmm2,%%xmm1 \n"
      "psubb %%xmm6,%%xmm1 \n"
      "pmaddubsw %%xmm1,%%xmm0 \n"
      "paddw %%xmm7,%%xmm0 \n"
      "psrlw $0x8,%%xmm0 \n"
      "packuswb %%xmm0,%%xmm0 \n"
      "movq %%xmm0,(%3,%2,1) \n"
      "lea 0x8(%2),%2 \n"
      "sub $0x8,%4 \n"
      "jg 1b \n"
      : "+r"(src0),   // %0
        "+r"(src1),   // %1
        "+r"(alpha),  // %2
        "+r"(dst),    // %3
        "+rm"(width)  // %4
      ::"memory",
      "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7");
}
#endif  // HAS_BLENDPLANEROW_SSSE3
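The unsigned form of the blend math quoted above is easy to check in scalar code; the signed rearrangement exists only so pmaddubsw can form both products in one instruction. A per-byte sketch of the unsigned formula:

#include <stdint.h>

// dst = (a * src0 + (255 - a) * src1 + 255) / 256: returns src0 at a = 255
// and src1 at a = 0, matching the comment's unsigned formula.
static uint8_t BlendPixel(uint8_t src0, uint8_t src1, uint8_t a) {
  return (uint8_t)(((uint32_t)src0 * a + (uint32_t)src1 * (255 - a) + 255) >>
                   8);
}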
@@ -3548,67 +3578,67 @@ void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
// =((A2*C2)+(B2*(255-C2))+255)/256
// signed version of math
// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
void BlendPlaneRow_AVX2(const uint8* src0,
                        const uint8* src1,
                        const uint8* alpha,
                        uint8* dst,
                        int width) {
  asm volatile(
      "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
      "vpsllw $0x8,%%ymm5,%%ymm5 \n"
      "mov $0x80808080,%%eax \n"
      "vmovd %%eax,%%xmm6 \n"
      "vbroadcastss %%xmm6,%%ymm6 \n"
      "mov $0x807f807f,%%eax \n"
      "vmovd %%eax,%%xmm7 \n"
      "vbroadcastss %%xmm7,%%ymm7 \n"
      "sub %2,%0 \n"
      "sub %2,%1 \n"
      "sub %2,%3 \n"

      // 32 pixel loop.
      LABELALIGN
      "1: \n"
      "vmovdqu (%2),%%ymm0 \n"
      "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n"
      "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n"
      "vpxor %%ymm5,%%ymm3,%%ymm3 \n"
      "vpxor %%ymm5,%%ymm0,%%ymm0 \n"
      "vmovdqu (%0,%2,1),%%ymm1 \n"
      "vmovdqu (%1,%2,1),%%ymm2 \n"
      "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n"
      "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n"
      "vpsubb %%ymm6,%%ymm4,%%ymm4 \n"
      "vpsubb %%ymm6,%%ymm1,%%ymm1 \n"
      "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
      "vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n"
      "vpaddw %%ymm7,%%ymm3,%%ymm3 \n"
      "vpaddw %%ymm7,%%ymm0,%%ymm0 \n"
      "vpsrlw $0x8,%%ymm3,%%ymm3 \n"
      "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
      "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n"
      "vmovdqu %%ymm0,(%3,%2,1) \n"
      "lea 0x20(%2),%2 \n"
      "sub $0x20,%4 \n"
      "jg 1b \n"
      "vzeroupper \n"
      : "+r"(src0),   // %0
        "+r"(src1),   // %1
        "+r"(alpha),  // %2
        "+r"(dst),    // %3
        "+rm"(width)  // %4
      ::"memory",
      "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
      "xmm7");
}
#endif  // HAS_BLENDPLANEROW_AVX2
#ifdef HAS_ARGBATTENUATEROW_SSSE3
// Shuffle table duplicating alpha
static uvec8 kShuffleAlpha0 = {3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u,
                               7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u};
static uvec8 kShuffleAlpha1 = {11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
                               15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u};

// Attenuate 4 pixels at a time.
void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
  asm volatile (
@@ -3654,9 +3684,9 @@ void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
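Attenuation premultiplies each color channel by its alpha. A scalar sketch of roughly what the shuffle-and-multiply code computes per pixel; the SIMD rounding differs slightly, so treat this as the formula rather than a bit-exact model:

#include <stdint.h>

// Premultiply B, G and R by alpha; alpha itself passes through.
static void AttenuatePixel(const uint8_t argb[4], uint8_t out[4]) {
  const uint32_t a = argb[3];
  out[0] = (uint8_t)((argb[0] * a) >> 8);  // B
  out[1] = (uint8_t)((argb[1] * a) >> 8);  // G
  out[2] = (uint8_t)((argb[2] * a) >> 8);  // R
  out[3] = (uint8_t)a;
}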
#ifdef HAS_ARGBATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kShuffleAlpha_AVX2 = {6u, 7u, 6u, 7u, 6u, 7u,
                                         128u, 128u, 14u, 15u, 14u, 15u,
                                         14u, 15u, 128u, 128u};

// Attenuate 8 pixels at a time.
void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
  asm volatile (
@@ -3697,7 +3727,8 @@ void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
#ifdef HAS_ARGBUNATTENUATEROW_SSE2
// Unattenuate 4 pixels at a time.
void ARGBUnattenuateRow_SSE2(const uint8* src_argb,
                             uint8* dst_argb,
                             int width) {
  uintptr_t alpha;
  asm volatile (
@@ -3744,10 +3775,10 @@ void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
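Unattenuate is the inverse: scale each channel back up by 255/alpha, saturating at 255. A scalar sketch; zero alpha is left untouched, since there is nothing to recover:

#include <stdint.h>

static uint8_t Clamp255(uint32_t v) {
  return v > 255 ? 255 : (uint8_t)v;
}

// Undo premultiplication: c * 255 / a per color channel, saturated.
static void UnattenuatePixel(const uint8_t argb[4], uint8_t out[4]) {
  const uint32_t a = argb[3];
  for (int i = 0; i < 3; ++i) {
    out[i] = a ? Clamp255(argb[i] * 255u / a) : argb[i];
  }
  out[3] = (uint8_t)a;
}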
#ifdef HAS_ARGBUNATTENUATEROW_AVX2
// Shuffle table duplicating alpha.
static const uvec8 kUnattenShuffleAlpha_AVX2 = {
    0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u};

// Unattenuate 8 pixels at a time.
void ARGBUnattenuateRow_AVX2(const uint8* src_argb,
                             uint8* dst_argb,
                             int width) {
  uintptr_t alpha;
  asm volatile (
@@ -3862,17 +3893,14 @@ void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
// g = (r * 45 + g * 88 + b * 22) >> 7
// r = (r * 50 + g * 98 + b * 24) >> 7
// Constant for ARGB color to sepia tone
static vec8 kARGBToSepiaB = {17, 68, 35, 0, 17, 68, 35, 0,
                             17, 68, 35, 0, 17, 68, 35, 0};

static vec8 kARGBToSepiaG = {22, 88, 45, 0, 22, 88, 45, 0,
                             22, 88, 45, 0, 22, 88, 45, 0};

static vec8 kARGBToSepiaR = {24, 98, 50, 0, 24, 98, 50, 0,
                             24, 98, 50, 0, 24, 98, 50, 0};

// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
@@ -3935,8 +3963,10 @@ void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
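Reading the weight tables in B,G,R order gives the scalar form of the sepia transform; the b line, cut off by the diff above, follows the same pattern as the quoted g and r lines. A sketch using the same >>7 fixed point (packuswb saturates in the SIMD path, hence the clamp):

#include <stdint.h>

static uint8_t ClampToByte(int v) {
  return v > 255 ? 255 : (uint8_t)v;
}

// Bytes in B,G,R,A order, matching libyuv's ARGB memory layout.
static void SepiaPixel(uint8_t argb[4]) {
  const int b = argb[0], g = argb[1], r = argb[2];
  argb[0] = ClampToByte((b * 17 + g * 68 + r * 35) >> 7);  // kARGBToSepiaB
  argb[1] = ClampToByte((b * 22 + g * 88 + r * 45) >> 7);  // kARGBToSepiaG
  argb[2] = ClampToByte((b * 24 + g * 98 + r * 50) >> 7);  // kARGBToSepiaR
}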
#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
// Transform 8 ARGB pixels (32 bytes) with color matrix.
// Same as Sepia except matrix is provided.
void ARGBColorMatrixRow_SSSE3(const uint8* src_argb,
                              uint8* dst_argb,
                              const int8* matrix_argb,
                              int width) {
  asm volatile (
    "movdqu " MEMACCESS(3) ",%%xmm5 \n"
    "pshufd $0x00,%%xmm5,%%xmm2 \n"
@@ -3998,8 +4028,11 @@ void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
#ifdef HAS_ARGBQUANTIZEROW_SSE2
// Quantize 4 ARGB pixels (16 bytes).
void ARGBQuantizeRow_SSE2(uint8* dst_argb,
                          int scale,
                          int interval_size,
                          int interval_offset,
                          int width) {
  asm volatile (
    "movd %2,%%xmm2 \n"
    "movd %3,%%xmm3 \n"
@@ -4048,7 +4081,9 @@ void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
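The parameter names suggest the posterize math: scale acts as a 16.16 fixed-point reciprocal of interval_size, so each channel snaps to (v * scale >> 16) * interval_size + interval_offset with alpha untouched. A scalar sketch under that assumption:

#include <stdint.h>

// Snap each color channel onto an interval grid; alpha is unmodified.
static void QuantizePixel(uint8_t argb[4], int scale, int interval_size,
                          int interval_offset) {
  for (int i = 0; i < 3; ++i) {
    argb[i] =
        (uint8_t)((argb[i] * scale >> 16) * interval_size + interval_offset);
  }
}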
#ifdef HAS_ARGBSHADEROW_SSE2
// Shade 4 pixels at a time by specified value.
void ARGBShadeRow_SSE2(const uint8* src_argb,
                       uint8* dst_argb,
                       int width,
                       uint32 value) {
  asm volatile (
    "movd %3,%%xmm2 \n"
@@ -4084,8 +4119,10 @@ void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
#ifdef HAS_ARGBMULTIPLYROW_SSE2
// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
void ARGBMultiplyRow_SSE2(const uint8* src_argb0,
                          const uint8* src_argb1,
                          uint8* dst_argb,
                          int width) {
  asm volatile (
    "pxor %%xmm5,%%xmm5 \n"
#ifdef HAS_ARGBMULTIPLYROW_AVX2 #ifdef HAS_ARGBMULTIPLYROW_AVX2
// Multiply 2 rows of ARGB pixels together, 8 pixels at a time. // Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1, void ARGBMultiplyRow_AVX2(const uint8* src_argb0,
uint8* dst_argb, int width) { const uint8* src_argb1,
uint8* dst_argb,
int width) {
asm volatile ( asm volatile (
"vpxor %%ymm5,%%ymm5,%%ymm5 \n" "vpxor %%ymm5,%%ymm5,%%ymm5 \n"
...@@ -4161,8 +4200,10 @@ void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -4161,8 +4200,10 @@ void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
#ifdef HAS_ARGBADDROW_SSE2 #ifdef HAS_ARGBADDROW_SSE2
// Add 2 rows of ARGB pixels together, 4 pixels at a time. // Add 2 rows of ARGB pixels together, 4 pixels at a time.
void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, void ARGBAddRow_SSE2(const uint8* src_argb0,
uint8* dst_argb, int width) { const uint8* src_argb1,
uint8* dst_argb,
int width) {
asm volatile ( asm volatile (
// 4 pixel loop. // 4 pixel loop.
LABELALIGN LABELALIGN
...@@ -4189,8 +4230,10 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, ...@@ -4189,8 +4230,10 @@ void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
#ifdef HAS_ARGBADDROW_AVX2 #ifdef HAS_ARGBADDROW_AVX2
// Add 2 rows of ARGB pixels together, 4 pixels at a time. // Add 2 rows of ARGB pixels together, 4 pixels at a time.
void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1, void ARGBAddRow_AVX2(const uint8* src_argb0,
uint8* dst_argb, int width) { const uint8* src_argb1,
uint8* dst_argb,
int width) {
asm volatile ( asm volatile (
// 4 pixel loop. // 4 pixel loop.
LABELALIGN LABELALIGN
@@ -4217,8 +4260,10 @@ void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
#ifdef HAS_ARGBSUBTRACTROW_SSE2
// Subtract 2 rows of ARGB pixels, 4 pixels at a time.
void ARGBSubtractRow_SSE2(const uint8* src_argb0,
                          const uint8* src_argb1,
                          uint8* dst_argb,
                          int width) {
  asm volatile (
    // 4 pixel loop.
    LABELALIGN
@@ -4245,8 +4290,10 @@ void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
#ifdef HAS_ARGBSUBTRACTROW_AVX2
// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
void ARGBSubtractRow_AVX2(const uint8* src_argb0,
                          const uint8* src_argb1,
                          uint8* dst_argb,
                          int width) {
  asm volatile (
    // 8 pixel loop.
    LABELALIGN
@@ -4276,8 +4323,11 @@ void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
// -1 0 1
// -2 0 2
// -1 0 1
void SobelXRow_SSE2(const uint8* src_y0,
                    const uint8* src_y1,
                    const uint8* src_y2,
                    uint8* dst_sobelx,
                    int width) {
  asm volatile (
    "sub %0,%1 \n"
    "sub %0,%2 \n"
@@ -4330,8 +4380,10 @@ void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
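The kernel in the comment collapses to column differences two pixels apart, with the middle row weighted by two. A scalar sketch over the three input rows, clamping the magnitude to a byte as the SIMD packing does:

#include <stdint.h>
#include <stdlib.h>

// |(-1 0 1; -2 0 2; -1 0 1) * window| per output pixel, saturated to 255.
static void SobelXRow_Scalar(const uint8_t* y0, const uint8_t* y1,
                             const uint8_t* y2, uint8_t* dst, int width) {
  for (int i = 0; i < width; ++i) {
    int sobel = abs((y0[i] - y0[i + 2]) + 2 * (y1[i] - y1[i + 2]) +
                    (y2[i] - y2[i + 2]));
    dst[i] = (uint8_t)(sobel > 255 ? 255 : sobel);
  }
}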
// -1 -2 -1
//  0  0  0
//  1  2  1
void SobelYRow_SSE2(const uint8* src_y0,
                    const uint8* src_y1,
                    uint8* dst_sobely,
                    int width) {
  asm volatile (
    "sub %0,%1 \n"
    "sub %0,%2 \n"
@@ -4383,8 +4435,10 @@ void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
// R = Sobel
// G = Sobel
// B = Sobel
void SobelRow_SSE2(const uint8* src_sobelx,
                   const uint8* src_sobely,
                   uint8* dst_argb,
                   int width) {
  asm volatile (
    "sub %0,%1 \n"
    "pcmpeqb %%xmm5,%%xmm5 \n"
#ifdef HAS_SOBELTOPLANEROW_SSE2 #ifdef HAS_SOBELTOPLANEROW_SSE2
// Adds Sobel X and Sobel Y and stores Sobel into a plane. // Adds Sobel X and Sobel Y and stores Sobel into a plane.
void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely, void SobelToPlaneRow_SSE2(const uint8* src_sobelx,
uint8* dst_y, int width) { const uint8* src_sobely,
uint8* dst_y,
int width) {
asm volatile ( asm volatile (
"sub %0,%1 \n" "sub %0,%1 \n"
"pcmpeqb %%xmm5,%%xmm5 \n" "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -4465,8 +4521,10 @@ void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
// R = Sobel X
// G = Sobel
// B = Sobel Y
void SobelXYRow_SSE2(const uint8* src_sobelx,
                     const uint8* src_sobely,
                     uint8* dst_argb,
                     int width) {
  asm volatile (
    "sub %0,%1 \n"
    "pcmpeqb %%xmm5,%%xmm5 \n"
@@ -4512,8 +4570,10 @@ void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
// Creates a table of cumulative sums where each value is a sum of all values
// above and to the left of the value, inclusive of the value.
void ComputeCumulativeSumRow_SSE2(const uint8* row,
                                  int32* cumsum,
                                  const int32* previous_cumsum,
                                  int width) {
  asm volatile (
    "pxor %%xmm0,%%xmm0 \n"
    "pxor %%xmm1,%%xmm1 \n"
@@ -4590,8 +4650,11 @@ void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
#endif  // HAS_COMPUTECUMULATIVESUMROW_SSE2
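In scalar terms the cumulative-sum row is a per-channel running total plus the already-integrated row above, i.e. one row of an integral image. A reference sketch:

#include <stdint.h>

// cumsum[x] = sum of row[0..x] per channel + the integrated row above.
static void CumulativeSumRow_Scalar(const uint8_t* row, int32_t* cumsum,
                                    const int32_t* previous_cumsum,
                                    int width) {
  int32_t sum[4] = {0, 0, 0, 0};
  for (int x = 0; x < width; ++x) {
    for (int c = 0; c < 4; ++c) {
      sum[c] += row[x * 4 + c];
      cumsum[x * 4 + c] = sum[c] + previous_cumsum[x * 4 + c];
    }
  }
}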
#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
void CumulativeSumToAverageRow_SSE2(const int32* topleft,
                                    const int32* botleft,
                                    int width,
                                    int area,
                                    uint8* dst,
                                    int count) {
  asm volatile (
    "movd %5,%%xmm5 \n"
@@ -4723,8 +4786,11 @@ void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
#ifdef HAS_ARGBAFFINEROW_SSE2
// Copy ARGB pixels from source image with slope to a row of destination.
LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb,
                        int src_argb_stride,
                        uint8* dst_argb,
                        const float* src_dudv,
                        int width) {
  intptr_t src_argb_stride_temp = src_argb_stride;
  intptr_t temp;
  asm volatile (
@@ -4808,8 +4874,10 @@ void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
#ifdef HAS_INTERPOLATEROW_SSSE3
// Bilinear filter 16x2 -> 16x1
void InterpolateRow_SSSE3(uint8* dst_ptr,
                          const uint8* src_ptr,
                          ptrdiff_t src_stride,
                          int dst_width,
                          int source_y_fraction) {
  asm volatile (
    "sub %1,%0 \n"
@@ -4889,8 +4957,10 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
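The 16x2 -> 16x1 filter blends two source rows by source_y_fraction/256. Ignoring the SIMD path's rounding details, a scalar sketch of the blend:

#include <stddef.h>
#include <stdint.h>

// dst = (row0 * (256 - f) + row1 * f) >> 8, where f = source_y_fraction;
// f == 0 returns row0 unchanged.
static void InterpolateRow_Scalar(uint8_t* dst, const uint8_t* src,
                                  ptrdiff_t src_stride, int width,
                                  int source_y_fraction) {
  const uint8_t* src1 = src + src_stride;
  const int f = source_y_fraction;
  for (int i = 0; i < width; ++i) {
    dst[i] = (uint8_t)((src[i] * (256 - f) + src1[i] * f) >> 8);
  }
}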
#ifdef HAS_INTERPOLATEROW_AVX2
// Bilinear filter 32x2 -> 32x1
void InterpolateRow_AVX2(uint8* dst_ptr,
                         const uint8* src_ptr,
                         ptrdiff_t src_stride,
                         int dst_width,
                         int source_y_fraction) {
  asm volatile (
    "cmp $0x0,%3 \n"
@@ -4965,8 +5035,10 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
#ifdef HAS_ARGBSHUFFLEROW_SSSE3
// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
void ARGBShuffleRow_SSSE3(const uint8* src_argb,
                          uint8* dst_argb,
                          const uint8* shuffler,
                          int width) {
  asm volatile (
    "movdqu " MEMACCESS(3) ",%%xmm5 \n"
    LABELALIGN
@@ -4993,8 +5065,10 @@ void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
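The shuffler argument is a per-pixel byte permutation: entry c gives the source byte index for output byte c, so {3, 2, 1, 0} reverses each 32-bit pixel, which is how one table serves all four conversions named above. A scalar sketch:

#include <stdint.h>

// Reorder the 4 bytes of every pixel according to shuffler[0..3].
static void ARGBShuffleRow_Scalar(const uint8_t* src, uint8_t* dst,
                                  const uint8_t* shuffler, int width) {
  for (int x = 0; x < width; ++x) {
    for (int c = 0; c < 4; ++c) {
      dst[x * 4 + c] = src[x * 4 + shuffler[c]];
    }
  }
}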
#ifdef HAS_ARGBSHUFFLEROW_AVX2
// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
void ARGBShuffleRow_AVX2(const uint8* src_argb,
                         uint8* dst_argb,
                         const uint8* shuffler,
                         int width) {
  asm volatile (
    "vbroadcastf128 " MEMACCESS(3) ",%%ymm5 \n"
    LABELALIGN
@@ -5022,8 +5096,10 @@ void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
#ifdef HAS_ARGBSHUFFLEROW_SSE2
// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
void ARGBShuffleRow_SSE2(const uint8* src_argb,
                         uint8* dst_argb,
                         const uint8* shuffler,
                         int width) {
  uintptr_t pixel_temp;
  asm volatile (
    "pxor %%xmm5,%%xmm5 \n"
@@ -5144,8 +5220,9 @@ void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
void I422ToYUY2Row_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame,
                        int width) {
  asm volatile (
    "sub %1,%2 \n"
    LABELALIGN
    "1: \n"
@@ -5179,8 +5256,9 @@ void I422ToYUY2Row_SSE2(const uint8* src_y,
void I422ToUYVYRow_SSE2(const uint8* src_y,
                        const uint8* src_u,
                        const uint8* src_v,
                        uint8* dst_frame,
                        int width) {
  asm volatile (
    "sub %1,%2 \n"
    LABELALIGN
    "1: \n"
@@ -5212,7 +5290,8 @@ void I422ToUYVYRow_SSE2(const uint8* src_y,
#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
void ARGBPolynomialRow_SSE2(const uint8* src_argb,
                            uint8* dst_argb,
                            const float* poly,
                            int width) {
  asm volatile (
    "pxor %%xmm3,%%xmm3 \n"
@@ -5268,7 +5347,8 @@ void ARGBPolynomialRow_SSE2(const uint8* src_argb,
#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
void ARGBPolynomialRow_AVX2(const uint8* src_argb,
                            uint8* dst_argb,
                            const float* poly,
                            int width) {
  asm volatile (
    "vbroadcastf128 " MEMACCESS(3) ",%%ymm4 \n"
@@ -5444,7 +5524,8 @@ void HalfFloat1Row_F16C(const uint16* src, uint16* dst, float, int width) {
#ifdef HAS_ARGBCOLORTABLEROW_X86
// Transform ARGB pixels with color table.
void ARGBColorTableRow_X86(uint8* dst_argb,
                           const uint8* table_argb,
                           int width) {
  uintptr_t pixel_temp;
  asm volatile (
@@ -5504,9 +5585,11 @@ void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
// Transform RGB pixels with luma table.
void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb,
                                 uint8* dst_argb,
                                 int width,
                                 const uint8* luma,
                                 uint32 lumacoeff) {
  uintptr_t pixel_temp;
  uintptr_t table_temp;
  asm volatile (
@@ -5604,8 +5687,6 @@ void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
#endif  // defined(__x86_64__) || defined(__i386__)
#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv