Commit cd6056c0 authored by fbarchard@google.com

InterpolateAny for unaligned and odd-width interpolation. To be used by the ARGB scaler in the future.
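
A scalar sketch of the per-byte blend each ARGBInterpolateRow variant computes (f = source_y_fraction, 0..255; names simplified from the C reference in this change):

  for (int i = 0; i < width * 4; ++i) {  // 4 bytes per ARGB pixel
    dst[i] = (src[i] * (256 - f) + src[i + src_stride] * f) >> 8;
  }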

BUG=208
TEST=ARGBInterpolate255_Unaligned
Review URL: https://webrtc-codereview.appspot.com/1324004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@662 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 20794c46
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 661
Version: 662
License: BSD
License File: LICENSE
@@ -1456,18 +1456,34 @@ LIBYUV_API
void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
uint8* dst_argb, const float* uv_dudv, int width);
// Used for ARGBScale and ARGBInterpolate.
void ARGBInterpolateRow_C(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb,
int dst_width, int source_y_fraction);
int width, int source_y_fraction);
void ARGBInterpolateRow_SSE2(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int dst_width,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_SSSE3(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int dst_width,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_NEON(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int dst_width,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_Unaligned_SSE2(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_Unaligned_SSSE3(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_Any_NEON(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_Any_SSE2(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
void ARGBInterpolateRow_Any_SSSE3(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride_argb, int width,
int source_y_fraction);
// Sobel images.
void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 661
#define LIBYUV_VERSION 662
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
@@ -1650,23 +1650,37 @@ int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) = ARGBInterpolateRow_C;
#if defined(HAS_ARGBINTERPOLATEROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBInterpolateRow = ARGBInterpolateRow_SSE2;
if (TestCpuFlag(kCpuHasSSE2) && width >= 4) {
ARGBInterpolateRow = ARGBInterpolateRow_Any_SSE2;
if (IS_ALIGNED(width, 4)) {
ARGBInterpolateRow = ARGBInterpolateRow_Unaligned_SSE2;
if (IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBInterpolateRow = ARGBInterpolateRow_SSE2;
}
}
}
#endif
#if defined(HAS_ARGBINTERPOLATEROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBInterpolateRow = ARGBInterpolateRow_SSSE3;
if (TestCpuFlag(kCpuHasSSSE3) && width >= 4) {
ARGBInterpolateRow = ARGBInterpolateRow_Any_SSSE3;
if (IS_ALIGNED(width, 4)) {
ARGBInterpolateRow = ARGBInterpolateRow_Unaligned_SSSE3;
if (IS_ALIGNED(src_argb0, 16) && IS_ALIGNED(src_stride_argb0, 16) &&
IS_ALIGNED(src_argb1, 16) && IS_ALIGNED(src_stride_argb1, 16) &&
IS_ALIGNED(dst_argb, 16) && IS_ALIGNED(dst_stride_argb, 16)) {
ARGBInterpolateRow = ARGBInterpolateRow_SSSE3;
}
}
}
#elif defined(HAS_ARGBINTERPOLATEROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
ARGBInterpolateRow = ARGBInterpolateRow_NEON;
#endif
#if defined(HAS_ARGBINTERPOLATEROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && width >= 4) {
ARGBInterpolateRow = ARGBInterpolateRow_Any_NEON;
if (IS_ALIGNED(width, 4)) {
ARGBInterpolateRow = ARGBInterpolateRow_NEON;
}
}
#endif
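// Selection above, in short (width in pixels):
//   width < 4                                    -> ARGBInterpolateRow_C
//   width >= 4                                   -> ARGBInterpolateRow_Any_*
//   width a multiple of 4                        -> ARGBInterpolateRow_Unaligned_* (or _NEON)
//   ... and pointers/strides all 16-byte aligned -> ARGBInterpolateRow_SSE2/SSSE3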
for (int y = 0; y < height; ++y) {
@@ -482,6 +482,33 @@ YANY(ARGBShuffleRow_Any_NEON, ARGBShuffleRow_NEON,
#endif
#undef YANY
// Interpolate may want to work in place, so last16 method can not be used.
#define NANY(NAMEANY, ARGBTERP_SIMD, ARGBTERP_C, SBPP, BPP, MASK) \
void NAMEANY(uint8* dst_argb, const uint8* src_argb, \
ptrdiff_t src_stride_argb, int width, \
int source_y_fraction) { \
int n = width & ~MASK; \
ARGBTERP_SIMD(dst_argb, src_argb, src_stride_argb, \
n, source_y_fraction); \
ARGBTERP_C(dst_argb + n * BPP, \
src_argb + n * SBPP, src_stride_argb, \
width & MASK, source_y_fraction); \
}
#ifdef HAS_ARGBINTERPOLATEROW_SSSE3
NANY(ARGBInterpolateRow_Any_SSSE3, ARGBInterpolateRow_Unaligned_SSSE3,
ARGBInterpolateRow_C, 4, 4, 3)
#endif
#ifdef HAS_ARGBINTERPOLATEROW_SSE2
NANY(ARGBInterpolateRow_Any_SSE2, ARGBInterpolateRow_Unaligned_SSE2,
ARGBInterpolateRow_C, 4, 4, 3)
#endif
#ifdef HAS_ARGBINTERPOLATEROW_NEON
NANY(ARGBInterpolateRow_Any_NEON, ARGBInterpolateRow_NEON,
ARGBInterpolateRow_C, 4, 4, 3)
#endif
#undef NANY
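// For reference, a sketch of what NANY expands to in the SSSE3 case above
// (SBPP = BPP = 4 bytes per ARGB pixel, MASK = 3):
//   void ARGBInterpolateRow_Any_SSSE3(uint8* dst_argb, const uint8* src_argb,
//                                     ptrdiff_t src_stride_argb, int width,
//                                     int source_y_fraction) {
//     int n = width & ~3;  // largest multiple of 4 pixels
//     ARGBInterpolateRow_Unaligned_SSSE3(dst_argb, src_argb, src_stride_argb,
//                                        n, source_y_fraction);
//     ARGBInterpolateRow_C(dst_argb + n * 4, src_argb + n * 4, src_stride_argb,
//                          width & 3, source_y_fraction);
//   }
// e.g. width = 9 runs 8 pixels through the SIMD row and 1 pixel through C.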
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
@@ -1730,12 +1730,12 @@ void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride,
// C version 2x2 -> 2x1.
void ARGBInterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr,
ptrdiff_t src_stride,
int dst_width, int source_y_fraction) {
int width, int source_y_fraction) {
int y1_fraction = source_y_fraction;
int y0_fraction = 256 - y1_fraction;
const uint8* src_ptr1 = src_ptr + src_stride;
uint8* end = dst_ptr + (dst_width << 2);
do {
for (int x = 0; x < width - 1; x += 2) {
dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8;
dst_ptr[2] = (src_ptr[2] * y0_fraction + src_ptr1[2] * y1_fraction) >> 8;
@@ -1747,7 +1747,13 @@ void ARGBInterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr,
src_ptr += 8;
src_ptr1 += 8;
dst_ptr += 8;
} while (dst_ptr < end);
}
if (width & 1) {
dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8;
dst_ptr[2] = (src_ptr[2] * y0_fraction + src_ptr1[2] * y1_fraction) >> 8;
dst_ptr[3] = (src_ptr[3] * y0_fraction + src_ptr1[3] * y1_fraction) >> 8;
}
}
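// e.g. for ARGBInterpolateRow_C above with width = 5: the pair loop blends
// pixels 0..3 in two iterations, and the (width & 1) tail blends pixel 4,
// so odd widths are now handled.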
// Blend 2 rows into 1 for conversions such as I422ToI420.
@@ -5012,6 +5012,222 @@ void ARGBInterpolateRow_SSE2(uint8* dst_argb, const uint8* src_argb,
);
}
// Bilinear image filtering.
// Same as ScaleARGBFilterRows_SSSE3 but without last pixel duplicated.
void ARGBInterpolateRow_Unaligned_SSSE3(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) {
asm volatile (
"sub %1,%0 \n"
"shr %3 \n"
"cmp $0x0,%3 \n"
"je 100f \n"
"cmp $0x20,%3 \n"
"je 75f \n"
"cmp $0x40,%3 \n"
"je 50f \n"
"cmp $0x60,%3 \n"
"je 25f \n"
"movd %3,%%xmm0 \n"
"neg %3 \n"
"add $0x80,%3 \n"
"movd %3,%%xmm5 \n"
"punpcklbw %%xmm0,%%xmm5 \n"
"punpcklwd %%xmm5,%%xmm5 \n"
"pshufd $0x0,%%xmm5,%%xmm5 \n"
// General purpose row blend.
".p2align 4 \n"
"1: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm2 \n"
"movdqu %%xmm0,%%xmm1 \n"
"punpcklbw %%xmm2,%%xmm0 \n"
"punpckhbw %%xmm2,%%xmm1 \n"
"pmaddubsw %%xmm5,%%xmm0 \n"
"pmaddubsw %%xmm5,%%xmm1 \n"
"psrlw $0x7,%%xmm0 \n"
"psrlw $0x7,%%xmm1 \n"
"packuswb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 1b \n"
"jmp 99f \n"
// Blend 25 / 75.
".p2align 4 \n"
"25: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm1 \n"
"pavgb %%xmm1,%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 25b \n"
"jmp 99f \n"
// Blend 50 / 50.
".p2align 4 \n"
"50: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm1 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 50b \n"
"jmp 99f \n"
// Blend 75 / 25.
".p2align 4 \n"
"75: \n"
"movdqu (%1),%%xmm1 \n"
"movdqu (%1,%4,1),%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 75b \n"
"jmp 99f \n"
// Blend 100 / 0 - Copy row unchanged.
".p2align 4 \n"
"100: \n"
"movdqu (%1),%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 100b \n"
"99: \n"
: "+r"(dst_argb), // %0
"+r"(src_argb), // %1
"+r"(dst_width), // %2
"+r"(source_y_fraction) // %3
: "r"(static_cast<intptr_t>(src_stride)) // %4
: "memory", "cc"
#if defined(__SSE2__)
, "xmm0", "xmm1", "xmm2", "xmm5"
#endif
);
}
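// Fraction math in ARGBInterpolateRow_Unaligned_SSSE3 above, sketched in
// scalar form: f2 = source_y_fraction >> 1 (1..127 on the general path);
// xmm5 holds the byte pair (128 - f2, f2) replicated, so pmaddubsw yields
// src0 * (128 - f2) + src1 * f2 per byte, and psrlw $0x7 divides by 128,
// i.e. roughly dst = (src0 * (256 - f) + src1 * f) >> 8.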
// Bilinear image filtering.
// Same as ScaleARGBFilterRows_SSSE3 but without last pixel duplicated.
void ARGBInterpolateRow_Unaligned_SSE2(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) {
asm volatile (
"sub %1,%0 \n"
"shr %3 \n"
"cmp $0x0,%3 \n"
"je 100f \n"
"cmp $0x20,%3 \n"
"je 75f \n"
"cmp $0x40,%3 \n"
"je 50f \n"
"cmp $0x60,%3 \n"
"je 25f \n"
"movd %3,%%xmm0 \n"
"neg %3 \n"
"add $0x80,%3 \n"
"movd %3,%%xmm5 \n"
"punpcklbw %%xmm0,%%xmm5 \n"
"punpcklwd %%xmm5,%%xmm5 \n"
"pshufd $0x0,%%xmm5,%%xmm5 \n"
"pxor %%xmm4,%%xmm4 \n"
// General purpose row blend.
".p2align 4 \n"
"1: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm2 \n"
"movdqu %%xmm0,%%xmm1 \n"
"movdqu %%xmm2,%%xmm3 \n"
"punpcklbw %%xmm4,%%xmm2 \n"
"punpckhbw %%xmm4,%%xmm3 \n"
"punpcklbw %%xmm4,%%xmm0 \n"
"punpckhbw %%xmm4,%%xmm1 \n"
"psubw %%xmm0,%%xmm2 \n"
"psubw %%xmm1,%%xmm3 \n"
"paddw %%xmm2,%%xmm2 \n"
"paddw %%xmm3,%%xmm3 \n"
"pmulhw %%xmm5,%%xmm2 \n"
"pmulhw %%xmm5,%%xmm3 \n"
"paddw %%xmm2,%%xmm0 \n"
"paddw %%xmm3,%%xmm1 \n"
"packuswb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 1b \n"
"jmp 99f \n"
// Blend 25 / 75.
".p2align 4 \n"
"25: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm1 \n"
"pavgb %%xmm1,%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 25b \n"
"jmp 99f \n"
// Blend 50 / 50.
".p2align 4 \n"
"50: \n"
"movdqu (%1),%%xmm0 \n"
"movdqu (%1,%4,1),%%xmm1 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 50b \n"
"jmp 99f \n"
// Blend 75 / 25.
".p2align 4 \n"
"75: \n"
"movdqu (%1),%%xmm1 \n"
"movdqu (%1,%4,1),%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"pavgb %%xmm1,%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 75b \n"
"jmp 99f \n"
// Blend 100 / 0 - Copy row unchanged.
".p2align 4 \n"
"100: \n"
"movdqu (%1),%%xmm0 \n"
"sub $0x4,%2 \n"
"movdqu %%xmm0,(%1,%0,1) \n"
"lea 0x10(%1),%1 \n"
"jg 100b \n"
"99: \n"
: "+r"(dst_argb), // %0
"+r"(src_argb), // %1
"+r"(dst_width), // %2
"+r"(source_y_fraction) // %3
: "r"(static_cast<intptr_t>(src_stride)) // %4
: "memory", "cc"
#if defined(__SSE2__)
, "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
#endif
);
}
void HalfRow_SSE2(const uint8* src_uv, int src_uv_stride,
uint8* dst_uv, int pix) {
asm volatile (
@@ -2130,41 +2130,41 @@ void I422ToARGBRow_AVX2(const uint8* y_buf,
convertloop:
vmovq xmm0, qword ptr [esi] // U
vmovq xmm1, qword ptr [esi + edi] // V
lea esi, [esi + 8]
vpunpcklbw ymm0, ymm0, ymm1 // UV
lea esi, [esi + 8]
vpunpcklbw ymm0, ymm0, ymm1 // UV
vpermq ymm0, ymm0, 0xd8
vpunpcklwd ymm0, ymm0, ymm0 // UVUV
vpmaddubsw ymm2, ymm0, kUVToB_AVX // scale B UV
vpmaddubsw ymm1, ymm0, kUVToG_AVX // scale G UV
vpmaddubsw ymm0, ymm0, kUVToR_AVX // scale R UV
vpsubw ymm2, ymm2, kUVBiasB_AVX // unbias back to signed
vpsubw ymm1, ymm1, kUVBiasG_AVX
vpsubw ymm0, ymm0, kUVBiasR_AVX
// Step 2: Find Y contribution to 16 R,G,B values
vmovdqu xmm3, [eax] // NOLINT
lea eax, [eax + 16]
vpmaddubsw ymm2, ymm0, kUVToB_AVX // scale B UV
vpmaddubsw ymm1, ymm0, kUVToG_AVX // scale G UV
vpmaddubsw ymm0, ymm0, kUVToR_AVX // scale R UV
vpsubw ymm2, ymm2, kUVBiasB_AVX // unbias back to signed
vpsubw ymm1, ymm1, kUVBiasG_AVX
vpsubw ymm0, ymm0, kUVBiasR_AVX
// Step 2: Find Y contribution to 16 R,G,B values
vmovdqu xmm3, [eax] // NOLINT
lea eax, [eax + 16]
vpermq ymm3, ymm3, 0xd8
vpunpcklbw ymm3, ymm3, ymm4
vpsubsw ymm3, ymm3, kYSub16_AVX
vpmullw ymm3, ymm3, kYToRgb_AVX
vpaddsw ymm2, ymm2, ymm3 // B += Y
vpaddsw ymm1, ymm1, ymm3 // G += Y
vpaddsw ymm0, ymm0, ymm3 // R += Y
vpsraw ymm2, ymm2, 6
vpsraw ymm1, ymm1, 6
vpsraw ymm0, ymm0, 6
vpsubsw ymm3, ymm3, kYSub16_AVX
vpmullw ymm3, ymm3, kYToRgb_AVX
vpaddsw ymm2, ymm2, ymm3 // B += Y
vpaddsw ymm1, ymm1, ymm3 // G += Y
vpaddsw ymm0, ymm0, ymm3 // R += Y
vpsraw ymm2, ymm2, 6
vpsraw ymm1, ymm1, 6
vpsraw ymm0, ymm0, 6
vpackuswb ymm2, ymm2, ymm2 // B
vpackuswb ymm1, ymm1, ymm1 // G
vpackuswb ymm0, ymm0, ymm0 // R
vpackuswb ymm0, ymm0, ymm0 // R
// Step 3: Weave into ARGB
vpunpcklbw ymm2, ymm2, ymm1 // BG
vpermq ymm2, ymm2, 0xd8
vpunpcklbw ymm0, ymm0, ymm5 // RA
vpermq ymm0, ymm0, 0xd8
vpunpcklwd ymm1, ymm2, ymm0 // BGRA first 4 pixels
vpunpckhwd ymm2, ymm2, ymm0 // BGRA next 4 pixels
vpunpcklwd ymm1, ymm2, ymm0 // BGRA first 8 pixels
vpunpckhwd ymm2, ymm2, ymm0 // BGRA next 8 pixels
vmovdqu [edx], ymm1
vmovdqu [edx + 32], ymm2
lea edx, [edx + 64]
@@ -6121,6 +6121,224 @@ void ARGBInterpolateRow_SSE2(uint8* dst_argb, const uint8* src_argb,
}
}
// Bilinear image filtering.
// Same as ScaleARGBFilterRows_SSSE3 but without last pixel duplicated.
__declspec(naked) __declspec(align(16))
void ARGBInterpolateRow_Unaligned_SSSE3(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) {
__asm {
push esi
push edi
mov edi, [esp + 8 + 4] // dst_argb
mov esi, [esp + 8 + 8] // src_argb
mov edx, [esp + 8 + 12] // src_stride
mov ecx, [esp + 8 + 16] // dst_width
mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
sub edi, esi
shr eax, 1
// Dispatch to specialized filters if applicable.
cmp eax, 0
je xloop100 // 0 / 128. Blend 100 / 0.
cmp eax, 32
je xloop75 // 32 / 128 is 0.25. Blend 75 / 25.
cmp eax, 64
je xloop50 // 64 / 128 is 0.50. Blend 50 / 50.
cmp eax, 96
je xloop25 // 96 / 128 is 0.75. Blend 25 / 75.
movd xmm0, eax // high fraction 0..127
neg eax
add eax, 128
movd xmm5, eax // low fraction 128..1
punpcklbw xmm5, xmm0
punpcklwd xmm5, xmm5
pshufd xmm5, xmm5, 0
align 16
xloop:
movdqu xmm0, [esi]
movdqu xmm2, [esi + edx]
movdqu xmm1, xmm0
punpcklbw xmm0, xmm2
punpckhbw xmm1, xmm2
pmaddubsw xmm0, xmm5
pmaddubsw xmm1, xmm5
psrlw xmm0, 7
psrlw xmm1, 7
packuswb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop
jmp xloop99
// Blend 25 / 75.
align 16
xloop25:
movdqu xmm0, [esi]
movdqu xmm1, [esi + edx]
pavgb xmm0, xmm1
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop25
jmp xloop99
// Blend 50 / 50.
align 16
xloop50:
movdqu xmm0, [esi]
movdqu xmm1, [esi + edx]
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop50
jmp xloop99
// Blend 75 / 25.
align 16
xloop75:
movdqu xmm1, [esi]
movdqu xmm0, [esi + edx]
pavgb xmm0, xmm1
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop75
jmp xloop99
// Blend 100 / 0 - Copy row unchanged.
align 16
xloop100:
movdqu xmm0, [esi]
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop100
xloop99:
pop edi
pop esi
ret
}
}
// Bilinear image filtering.
// Same as ScaleARGBFilterRows_SSE2 but without last pixel duplicated.
__declspec(naked) __declspec(align(16))
void ARGBInterpolateRow_Unaligned_SSE2(uint8* dst_argb, const uint8* src_argb,
ptrdiff_t src_stride, int dst_width,
int source_y_fraction) {
__asm {
push esi
push edi
mov edi, [esp + 8 + 4] // dst_argb
mov esi, [esp + 8 + 8] // src_argb
mov edx, [esp + 8 + 12] // src_stride
mov ecx, [esp + 8 + 16] // dst_width
mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
sub edi, esi
// Dispatch to specialized filters if applicable.
cmp eax, 0
je xloop100 // 0 / 256. Blend 100 / 0.
cmp eax, 64
je xloop75 // 64 / 256 is 0.25. Blend 75 / 25.
cmp eax, 128
je xloop50 // 128 / 256 is 0.50. Blend 50 / 50.
cmp eax, 192
je xloop25 // 192 / 256 is 0.75. Blend 25 / 75.
movd xmm5, eax // xmm5 = y fraction
punpcklbw xmm5, xmm5
psrlw xmm5, 1
punpcklwd xmm5, xmm5
punpckldq xmm5, xmm5
punpcklqdq xmm5, xmm5
pxor xmm4, xmm4
align 16
xloop:
movdqu xmm0, [esi] // row0
movdqu xmm2, [esi + edx] // row1
movdqu xmm1, xmm0
movdqu xmm3, xmm2
punpcklbw xmm2, xmm4
punpckhbw xmm3, xmm4
punpcklbw xmm0, xmm4
punpckhbw xmm1, xmm4
psubw xmm2, xmm0 // row1 - row0
psubw xmm3, xmm1
paddw xmm2, xmm2 // 9 bits * 15 bits = 8.16
paddw xmm3, xmm3
pmulhw xmm2, xmm5 // scale diff
pmulhw xmm3, xmm5
paddw xmm0, xmm2 // sum rows
paddw xmm1, xmm3
packuswb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop
jmp xloop99
// Blend 25 / 75.
align 16
xloop25:
movdqu xmm0, [esi]
movdqu xmm1, [esi + edx]
pavgb xmm0, xmm1
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop25
jmp xloop99
// Blend 50 / 50.
align 16
xloop50:
movdqu xmm0, [esi]
movdqu xmm1, [esi + edx]
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop50
jmp xloop99
// Blend 75 / 25.
align 16
xloop75:
movdqu xmm1, [esi]
movdqu xmm0, [esi + edx]
pavgb xmm0, xmm1
pavgb xmm0, xmm1
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop75
jmp xloop99
// Blend 100 / 0 - Copy row unchanged.
align 16
xloop100:
movdqu xmm0, [esi]
sub ecx, 4
movdqu [esi + edi], xmm0
lea esi, [esi + 16]
jg xloop100
xloop99:
pop edi
pop esi
ret
}
}
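// Fixed-point math in the general loop above, sketched in scalar form:
// xmm5 holds roughly f << 7 per 16-bit lane (f = source_y_fraction, 0..255);
// diff = row1 - row0 is a signed 9-bit value, doubled so pmulhw's signed
// 16x16 -> high-16 multiply gives ((2 * diff) * (f << 7)) >> 16 == diff * f / 256;
// adding row0 back yields dst = row0 + (row1 - row0) * f / 256,
// which is the same blend as (row0 * (256 - f) + row1 * f) >> 8.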
__declspec(naked) __declspec(align(16))
void HalfRow_SSE2(const uint8* src_uv, int src_uv_stride,
uint8* dst_uv, int pix) {
@@ -832,7 +832,9 @@ TEST_F(libyuvTest, ARGBInterpolate##TERP##N) { \
TESTTERP(ARGB, 4, 1, ARGB, 4, 1, \
benchmark_width_, TERP, 1, _Invert, -, 0) \
TESTTERP(ARGB, 4, 1, ARGB, 4, 1, \
benchmark_width_, TERP, 1, _Opt, +, 0)
benchmark_width_, TERP, 1, _Opt, +, 0) \
TESTTERP(ARGB, 4, 1, ARGB, 4, 1, \
benchmark_width_ - 1, TERP, 1, _Any_Invert, -, 0)
TESTINTERPOLATE(0)
TESTINTERPOLATE(64)