Commit 319f0477 authored by fbarchard@google.com

Compute chroma using negative coefficients to extend the range of the U contribution to B to 2.0

BUG=324
TESTED=TestI420
R=tpsiaki@google.com

Review URL: https://webrtc-codereview.appspot.com/41569004

git-svn-id: http://libyuv.googlecode.com/svn/trunk@1238 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent ddf11462
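Why negated coefficients: the ideal U-to-B gain is round(2.018 * 64) = 129, but pmaddubsw takes its coefficients as signed int8, whose largest positive value is 127, so the old table clamped UB to 127 (an effective gain of only 1.984). The negative end of int8 reaches -128, so storing -min(128, 129) = -128 and computing bias-minus-product instead of product-minus-bias recovers an effective gain of 2.0. A minimal standalone sketch of that identity (illustrative inputs, not libyuv code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int8_t ub = -128;   /* -min(128, round(2.018 * 64)) fits; +128 would not */
      int u = 200;        /* sample chroma byte */
      int y1 = 10000;     /* sample luma term, pre-shift */
      const int YGB = 1192;
      /* Old form: product minus bias, coefficient clamped to +127. */
      int old_b = (u * 127 + y1 - (127 * 128 + YGB)) >> 6;
      /* New form: bias minus product, effective coefficient +128 = 2.0 * 64. */
      int new_b = (y1 - u * ub + (ub * 128 - YGB)) >> 6;
      printf("old=%d new=%d\n", old_b, new_b); /* prints old=280 new=281 */
      return 0;
    }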
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 1237
+Version: 1238
 License: BSD
 License File: LICENSE
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1237
+#define LIBYUV_VERSION 1238
 #endif  // INCLUDE_LIBYUV_VERSION_H_  NOLINT
@@ -961,30 +961,41 @@ void I400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) {
 }
 // C reference code that mimics the YUV assembly.
 #define YG 19071 /* round(1.164 * 64 * 256) */
 #define YGB 1192 /* round(1.164 * 64 * 16) */
-#define UB 127 /* min(127, round(2.018 * 64)) */
-#define UG -25 /* round(-0.391 * 64) */
+#define UB -128 /* -min(128, round(2.018 * 64)) */
+#define UG 25 /* -round(-0.391 * 64) */
 #define UR 0
 #define VB 0
-#define VG -52 /* round(-0.813 * 64) */
-#define VR 102 /* round(1.596 * 64) */
+#define VG 52 /* -round(-0.813 * 64) */
+#define VR -102 /* -round(1.596 * 64) */
 // Bias
-#define BB (UB * 128 + VB * 128 + YGB)
-#define BG (UG * 128 + VG * 128 + YGB)
-#define BR (UR * 128 + VR * 128 + YGB)
+#define BB (UB * 128 + VB * 128 - YGB)
+#define BG (UG * 128 + VG * 128 - YGB)
+#define BR (UR * 128 + VR * 128 - YGB)
 static __inline void YuvPixel(uint8 y, uint8 u, uint8 v,
                               uint8* b, uint8* g, uint8* r) {
   uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16;
-  *b = Clamp((int32)(u * UB + v * VB + y1 - BB) >> 6);
-  *g = Clamp((int32)(u * UG + v * VG + y1 - BG) >> 6);
-  *r = Clamp((int32)(u * UR + v * VR + y1 - BR) >> 6);
-}
+  *b = Clamp((int32)(y1 - (v * VB + u * UB) + BB) >> 6);
+  *g = Clamp((int32)(y1 - (v * VG + u * UG) + BG) >> 6);
+  *r = Clamp((int32)(y1 - (v * VR + u * UR) + BR) >> 6);
+}
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef UR
+#undef VB
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
 #if !defined(LIBYUV_DISABLE_NEON) && \
     (defined(__ARM_NEON__) || defined(__aarch64__) || defined(LIBYUV_NEON))
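As a sanity check on the rewritten YuvPixel arithmetic, a standalone sketch (constants copied from the hunk above; Clamp6 is a hypothetical stand-in for libyuv's Clamp combined with the >> 6) should map video-range white, Y=235 and U=V=128, to 255,255,255:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper standing in for libyuv's Clamp() plus the >> 6. */
    static uint8_t Clamp6(int32_t v) {
      v >>= 6;
      if (v < 0) v = 0;
      if (v > 255) v = 255;
      return (uint8_t)v;
    }

    int main(void) {
      const int YG = 19071, YGB = 1192;
      const int UB = -128, UG = 25, VG = 52, VR = -102; /* VB = UR = 0 */
      const int BB = UB * 128 - YGB;
      const int BG = UG * 128 + VG * 128 - YGB;
      const int BR = VR * 128 - YGB;
      uint8_t y = 235, u = 128, v = 128; /* video-range white point */
      uint32_t y1 = (uint32_t)(y * 0x0101 * YG) >> 16;
      printf("B=%d G=%d R=%d\n",
             Clamp6((int32_t)(y1 - u * UB + BB)),            /* VB term is 0 */
             Clamp6((int32_t)(y1 - (v * VG + u * UG) + BG)),
             Clamp6((int32_t)(y1 - v * VR + BR)));           /* UR term is 0 */
      /* Expected output: B=255 G=255 R=255 */
      return 0;
    }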
@@ -24,21 +24,22 @@ extern "C" {
 #if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
     (defined(_M_IX86) || defined(_M_X64))
 // C reference code that mimics the YUV assembly.
 #define YG 19071 /* round(1.164 * 64 * 256) */
 #define YGB 1192 /* round(1.164 * 64 * 16) */
-#define UB 127 /* min(63,(int8)round(2.018 * 64)) */
-#define UG -25 /* (int8)round(-0.391 * 64 - 0.5) */
+#define UB -128 /* -min(128, round(2.018 * 64)) */
+#define UG 25 /* -round(-0.391 * 64) */
 #define UR 0
 #define VB 0
-#define VG -52 /* (int8)round(-0.813 * 64 - 0.5) */
-#define VR 102 /* (int8)round(1.596 * 64 + 0.5) */
+#define VG 52 /* -round(-0.813 * 64) */
+#define VR -102 /* -round(1.596 * 64) */
 // Bias
-#define BB (UB * 128 + VB * 128 + YGB)
-#define BG (UG * 128 + VG * 128 + YGB)
-#define BR (UR * 128 + VR * 128 + YGB)
+#define BB (UB * 128 + VB * 128 - YGB)
+#define BG (UG * 128 + VG * 128 - YGB)
+#define BR (UR * 128 + VR * 128 - YGB)
 static const vec8 kUVToB = {
   UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB, UB, VB
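The kUVToB/kUVToG/kUVToR vectors interleave the two per-channel coefficients so they line up with the packed U,V byte pairs that pmaddubsw consumes, producing one u*UB + v*VB sum per pixel. A scalar model of that pairing, with made-up chroma samples:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const int8_t UB = -128, VB = 0;
      /* kUVToB byte pattern: {UB, VB, UB, VB, ...}, matching U,V,U,V chroma. */
      const int8_t uv_to_b[8] = {UB, VB, UB, VB, UB, VB, UB, VB};
      const uint8_t uv[8] = {90, 240, 128, 128, 16, 16, 200, 54}; /* U,V pairs */
      for (int i = 0; i < 4; ++i) {
        int16_t lane = (int16_t)(uv[2 * i] * uv_to_b[2 * i] +
                                 uv[2 * i + 1] * uv_to_b[2 * i + 1]);
        printf("pixel %d: u*UB + v*VB = %d\n", i, lane);
      }
      return 0;
    }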
@@ -1514,9 +1515,12 @@ static const lvec16 kUVBiasR_AVX = {
     __asm vpmaddubsw ymm2, ymm0, kUVToR_AVX /* scale R UV */ \
     __asm vpmaddubsw ymm1, ymm0, kUVToG_AVX /* scale G UV */ \
     __asm vpmaddubsw ymm0, ymm0, kUVToB_AVX /* scale B UV */ \
-    __asm vpsubw ymm2, ymm2, kUVBiasR_AVX /* unbias back to signed */ \
-    __asm vpsubw ymm1, ymm1, kUVBiasG_AVX \
-    __asm vpsubw ymm0, ymm0, kUVBiasB_AVX \
+    __asm vmovdqu ymm3, kUVBiasR_AVX \
+    __asm vpsubw ymm2, ymm3, ymm2 \
+    __asm vmovdqu ymm3, kUVBiasG_AVX \
+    __asm vpsubw ymm1, ymm3, ymm1 \
+    __asm vmovdqu ymm3, kUVBiasB_AVX \
+    __asm vpsubw ymm0, ymm3, ymm0 \
     /* Step 2: Find Y contribution to 16 R,G,B values */ \
     __asm vmovdqu xmm3, [eax] /* NOLINT */ \
     __asm lea eax, [eax + 16] \
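For reference on the AVX2 change: vpmaddubsw treats its first source as unsigned bytes (the interleaved chroma) and its second as signed bytes (the coefficients), which is exactly why +128 was never encodable. With the coefficients negated, the products come out negated too, so the unbias step flips from product-minus-bias to bias-minus-product; since vpsubw subtracts its last operand from the one before it, the bias now has to sit in a register (ymm3) as the minuend. The SSE2 hunks below apply the same reversal, freeing xmm3 as scratch so each bias can be loaded into the destination register before the psubw. A scalar model of one 16-bit lane under those assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of one pmaddubsw lane: unsigned bytes a0,a1 times signed bytes
       b0,b1, pairwise-summed with signed 16-bit saturation. */
    static int16_t pmaddubsw_lane(uint8_t a0, uint8_t a1, int8_t b0, int8_t b1) {
      int32_t sum = (int32_t)a0 * b0 + (int32_t)a1 * b1;
      if (sum > 32767) sum = 32767;
      if (sum < -32768) sum = -32768;
      return (int16_t)sum;
    }

    int main(void) {
      uint8_t u = 200, v = 128;            /* sample chroma pair */
      int16_t bias_b = -128 * 128 - 1192;  /* BB with the new defines */
      /* New scheme: bias - (u*UB + v*VB) with UB = -128, VB = 0. */
      int16_t b_uv = bias_b - pmaddubsw_lane(u, v, -128, 0);
      printf("B chroma term: %d\n", b_uv); /* 200*128 - 17576 = 8024 */
      return 0;
    }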
@@ -1761,12 +1765,16 @@ void I422ToABGRRow_AVX2(const uint8* y_buf,
     /* Step 1: Find 4 UV contributions to 8 R,G,B values */ \
     __asm movdqa xmm1, xmm0 \
     __asm movdqa xmm2, xmm0 \
-    __asm pmaddubsw xmm0, kUVToB /* scale B UV */ \
-    __asm pmaddubsw xmm1, kUVToG /* scale G UV */ \
-    __asm pmaddubsw xmm2, kUVToR /* scale R UV */ \
-    __asm psubw xmm0, kUVBiasB /* unbias back to signed */ \
-    __asm psubw xmm1, kUVBiasG \
-    __asm psubw xmm2, kUVBiasR \
+    __asm movdqa xmm3, xmm0 \
+    __asm movdqa xmm0, kUVBiasB /* unbias back to signed */ \
+    __asm pmaddubsw xmm1, kUVToB /* scale B UV */ \
+    __asm psubw xmm0, xmm1 \
+    __asm movdqa xmm1, kUVBiasG \
+    __asm pmaddubsw xmm2, kUVToG /* scale G UV */ \
+    __asm psubw xmm1, xmm2 \
+    __asm movdqa xmm2, kUVBiasR \
+    __asm pmaddubsw xmm3, kUVToR /* scale R UV */ \
+    __asm psubw xmm2, xmm3 \
     /* Step 2: Find Y contribution to 8 R,G,B values */ \
     __asm movq xmm3, qword ptr [eax] /* NOLINT */ \
     __asm lea eax, [eax + 8] \
@@ -1788,12 +1796,16 @@ void I422ToABGRRow_AVX2(const uint8* y_buf,
     /* Step 1: Find 4 UV contributions to 8 R,G,B values */ \
     __asm movdqa xmm1, xmm0 \
     __asm movdqa xmm2, xmm0 \
-    __asm pmaddubsw xmm0, kVUToB /* scale B UV */ \
-    __asm pmaddubsw xmm1, kVUToG /* scale G UV */ \
-    __asm pmaddubsw xmm2, kVUToR /* scale R UV */ \
-    __asm psubw xmm0, kUVBiasB /* unbias back to signed */ \
-    __asm psubw xmm1, kUVBiasG \
-    __asm psubw xmm2, kUVBiasR \
+    __asm movdqa xmm3, xmm0 \
+    __asm movdqa xmm0, kUVBiasB /* unbias back to signed */ \
+    __asm pmaddubsw xmm1, kVUToB /* scale B UV */ \
+    __asm psubw xmm0, xmm1 \
+    __asm movdqa xmm1, kUVBiasG \
+    __asm pmaddubsw xmm2, kVUToG /* scale G UV */ \
+    __asm psubw xmm1, xmm2 \
+    __asm movdqa xmm2, kUVBiasR \
+    __asm pmaddubsw xmm3, kVUToR /* scale R UV */ \
+    __asm psubw xmm2, xmm3 \
     /* Step 2: Find Y contribution to 8 R,G,B values */ \
     __asm movq xmm3, qword ptr [eax] /* NOLINT */ \
     __asm lea eax, [eax + 8] \
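This second hunk is presumably the V,U-interleaved twin of the first (NV21-style chroma ordering): it consumes the kVUTo* coefficient vectors but the very same kUVBias* constants, since the bias term UB*128 + VB*128 does not depend on interleave order. A one-line check of that symmetry with the new values:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const int8_t UB = -128, VB = 0;
      uint8_t u = 90, v = 240;        /* illustrative chroma */
      int uv_lane = u * UB + v * VB;  /* U,V bytes paired with {UB, VB} */
      int vu_lane = v * VB + u * UB;  /* V,U bytes paired with {VB, UB} */
      assert(uv_lane == vu_lane);     /* same sum, so the bias is shared */
      return 0;
    }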