Commit f3fb7b69 authored by fbarchard@google.com

Lint cleanup for Neon and Visual C inline.

BUG=none
TEST=none
Review URL: https://webrtc-codereview.appspot.com/464002

git-svn-id: http://libyuv.googlecode.com/svn/trunk@231 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 5b22506b
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 230
+Version: 231
 License: BSD
 License File: LICENSE
...
@@ -11,7 +11,7 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_
 #define INCLUDE_LIBYUV_VERSION_H_
-#define INCLUDE_LIBYUV_VERSION 230
+#define INCLUDE_LIBYUV_VERSION 231
 #endif  // INCLUDE_LIBYUV_VERSION_H_
@@ -78,7 +78,7 @@ static void HalfRow_SSE2(const uint8* src_uv, int src_uv_stride,
     movdqa     xmm0, [eax]
     pavgb      xmm0, [eax + edx]
     sub        ecx, 16
-    movdqa     [eax + edi], xmm0  // NOLINT
+    movdqa     [eax + edi], xmm0
     lea        eax, [eax + 16]
     jg         convertloop
     pop        edi
@@ -475,7 +475,7 @@ static void SplitYUY2_SSE2(const uint8* src_yuy2,
     pand       xmm2, xmm5   // even bytes are Y
     pand       xmm3, xmm5
     packuswb   xmm2, xmm3
-    movdqa     [edx], xmm2  // NOLINT
+    movdqa     [edx], xmm2
     lea        edx, [edx + 16]
     psrlw      xmm0, 8      // YUYV -> UVUV
     psrlw      xmm1, 8
@@ -483,12 +483,12 @@ static void SplitYUY2_SSE2(const uint8* src_yuy2,
     movdqa     xmm1, xmm0
     pand       xmm0, xmm5  // U
     packuswb   xmm0, xmm0
-    movq       qword ptr [esi], xmm0  // NOLINT
+    movq       qword ptr [esi], xmm0
     lea        esi, [esi + 8]
     psrlw      xmm1, 8  // V
     packuswb   xmm1, xmm1
     sub        ecx, 16
-    movq       qword ptr [edi], xmm1  // NOLINT
+    movq       qword ptr [edi], xmm1
     lea        edi, [edi + 8]
     jg         convertloop
...
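The "// NOLINT" comments stripped in the hunks above are cpplint suppressions that presumably became unnecessary once the underlying warnings were resolved. For context, a minimal sketch of the convention, using a generic example rather than libyuv code:

// Illustrative only (not libyuv code): cpplint flags plain "long" with
// [runtime/int]; appending "// NOLINT" suppresses all checks on that line.
long legacy_counter = 0;  // NOLINT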
This diff is collapsed.
@@ -48,7 +48,7 @@ static void ARGBToBayerRow_SSSE3(const uint8* src_argb,
   }
 }
-#elif (defined(__x86_64__) || defined(__i386__)) && !defined(YUV_DISABLE_ASM)
+#elif defined(__x86_64__) || defined(__i386__) && !defined(YUV_DISABLE_ASM)
 #define HAS_ARGBTOBAYERROW_SSSE3
 static void ARGBToBayerRow_SSSE3(const uint8* src_argb, uint8* dst_bayer,
@@ -141,18 +141,15 @@ int ARGBToBayer(const uint8* src_argb, int src_stride_argb,
     src_argb = src_argb + (height - 1) * src_stride_argb;
     src_stride_argb = -src_stride_argb;
   }
-  void (*ARGBToBayerRow)(const uint8* src_argb,
-                         uint8* dst_bayer, uint32 selector, int pix);
+  void (*ARGBToBayerRow)(const uint8* src_argb, uint8* dst_bayer,
+                         uint32 selector, int pix) = ARGBToBayerRow_C;
 #if defined(HAS_ARGBTOBAYERROW_SSSE3)
   if (TestCpuFlag(kCpuHasSSSE3) &&
       IS_ALIGNED(width, 4) &&
       IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
     ARGBToBayerRow = ARGBToBayerRow_SSSE3;
-  } else
-#endif
-  {
-    ARGBToBayerRow = ARGBToBayerRow_C;
   }
+#endif
   const int blue_index = 0;  // Offsets for ARGB format
   const int green_index = 1;
   const int red_index = 2;
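The hunk above shows the pattern this cleanup applies throughout: instead of the "} else / #endif / {" arrangement, the row-function pointer is initialized to the C fallback at its declaration and only overridden inside the #if block when the SIMD path is compiled in and the runtime checks pass. A self-contained sketch of that init-then-override pattern, with hypothetical names (RowFunc_C, HAS_ROWFUNC_SSSE3, CpuHasSSSE3) that are not libyuv identifiers:

// Portable fallback: always compiled, always correct.
static void RowFunc_C(const unsigned char* src, unsigned char* dst, int n) {
  for (int i = 0; i < n; ++i) {
    dst[i] = src[i];
  }
}

void CopyRow(const unsigned char* src, unsigned char* dst, int n) {
  // Default to the C row function at the point of declaration...
  void (*RowFunc)(const unsigned char*, unsigned char*, int) = RowFunc_C;
#if defined(HAS_ROWFUNC_SSSE3)
  // ...and override it only when the fast path is compiled in and the CPU
  // and size checks pass. RowFunc_SSSE3 and CpuHasSSSE3 are placeholders
  // assumed to be declared elsewhere in this hypothetical build.
  if (CpuHasSSSE3() && (n % 16) == 0) {
    RowFunc = RowFunc_SSSE3;
  }
#endif
  RowFunc(src, dst, n);
}

With this shape every brace pairs up inside a single preprocessor branch, and the pointer is never left uninitialized when no SIMD variant is available.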
@@ -170,7 +167,7 @@ int ARGBToBayer(const uint8* src_argb, int src_stride_argb,
   return 0;
 }
-#define AVG(a,b) (((a) + (b)) >> 1)
+#define AVG(a, b) (((a) + (b)) >> 1)
 static void BayerRowBG(const uint8* src_bayer0, int src_stride_bayer,
                        uint8* dst_argb, int pix) {
@@ -369,9 +366,10 @@ int BayerToI420(const uint8* src_bayer, int src_stride_bayer,
                     uint8* dst_argb, int pix);
   void (*BayerRow1)(const uint8* src_bayer, int src_stride_bayer,
                     uint8* dst_argb, int pix);
-  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix);
+  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+      ARGBToYRow_C;
   void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
-                      uint8* dst_u, uint8* dst_v, int width);
+                      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
   SIMD_ALIGNED(uint8 row[kMaxStride * 2]);
 #if defined(HAS_ARGBTOYROW_SSSE3)
@@ -379,19 +377,13 @@ int BayerToI420(const uint8* src_bayer, int src_stride_bayer,
       IS_ALIGNED(width, 16) &&
       IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
     ARGBToYRow = ARGBToYRow_SSSE3;
-  } else
-#endif
-  {
-    ARGBToYRow = ARGBToYRow_C;
   }
+#endif
 #if defined(HAS_ARGBTOUVROW_SSSE3)
   if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) {
     ARGBToUVRow = ARGBToUVRow_SSSE3;
-  } else
-#endif
-  {
-    ARGBToUVRow = ARGBToUVRow_C;
   }
+#endif
   switch (src_fourcc_bayer) {
     case FOURCC_BGGR:
@@ -456,30 +448,24 @@ int I420ToBayer(const uint8* src_y, int src_stride_y,
                          const uint8* u_buf,
                          const uint8* v_buf,
                          uint8* rgb_buf,
-                         int width);
+                         int width) = I420ToARGBRow_C;
 #if defined(HAS_I420TOARGBROW_NEON)
   if (TestCpuFlag(kCpuHasNEON)) {
     I420ToARGBRow = I420ToARGBRow_NEON;
-  } else
+  }
 #elif defined(HAS_I420TOARGBROW_SSSE3)
   if (TestCpuFlag(kCpuHasSSSE3)) {
     I420ToARGBRow = I420ToARGBRow_SSSE3;
-  } else
-#endif
-  {
-    I420ToARGBRow = I420ToARGBRow_C;
   }
+#endif
   SIMD_ALIGNED(uint8 row[kMaxStride]);
-  void (*ARGBToBayerRow)(const uint8* src_argb,
-                         uint8* dst_bayer, uint32 selector, int pix);
+  void (*ARGBToBayerRow)(const uint8* src_argb, uint8* dst_bayer,
+                         uint32 selector, int pix) = ARGBToBayerRow_C;
 #if defined(HAS_ARGBTOBAYERROW_SSSE3)
   if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 4)) {
     ARGBToBayerRow = ARGBToBayerRow_SSSE3;
-  } else
-#endif
-  {
-    ARGBToBayerRow = ARGBToBayerRow_C;
   }
+#endif
   const int blue_index = 0;  // Offsets for ARGB format
   const int green_index = 1;
   const int red_index = 2;
...
This diff is collapsed.
@@ -471,7 +471,7 @@ void ARGBBlendRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
       dst_argb[3] = 255u;
     } else {
       *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<uint32*>(src_argb);
+          *reinterpret_cast<const uint32*>(src_argb);
     }
   }
   a = src_argb[4 + 3];
@@ -489,7 +489,7 @@ void ARGBBlendRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
       dst_argb[4 + 3] = 255u;
     } else {
       *reinterpret_cast<uint32*>(dst_argb + 4) =
-          *reinterpret_cast<uint32*>(src_argb + 4);
+          *reinterpret_cast<const uint32*>(src_argb + 4);
     }
   }
   src_argb += 8;
@@ -512,7 +512,7 @@ void ARGBBlendRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
       dst_argb[3] = 255u;
     } else {
       *reinterpret_cast<uint32*>(dst_argb) =
-          *reinterpret_cast<uint32*>(src_argb);
+          *reinterpret_cast<const uint32*>(src_argb);
     }
   }
 }
...
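The three hunks above change only the source-side cast: src_argb points to const data, so the reinterpret_cast on the read must also be to a pointer-to-const (a reinterpret_cast is not permitted to cast constness away). A minimal sketch of the corrected form, using standard fixed-width types instead of libyuv's uint8/uint32 typedefs:

#include <cstdint>

// Copy one 32-bit ARGB pixel. The destination cast can be to uint32_t*, but
// the source cast must be to const uint32_t* because src points to const
// bytes. This mirrors the word-at-a-time copy used in the diff above.
inline void CopyPixel32(const uint8_t* src, uint8_t* dst) {
  *reinterpret_cast<uint32_t*>(dst) =
      *reinterpret_cast<const uint32_t*>(src);
}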
@@ -105,7 +105,7 @@ CONST uvec8 kShuffleMaskARGBToRGB24 = {
 // Shuffle table for converting ARGB to RAW.
 CONST uvec8 kShuffleMaskARGBToRAW = {
-  2u, 1u,0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
+  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
 };
 void I400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
@@ -1728,6 +1728,7 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
   );
 }
 void YUY2ToYRow_Unaligned_SSE2(const uint8* src_yuy2,
                                uint8* dst_y, int pix) {
   asm volatile (
...
@@ -96,7 +96,7 @@ static const uvec8 kShuffleMaskARGBToRGB24 = {
 // Shuffle table for converting ARGB to RAW.
 static const uvec8 kShuffleMaskARGBToRAW = {
-  2u, 1u,0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
+  2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
 };
 __declspec(naked)
@@ -1252,7 +1252,7 @@ static const vec16 kUVBiasR = { BR, BR, BR, BR, BR, BR, BR, BR };
     __asm psubw      xmm1, kUVBiasG \
     __asm psubw      xmm2, kUVBiasR \
     /* Step 2: Find Y contribution to 8 R,G,B values */ \
-    __asm movq       xmm3, qword ptr [eax] \
+    __asm movq       xmm3, qword ptr [eax] /* NOLINT */ \
     __asm lea        eax, [eax + 8] \
     __asm punpcklbw  xmm3, xmm4 \
     __asm psubsw     xmm3, kYSub16 \
...
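The one-line change above adds a /* NOLINT */ marker inside a multi-line __asm macro, and the block-comment form is what makes it workable there: line splicing happens before comment removal, so a // comment would run across the trailing backslash and swallow the rest of the macro body. A sketch with a made-up macro name (the __asm body is MSVC x86 inline assembly, but an unused macro definition preprocesses cleanly anywhere):

// Hypothetical macro, not from libyuv. The suppression must be a block
// comment: "// NOLINT" here would extend past the continuation backslash and
// comment out the following line of the macro.
#define READYUV_LOW8(reg, src)                     \
  __asm movq reg, qword ptr [src] /* NOLINT */     \
  __asm lea  src, [src + 8]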
This diff is collapsed.