Commit 5b0f7e11 authored by fbarchard@google.com

NEON port of ARGBAdd and ARGBMultiply

BUG=175
TEST=out\release\libyuv_unittest --gtest_filter=*Add*
Review URL: https://webrtc-codereview.appspot.com/1051005

git-svn-id: http://libyuv.googlecode.com/svn/trunk@545 16f28f9a-4ce2-e073-06de-1de4eb20be90
parent 29aa2145
README.chromium
 Name: libyuv
 URL: http://code.google.com/p/libyuv/
-Version: 544
+Version: 545
 License: BSD
 License File: LICENSE
include/libyuv/row.h
@@ -976,6 +976,10 @@ void ARGBMultiplyRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
                           uint8* dst_argb, int width);
 void ARGBMultiplyRow_Any_SSE2(const uint8* src_argb, const uint8* src_argb1,
                               uint8* dst_argb, int width);
+void ARGBMultiplyRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+                          uint8* dst_argb, int width);
+void ARGBMultiplyRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1,
+                              uint8* dst_argb, int width);
 
 // ARGB add images.
 void ARGBAddRow_C(const uint8* src_argb, const uint8* src_argb1,
@@ -984,6 +988,10 @@ void ARGBAddRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
                      uint8* dst_argb, int width);
 void ARGBAddRow_Any_SSE2(const uint8* src_argb, const uint8* src_argb1,
                          uint8* dst_argb, int width);
+void ARGBAddRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+                     uint8* dst_argb, int width);
+void ARGBAddRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1,
+                         uint8* dst_argb, int width);
 
 void ARGBToRGB24Row_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
 void ARGBToRAWRow_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
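These declarations follow libyuv's row-function convention: a portable _C version, a SIMD kernel (_SSE2, _NEON) that processes a fixed block of pixels per iteration, and an _Any_ wrapper that accepts arbitrary widths by combining the two. As a rough model of the multiply arithmetic only (the name ARGBMultiplyRow_sketch is illustrative, not libyuv's actual _C code), here is a scalar version that mirrors the (a * b) >> 8 narrowing the NEON kernel below performs with vmull.u8 plus a #8 shift:

#include <stdint.h>

/* Illustrative scalar model of the per-channel multiply: widen to 16 bits,
 * keep the high byte. Not libyuv's ARGBMultiplyRow_C. */
static void ARGBMultiplyRow_sketch(const uint8_t* src_argb0,
                                   const uint8_t* src_argb1,
                                   uint8_t* dst_argb, int width) {
  for (int i = 0; i < width * 4; ++i) {  /* 4 bytes (B, G, R, A) per pixel */
    dst_argb[i] = (uint8_t)((src_argb0[i] * src_argb1[i]) >> 8);
  }
}

Since 255 * 255 >> 8 is at most 254, the saturating narrow in the NEON path never actually clamps; a plain shift matches it exactly.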
include/libyuv/version.h
@@ -11,6 +11,6 @@
 #ifndef INCLUDE_LIBYUV_VERSION_H_  // NOLINT
 #define INCLUDE_LIBYUV_VERSION_H_
 
-#define LIBYUV_VERSION 544
+#define LIBYUV_VERSION 545
 
 #endif  // INCLUDE_LIBYUV_VERSION_H_  NOLINT
source/planar_functions.cc
@@ -433,8 +433,11 @@ int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0,
     }
   }
 #elif defined(HAS_ARGBMULTIPLYROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
-    ARGBMultiplyRow = ARGBMultiplyRow_NEON;
+  if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+    ARGBMultiplyRow = ARGBMultiplyRow_Any_NEON;
+    if (IS_ALIGNED(width, 8)) {
+      ARGBMultiplyRow = ARGBMultiplyRow_NEON;
+    }
   }
 #endif
@@ -477,8 +480,11 @@ int ARGBAdd(const uint8* src_argb0, int src_stride_argb0,
     }
   }
 #elif defined(HAS_ARGBADDROW_NEON)
-  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
-    ARGBAddRow = ARGBAddRow_NEON;
+  if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+    ARGBAddRow = ARGBAddRow_Any_NEON;
+    if (IS_ALIGNED(width, 8)) {
+      ARGBAddRow = ARGBAddRow_NEON;
+    }
   }
 #endif
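In both dispatchers the guard changes from IS_ALIGNED(width, 4) to width >= 8 because the new kernels consume 8 pixels per iteration: any width of at least 8 can take the _Any_NEON wrapper, which runs the NEON kernel on the largest multiple of 8 and finishes the remainder in C, while an exact multiple of 8 calls the bare _NEON kernel, which has no tail handling. A small self-contained sketch of that split (plain C, illustrative only):

#include <stdio.h>

/* Show how the MASK = 7 wrappers divide a row between the NEON kernel
 * (width & ~7 pixels) and the C tail (width & 7 pixels). */
int main(void) {
  for (int width = 8; width <= 11; ++width) {
    printf("width %2d -> NEON %2d pixels, C tail %d pixels\n",
           width, width & ~7, width & 7);
  }
  return 0;
}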
source/row_any.cc
@@ -372,12 +372,12 @@ MERGEUVROW_ANY(MergeUVRow_Any_NEON, MergeUVRow_NEON, MergeUVRow_C, 15)
 #endif
 #undef MERGEUVROW_ANY
 
-#define MATHROW_ANY(NAMEANY, ARGBMULT_SIMD, ARGBMULT_C, MASK)                 \
+#define MATHROW_ANY(NAMEANY, ARGBMATH_SIMD, ARGBMATH_C, MASK)                 \
     void NAMEANY(const uint8* src_argb0, const uint8* src_argb1,             \
                  uint8* dst_argb, int width) {                               \
       int n = width & ~MASK;                                                 \
-      ARGBMULT_SIMD(src_argb0, src_argb1, dst_argb, n);                      \
-      ARGBMULT_C(src_argb0 + n * 4,                                          \
+      ARGBMATH_SIMD(src_argb0, src_argb1, dst_argb, n);                      \
+      ARGBMATH_C(src_argb0 + n * 4,                                          \
                  src_argb1 + n * 4,                                          \
                  dst_argb + n * 4,                                           \
                  width & MASK);                                              \
@@ -390,6 +390,13 @@ MATHROW_ANY(ARGBMultiplyRow_Any_SSE2, ARGBMultiplyRow_SSE2, ARGBMultiplyRow_C,
 #ifdef HAS_ARGBADDROW_SSE2
 MATHROW_ANY(ARGBAddRow_Any_SSE2, ARGBAddRow_SSE2, ARGBAddRow_C, 3)
 #endif
+#ifdef HAS_ARGBMULTIPLYROW_NEON
+MATHROW_ANY(ARGBMultiplyRow_Any_NEON, ARGBMultiplyRow_NEON, ARGBMultiplyRow_C,
+            7)
+#endif
+#ifdef HAS_ARGBADDROW_NEON
+MATHROW_ANY(ARGBAddRow_Any_NEON, ARGBAddRow_NEON, ARGBAddRow_C, 7)
+#endif
 #undef MATHROW_ANY
 
 #ifdef __cplusplus
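For reference, MATHROW_ANY(ARGBAddRow_Any_NEON, ARGBAddRow_NEON, ARGBAddRow_C, 7) expands to the following (reformatted, with explanatory comments added; uint8 is libyuv's byte typedef, and n * 4 steps over 4-byte ARGB pixels):

void ARGBAddRow_Any_NEON(const uint8* src_argb0, const uint8* src_argb1,
                         uint8* dst_argb, int width) {
  int n = width & ~7;                      // largest multiple of 8.
  ARGBAddRow_NEON(src_argb0, src_argb1, dst_argb, n);
  ARGBAddRow_C(src_argb0 + n * 4,          // skip the pixels NEON handled.
               src_argb1 + n * 4,
               dst_argb + n * 4,
               width & 7);                 // 0..7 leftover pixels.
}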
source/row_neon.cc
@@ -2502,6 +2502,60 @@ void ARGBColorMatrixRow_NEON(uint8* dst_argb, const int8* matrix_argb,
   );
 }
 
+// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+                          uint8* dst_argb, int width) {
+  asm volatile (
+    // 8 pixel loop.
+    ".p2align   2                              \n"
+  "1:                                          \n"
+    "vld4.8     {d0, d2, d4, d6}, [%0]!        \n"  // load 8 ARGB pixels.
+    "vld4.8     {d1, d3, d5, d7}, [%1]!        \n"  // load 8 more ARGB pixels.
+    "subs       %3, %3, #8                     \n"  // 8 processed per loop.
+    "vmull.u8   q0, d0, d1                     \n"  // multiply B
+    "vmull.u8   q1, d2, d3                     \n"  // multiply G
+    "vmull.u8   q2, d4, d5                     \n"  // multiply R
+    "vmull.u8   q3, d6, d7                     \n"  // multiply A
+    "vqshrun.u16 d0, q0, #8                    \n"  // 16 bit to 8 bit B
+    "vqshrun.u16 d1, q1, #8                    \n"  // 16 bit to 8 bit G
+    "vqshrun.u16 d2, q2, #8                    \n"  // 16 bit to 8 bit R
+    "vqshrun.u16 d3, q3, #8                    \n"  // 16 bit to 8 bit A
+    "vst4.8     {d0, d1, d2, d3}, [%2]!        \n"  // store 8 ARGB pixels.
+    "bgt        1b                             \n"
+  : "+r"(src_argb0),  // %0
+    "+r"(src_argb1),  // %1
+    "+r"(dst_argb),   // %2
+    "+r"(width)       // %3
+  :
+  : "cc", "memory", "q0", "q1", "q2", "q3"
+  );
+}
+
+// Add 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+                     uint8* dst_argb, int width) {
+  asm volatile (
+    // 8 pixel loop.
+    ".p2align   2                              \n"
+  "1:                                          \n"
+    "vld4.8     {d0, d1, d2, d3}, [%0]!        \n"  // load 8 ARGB pixels.
+    "vld4.8     {d4, d5, d6, d7}, [%1]!        \n"  // load 8 more ARGB pixels.
+    "subs       %3, %3, #8                     \n"  // 8 processed per loop.
+    "vqadd.u8   q0, q0, q2                     \n"  // add B, G
+    "vqadd.u8   q1, q1, q3                     \n"  // add R, A
+    "vst4.8     {d0, d1, d2, d3}, [%2]!        \n"  // store 8 ARGB pixels.
+    "bgt        1b                             \n"
+  : "+r"(src_argb0),  // %0
+    "+r"(src_argb1),  // %1
+    "+r"(dst_argb),   // %2
+    "+r"(width)       // %3
+  :
+  : "cc", "memory", "q0", "q1", "q2", "q3"
+  );
+}
+
 #endif  // __ARM_NEON__
 
 #ifdef __cplusplus
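For readers new to NEON: vqadd.u8 performs a per-byte unsigned saturating add across 16 bytes of a q register, which is why two instructions cover all four channels of 8 pixels. A scalar model of a single lane (the helper name qadd_u8 is illustrative, not libyuv's ARGBAddRow_C):

#include <stdint.h>

/* One lane of "vqadd.u8": unsigned add clamped to 255 instead of wrapping. */
static inline uint8_t qadd_u8(uint8_t a, uint8_t b) {
  int sum = (int)a + (int)b;
  return (uint8_t)(sum > 255 ? 255 : sum);
}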