Commit da0c29da authored by Frank Barchard

Add MSA optimized ARGBToRGB565Row_MSA, ARGBToARGB1555Row_MSA, ARGBToARGB4444Row_MSA, ARGBToUV444Row_MSA functions

R=fbarchard@google.com
BUG=libyuv:634

Performance Gain (vs. auto-vectorized C)
ARGBToRGB565Row_MSA       - ~1.6x
ARGBToRGB565Row_Any_MSA   - ~1.6x
ARGBToARGB1555Row_MSA     - ~1.3x
ARGBToARGB1555Row_Any_MSA - ~1.3x
ARGBToARGB4444Row_MSA     - ~3.8x
ARGBToARGB4444Row_Any_MSA - ~3.8x
ARGBToUV444Row_MSA        - ~2.4x
ARGBToUV444Row_Any_MSA    - ~2.4x

Performance Gain (vs. non-vectorized C)
ARGBToRGB565Row_MSA       - ~2.8x
ARGBToRGB565Row_Any_MSA   - ~2.8x
ARGBToARGB1555Row_MSA     - ~2.2x
ARGBToARGB1555Row_Any_MSA - ~2.2x
ARGBToARGB4444Row_MSA     - ~6.8x
ARGBToARGB4444Row_Any_MSA - ~6.6x
ARGBToUV444Row_MSA        - ~6.7x
ARGBToUV444Row_Any_MSA    - ~6.7x
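
For reference, a minimal scalar sketch of the per-pixel packing these row
functions perform, assuming libyuv's in-memory ARGB byte order (B, G, R, A).
This is illustrative only and is not code added by this change:

  #include <stdint.h>

  /* src points at one ARGB pixel stored as bytes B, G, R, A. */
  static uint16_t PackRGB565(const uint8_t* src) {
    return (uint16_t)((src[0] >> 3) |          /* 5-bit blue  */
                      ((src[1] >> 2) << 5) |   /* 6-bit green */
                      ((src[2] >> 3) << 11));  /* 5-bit red   */
  }

  static uint16_t PackARGB1555(const uint8_t* src) {
    return (uint16_t)((src[0] >> 3) |          /* 5-bit blue  */
                      ((src[1] >> 3) << 5) |   /* 5-bit green */
                      ((src[2] >> 3) << 10) |  /* 5-bit red   */
                      ((src[3] >> 7) << 15));  /* 1-bit alpha */
  }

  static uint16_t PackARGB4444(const uint8_t* src) {
    return (uint16_t)((src[0] >> 4) |          /* 4-bit blue  */
                      ((src[1] >> 4) << 4) |   /* 4-bit green */
                      ((src[2] >> 4) << 8) |   /* 4-bit red   */
                      ((src[3] >> 4) << 12));  /* 4-bit alpha */
  }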

Review URL: https://codereview.chromium.org/2520003004 .
parent b1504a8e
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 1634
Version: 1635
License: BSD
License File: LICENSE
@@ -386,6 +386,10 @@ extern "C" {
#define HAS_I422TORGB24ROW_MSA
#define HAS_ARGBTORGB24ROW_MSA
#define HAS_ARGBTORAWROW_MSA
#define HAS_ARGBTORGB565ROW_MSA
#define HAS_ARGBTOARGB1555ROW_MSA
#define HAS_ARGBTOARGB4444ROW_MSA
#define HAS_ARGBTOUV444ROW_MSA
#endif
#if defined(_MSC_VER) && !defined(__CLR_VER) && !defined(__clang__)
@@ -714,6 +718,10 @@ void ARGBToUVRow_NEON(const uint8* src_argb,
uint8* dst_u,
uint8* dst_v,
int width);
void ARGBToUV444Row_MSA(const uint8* src_argb,
uint8* dst_u,
uint8* dst_v,
int width);
void ARGBToUVRow_MSA(const uint8* src_argb,
int src_stride_argb,
uint8* dst_u,
@@ -884,6 +892,10 @@ void ARGBToUVRow_Any_NEON(const uint8* src_argb,
uint8* dst_u,
uint8* dst_v,
int width);
void ARGBToUV444Row_Any_MSA(const uint8* src_argb,
uint8* dst_u,
uint8* dst_v,
int width);
void ARGBToUVRow_Any_MSA(const uint8* src_argb,
int src_stride_argb,
uint8* dst_u,
@@ -1308,6 +1320,9 @@ void ARGBToRGB565DitherRow_NEON(const uint8* src_argb,
int width);
void ARGBToRGB24Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRAWRow_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRGB565Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToARGB1555Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToARGB4444Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRGBARow_C(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRGB24Row_C(const uint8* src_argb, uint8* dst_rgb, int width);
@@ -1898,6 +1913,13 @@ void ARGBToRGB565DitherRow_Any_NEON(const uint8* src_argb,
int width);
void ARGBToRGB24Row_Any_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRAWRow_Any_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToRGB565Row_Any_MSA(const uint8* src_argb, uint8* dst_rgb, int width);
void ARGBToARGB1555Row_Any_MSA(const uint8* src_argb,
uint8* dst_rgb,
int width);
void ARGBToARGB4444Row_Any_MSA(const uint8* src_argb,
uint8* dst_rgb,
int width);
void I444ToARGBRow_Any_NEON(const uint8* src_y,
const uint8* src_u,
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1634
#define LIBYUV_VERSION 1635
#endif // INCLUDE_LIBYUV_VERSION_H_
@@ -68,6 +68,14 @@ int ARGBToI444(const uint8* src_argb,
}
}
#endif
#if defined(HAS_ARGBTOUV444ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBToUV444Row = ARGBToUV444Row_Any_MSA;
if (IS_ALIGNED(width, 16)) {
ARGBToUV444Row = ARGBToUV444Row_MSA;
}
}
#endif
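/*
 * Editorial note (not part of this change): this is libyuv's usual runtime
 * dispatch pattern, repeated for each new MSA row below. The _Any_MSA
 * wrapper handles any width, so it is installed first; the bare _MSA kernel
 * replaces it only when the width is a multiple of the kernel's step
 * (16 pixels for ARGBToUV444Row_MSA, 8 pixels for the RGB565 / ARGB1555 /
 * ARGB4444 packers).
 */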
#if defined(HAS_ARGBTOYROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
ARGBToYRow = ARGBToYRow_Any_SSSE3;
@@ -1029,6 +1037,14 @@ int ARGBToRGB565(const uint8* src_argb,
}
}
#endif
#if defined(HAS_ARGBTORGB565ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBToRGB565Row = ARGBToRGB565Row_Any_MSA;
if (IS_ALIGNED(width, 8)) {
ARGBToRGB565Row = ARGBToRGB565Row_MSA;
}
}
#endif
for (y = 0; y < height; ++y) {
ARGBToRGB565Row(src_argb, dst_rgb565, width);
@@ -1087,6 +1103,14 @@ int ARGBToARGB1555(const uint8* src_argb,
}
}
#endif
#if defined(HAS_ARGBTOARGB1555ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBToARGB1555Row = ARGBToARGB1555Row_Any_MSA;
if (IS_ALIGNED(width, 8)) {
ARGBToARGB1555Row = ARGBToARGB1555Row_MSA;
}
}
#endif
for (y = 0; y < height; ++y) {
ARGBToARGB1555Row(src_argb, dst_argb1555, width);
@@ -1145,6 +1169,14 @@ int ARGBToARGB4444(const uint8* src_argb,
}
}
#endif
#if defined(HAS_ARGBTOARGB4444ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBToARGB4444Row = ARGBToARGB4444Row_Any_MSA;
if (IS_ALIGNED(width, 8)) {
ARGBToARGB4444Row = ARGBToARGB4444Row_MSA;
}
}
#endif
for (y = 0; y < height; ++y) {
ARGBToARGB4444Row(src_argb, dst_argb4444, width);
@@ -383,6 +383,9 @@ ANY11(I400ToARGBRow_Any_NEON, I400ToARGBRow_NEON, 0, 1, 4, 7)
#if defined(HAS_ARGBTORGB24ROW_MSA)
ANY11(ARGBToRGB24Row_Any_MSA, ARGBToRGB24Row_MSA, 0, 4, 3, 15)
ANY11(ARGBToRAWRow_Any_MSA, ARGBToRAWRow_MSA, 0, 4, 3, 15)
ANY11(ARGBToRGB565Row_Any_MSA, ARGBToRGB565Row_MSA, 0, 4, 2, 7)
ANY11(ARGBToARGB1555Row_Any_MSA, ARGBToARGB1555Row_MSA, 0, 4, 2, 7)
ANY11(ARGBToARGB4444Row_Any_MSA, ARGBToARGB4444Row_MSA, 0, 4, 2, 7)
#endif
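/*
 * Editorial sketch (not part of this change): approximately what an ANY11()
 * instantiation such as ANY11(ARGBToRGB565Row_Any_MSA, ARGBToRGB565Row_MSA,
 * 0, 4, 2, 7) provides. The last macro argument is a width mask, so MASK = 7
 * above means the MSA kernel consumes 8 pixels per step; the wrapper runs
 * the kernel on the multiple-of-8 prefix, then pushes the remaining 1-7
 * pixels through a zero-padded scratch buffer so the kernel never reads or
 * writes past the end of the row. The function name and buffer size below
 * are illustrative assumptions; the real code is generated by the ANY11
 * macro defined earlier in this file.
 */
static void ARGBToRGB565Row_Any_MSA_Sketch(const uint8* src_argb,
                                           uint8* dst_rgb,
                                           int width) {
  SIMD_ALIGNED(uint8 temp[128 * 2]);  /* scratch: 128 bytes in, 128 out */
  int r = width & 7;                  /* leftover pixels */
  int n = width & ~7;                 /* largest multiple of 8 */
  if (n > 0) {
    ARGBToRGB565Row_MSA(src_argb, dst_rgb, n);    /* vector portion */
  }
  if (r > 0) {
    memset(temp, 0, 128);                         /* pad the partial step */
    memcpy(temp, src_argb + n * 4, r * 4);        /* 4 bytes per ARGB pixel */
    ARGBToRGB565Row_MSA(temp, temp + 128, 8);     /* one full 8-pixel step */
    memcpy(dst_rgb + n * 2, temp + 128, r * 2);   /* 2 bytes per RGB565 pixel */
  }
}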
#if defined(HAS_RAWTORGB24ROW_NEON)
ANY11(RAWToRGB24Row_Any_NEON, RAWToRGB24Row_NEON, 0, 3, 3, 7)
@@ -786,6 +789,7 @@ ANY12(YUY2ToUV422Row_Any_NEON, YUY2ToUV422Row_NEON, 1, 4, 1, 15)
ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15)
#endif
#ifdef HAS_YUY2TOUV422ROW_MSA
ANY12(ARGBToUV444Row_Any_MSA, ARGBToUV444Row_MSA, 0, 4, 0, 15)
ANY12(YUY2ToUV422Row_Any_MSA, YUY2ToUV422Row_MSA, 1, 4, 1, 31)
ANY12(UYVYToUV422Row_Any_MSA, UYVYToUV422Row_MSA, 1, 4, 1, 31)
#endif
@@ -788,6 +788,175 @@ void ARGBToRAWRow_MSA(const uint8* src_argb, uint8* dst_rgb, int width) {
}
}
void ARGBToRGB565Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width) {
int x;
v16u8 src0, src1, dst0;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 zero = {0};
for (x = 0; x < width; x += 8) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
vec0 = (v16u8)__msa_srai_b((v16i8)src0, 3);
vec1 = (v16u8)__msa_slli_b((v16i8)src0, 3);
vec2 = (v16u8)__msa_srai_b((v16i8)src0, 5);
vec4 = (v16u8)__msa_srai_b((v16i8)src1, 3);
vec5 = (v16u8)__msa_slli_b((v16i8)src1, 3);
vec6 = (v16u8)__msa_srai_b((v16i8)src1, 5);
vec1 = (v16u8)__msa_sldi_b(zero, (v16i8)vec1, 1);
vec2 = (v16u8)__msa_sldi_b(zero, (v16i8)vec2, 1);
vec5 = (v16u8)__msa_sldi_b(zero, (v16i8)vec5, 1);
vec6 = (v16u8)__msa_sldi_b(zero, (v16i8)vec6, 1);
vec3 = (v16u8)__msa_sldi_b(zero, (v16i8)src0, 2);
vec7 = (v16u8)__msa_sldi_b(zero, (v16i8)src1, 2);
vec0 = __msa_binsli_b(vec0, vec1, 2);
vec1 = __msa_binsli_b(vec2, vec3, 4);
vec4 = __msa_binsli_b(vec4, vec5, 2);
vec5 = __msa_binsli_b(vec6, vec7, 4);
vec0 = (v16u8)__msa_ilvev_b((v16i8)vec1, (v16i8)vec0);
vec4 = (v16u8)__msa_ilvev_b((v16i8)vec5, (v16i8)vec4);
dst0 = (v16u8)__msa_pckev_h((v8i16)vec4, (v8i16)vec0);
ST_UB(dst0, dst_rgb);
src_argb += 32;
dst_rgb += 16;
}
}
void ARGBToARGB1555Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width) {
int x;
v16u8 src0, src1, dst0;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
v16i8 zero = {0};
for (x = 0; x < width; x += 8) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
vec0 = (v16u8)__msa_srai_b((v16i8)src0, 3);
vec1 = (v16u8)__msa_slli_b((v16i8)src0, 2);
vec2 = (v16u8)__msa_srai_b((v16i8)vec0, 3);
vec1 = (v16u8)__msa_sldi_b(zero, (v16i8)vec1, 1);
vec2 = (v16u8)__msa_sldi_b(zero, (v16i8)vec2, 1);
vec3 = (v16u8)__msa_srai_b((v16i8)src0, 1);
vec5 = (v16u8)__msa_srai_b((v16i8)src1, 3);
vec6 = (v16u8)__msa_slli_b((v16i8)src1, 2);
vec7 = (v16u8)__msa_srai_b((v16i8)vec5, 3);
vec6 = (v16u8)__msa_sldi_b(zero, (v16i8)vec6, 1);
vec7 = (v16u8)__msa_sldi_b(zero, (v16i8)vec7, 1);
vec8 = (v16u8)__msa_srai_b((v16i8)src1, 1);
vec3 = (v16u8)__msa_sldi_b(zero, (v16i8)vec3, 2);
vec8 = (v16u8)__msa_sldi_b(zero, (v16i8)vec8, 2);
vec4 = (v16u8)__msa_sldi_b(zero, (v16i8)src0, 3);
vec9 = (v16u8)__msa_sldi_b(zero, (v16i8)src1, 3);
vec0 = __msa_binsli_b(vec0, vec1, 2);
vec5 = __msa_binsli_b(vec5, vec6, 2);
vec1 = __msa_binsli_b(vec2, vec3, 5);
vec6 = __msa_binsli_b(vec7, vec8, 5);
vec1 = __msa_binsli_b(vec1, vec4, 0);
vec6 = __msa_binsli_b(vec6, vec9, 0);
vec0 = (v16u8)__msa_ilvev_b((v16i8)vec1, (v16i8)vec0);
vec1 = (v16u8)__msa_ilvev_b((v16i8)vec6, (v16i8)vec5);
dst0 = (v16u8)__msa_pckev_h((v8i16)vec1, (v8i16)vec0);
ST_UB(dst0, dst_rgb);
src_argb += 32;
dst_rgb += 16;
}
}
void ARGBToARGB4444Row_MSA(const uint8* src_argb, uint8* dst_rgb, int width) {
int x;
v16u8 src0, src1;
v16u8 vec0, vec1;
v16u8 dst0;
v16i8 zero = {0};
for (x = 0; x < width; x += 8) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
vec0 = (v16u8)__msa_srai_b((v16i8)src0, 4);
vec1 = (v16u8)__msa_srai_b((v16i8)src1, 4);
src0 = (v16u8)__msa_sldi_b(zero, (v16i8)src0, 1);
src1 = (v16u8)__msa_sldi_b(zero, (v16i8)src1, 1);
vec0 = __msa_binsli_b(vec0, src0, 3);
vec1 = __msa_binsli_b(vec1, src1, 3);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
ST_UB(dst0, dst_rgb);
src_argb += 32;
dst_rgb += 16;
}
}
void ARGBToUV444Row_MSA(const uint8* src_argb,
uint8* dst_u,
uint8* dst_v,
int32 width) {
int32 x;
v16u8 src0, src1, src2, src3, reg0, reg1, reg2, reg3, dst0, dst1;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8u16 vec8, vec9, vec10, vec11;
v8u16 const_112 = (v8u16)__msa_ldi_h(112);
v8u16 const_74 = (v8u16)__msa_ldi_h(74);
v8u16 const_38 = (v8u16)__msa_ldi_h(38);
v8u16 const_94 = (v8u16)__msa_ldi_h(94);
v8u16 const_18 = (v8u16)__msa_ldi_h(18);
v8u16 const_32896 = (v8u16)__msa_fill_h(32896);
v16i8 zero = {0};
for (x = width; x > 0; x -= 16) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_argb, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_argb, 48);
reg0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
reg1 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
reg2 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
reg3 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
src0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
src1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
src2 = (v16u8)__msa_pckod_b((v16i8)reg1, (v16i8)reg0);
vec0 = (v8u16)__msa_ilvr_b(zero, (v16i8)src0);
vec1 = (v8u16)__msa_ilvl_b(zero, (v16i8)src0);
vec2 = (v8u16)__msa_ilvr_b(zero, (v16i8)src1);
vec3 = (v8u16)__msa_ilvl_b(zero, (v16i8)src1);
vec4 = (v8u16)__msa_ilvr_b(zero, (v16i8)src2);
vec5 = (v8u16)__msa_ilvl_b(zero, (v16i8)src2);
vec10 = vec0 * const_18;
vec11 = vec1 * const_18;
vec8 = vec2 * const_94;
vec9 = vec3 * const_94;
vec6 = vec4 * const_112;
vec7 = vec5 * const_112;
vec0 *= const_112;
vec1 *= const_112;
vec2 *= const_74;
vec3 *= const_74;
vec4 *= const_38;
vec5 *= const_38;
vec8 += vec10;
vec9 += vec11;
vec6 += const_32896;
vec7 += const_32896;
vec0 += const_32896;
vec1 += const_32896;
vec2 += vec4;
vec3 += vec5;
vec0 -= vec2;
vec1 -= vec3;
vec6 -= vec8;
vec7 -= vec9;
vec0 = (v8u16)__msa_srai_h((v8i16)vec0, 8);
vec1 = (v8u16)__msa_srai_h((v8i16)vec1, 8);
vec6 = (v8u16)__msa_srai_h((v8i16)vec6, 8);
vec7 = (v8u16)__msa_srai_h((v8i16)vec7, 8);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
dst1 = (v16u8)__msa_pckev_b((v16i8)vec7, (v16i8)vec6);
ST_UB(dst0, dst_u);
ST_UB(dst1, dst_v);
src_argb += 64;
dst_u += 16;
dst_v += 16;
}
}
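/*
 * Editorial sketch (not part of this change): the constants above are
 * libyuv's fixed-point BT.601-style chroma coefficients, and 32896 == 0x8080
 * supplies the +128 chroma bias plus rounding in 8.8 fixed point. The
 * pckev/pckod shuffles only deinterleave the B, G and R bytes so the
 * multiplies can run on 16 pixels per iteration. Per pixel, the kernel
 * computes approximately the scalar equivalent below (name is illustrative):
 */
static void ARGBPixelToUV444_Sketch(const uint8* argb /* B, G, R, A */,
                                    uint8* u,
                                    uint8* v) {
  int b = argb[0];
  int g = argb[1];
  int r = argb[2];
  *u = (uint8)((112 * b - 74 * g - 38 * r + 0x8080) >> 8);
  *v = (uint8)((112 * r - 94 * g - 18 * b + 0x8080) >> 8);
}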
void ARGB4444ToARGBRow_MSA(const uint8* src_argb4444,
uint8* dst_argb,
int width) {