Commit 95502242 authored by Vitaly Tuzov

meanStdDev() implementation updated to use wide universal intrinsics

parent e51d7cb3
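
For context, the "wide universal intrinsics" (v_uint16, vx_load_expand, v_zip, v_dotprod, v_reduce_sum, ...) are OpenCV's portable SIMD layer: the same source compiles to SSE2, AVX2, NEON or VSX, and the lane count (v_uint8::nlanes etc.) is set by CV_SIMD_WIDTH at build time instead of being hard-coded to 128-bit XMM registers as in the SSE2 code removed below. A minimal sketch of the widen-then-dotprod accumulation pattern the diff relies on (illustrative only, not part of the commit; sumSqrSketch is a made-up name, and len is assumed small enough that the 32-bit square sum cannot overflow):

#include <opencv2/core.hpp>
#include <opencv2/core/hal/intrin.hpp>

using namespace cv;

// Accumulate the sum and the sum of squares of a uchar buffer.
static void sumSqrSketch(const uchar* src, int len, int& sum, int& sqsum)
{
    int x = 0;
#if CV_SIMD
    v_uint32 v_sum = vx_setzero_u32();
    v_int32 v_sqsum = vx_setzero_s32();
    for ( ; x <= len - v_uint16::nlanes; x += v_uint16::nlanes)
    {
        v_uint16 v_src = vx_load_expand(src + x);  // widen 8-bit -> 16-bit lanes
        v_uint32 lo, hi;
        v_expand(v_src, lo, hi);                   // widen 16-bit -> 32-bit lanes
        v_sum += lo + hi;
        v_int16 s = v_reinterpret_as_s16(v_src);   // values <= 255, so reinterpret is safe
        v_sqsum += v_dotprod(s, s);                // 16x16 -> 32-bit squares, summed in pairs
    }
    sum += (int)v_reduce_sum(v_sum);
    sqsum += v_reduce_sum(v_sqsum);
    v_cleanup();
#endif
    for ( ; x < len; ++x)                          // scalar tail for the remainder
    {
        sum += src[x];
        sqsum += src[x] * src[x];
    }
}

The committed code avoids the per-iteration v_expand above by staging partial sums in 16-bit lanes (v_sum16) and widening only once per block, which is what the 256*nlanes block bound is about (see the note after the first hunk).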
@@ -180,66 +180,71 @@ struct SumSqr_SIMD
     }
 };

-template <typename T>
-inline void addSqrChannels(T * sum, T * sqsum, T * buf, int cn)
-{
-    for (int i = 0; i < 4; ++i)
-    {
-        sum[i % cn] += buf[i];
-        sqsum[i % cn] += buf[4 + i];
-    }
-}
-
-#if CV_SSE2
+#if CV_SIMD

 template <>
 struct SumSqr_SIMD<uchar, int, int>
 {
     int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
     {
-        if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
+        if (mask || (cn != 1 && cn != 2 && cn != 4))
             return 0;
+        len *= cn;

         int x = 0;
-        __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
-        const int len_16 = len & ~15;
-        for ( ; x <= len_16 - 16; )
+        v_int32 v_sum = vx_setzero_s32();
+        v_int32 v_sqsum = vx_setzero_s32();
+
+        const int len0 = len & -v_uint8::nlanes;
+        while(x < len0)
         {
-            const int len_tmp = min(x + 2048, len_16);
-            __m128i v_sum_tmp = v_zero;
-            for ( ; x <= len_tmp - 16; x += 16)
+            const int len_tmp = min(x + 256*v_uint16::nlanes, len0);
+            v_uint16 v_sum16 = vx_setzero_u16();
+            for ( ; x < len_tmp; x += v_uint8::nlanes)
             {
-                __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
-                __m128i v_half_0 = _mm_unpacklo_epi8(v_src, v_zero);
-                __m128i v_half_1 = _mm_unpackhi_epi8(v_src, v_zero);
-                v_sum_tmp = _mm_add_epi16(v_sum_tmp, _mm_add_epi16(v_half_0, v_half_1));
-                __m128i v_half_2 = _mm_unpacklo_epi16(v_half_0, v_half_1);
-                __m128i v_half_3 = _mm_unpackhi_epi16(v_half_0, v_half_1);
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_2, v_half_2));
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_3, v_half_3));
+                v_uint16 v_src0 = vx_load_expand(src0 + x);
+                v_uint16 v_src1 = vx_load_expand(src0 + x + v_uint16::nlanes);
+                v_sum16 += v_src0 + v_src1;
+                v_int16 v_tmp0, v_tmp1;
+                v_zip(v_reinterpret_as_s16(v_src0), v_reinterpret_as_s16(v_src1), v_tmp0, v_tmp1);
+                v_sqsum += v_dotprod(v_tmp0, v_tmp0) + v_dotprod(v_tmp1, v_tmp1);
             }
-            v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_sum_tmp, v_zero));
-            v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_sum_tmp, v_zero));
+            v_uint32 v_half0, v_half1;
+            v_expand(v_sum16, v_half0, v_half1);
+            v_sum += v_reinterpret_as_s32(v_half0 + v_half1);
         }
-
-        for ( ; x <= len - 8; x += 8)
+        if (x <= len - v_uint16::nlanes)
         {
-            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero);
-            __m128i v_half_0 = _mm_unpackhi_epi64(v_src, v_src);
-            __m128i v_sum_tmp = _mm_add_epi16(v_src, v_half_0);
-            __m128i v_half_1 = _mm_unpacklo_epi16(v_src, v_half_0);
-
-            v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_sum_tmp, v_zero));
-            v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_1, v_half_1));
+            v_uint16 v_src = vx_load_expand(src0 + x);
+            v_uint16 v_half = v_combine_high(v_src, v_src);
+
+            v_uint32 v_tmp0, v_tmp1;
+            v_expand(v_src + v_half, v_tmp0, v_tmp1);
+            v_sum += v_reinterpret_as_s32(v_tmp0);
+
+            v_int16 v_tmp2, v_tmp3;
+            v_zip(v_reinterpret_as_s16(v_src), v_reinterpret_as_s16(v_half), v_tmp2, v_tmp3);
+            v_sqsum += v_dotprod(v_tmp2, v_tmp2);
+            x += v_uint16::nlanes;
         }
-
-        int CV_DECL_ALIGNED(16) ar[8];
-        _mm_store_si128((__m128i*)ar, v_sum);
-        _mm_store_si128((__m128i*)(ar + 4), v_sqsum);
-
-        addSqrChannels(sum, sqsum, ar, cn);
+        if (cn == 1)
+        {
+            *sum += v_reduce_sum(v_sum);
+            *sqsum += v_reduce_sum(v_sqsum);
+        }
+        else
+        {
+            int CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * v_int32::nlanes];
+            v_store(ar, v_sum);
+            v_store(ar + v_int32::nlanes, v_sqsum);
+            for (int i = 0; i < v_int32::nlanes; ++i)
+            {
+                sum[i % cn] += ar[i];
+                sqsum[i % cn] += ar[v_int32::nlanes + i];
+            }
+        }
+        v_cleanup();
         return x / cn;
     }
 };
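
Note on the min(x + 256*v_uint16::nlanes, len0) block bound above: x advances by v_uint8::nlanes = 2 * v_uint16::nlanes per inner iteration, so each block runs at most 128 iterations, and each 16-bit lane of v_sum16 absorbs two uchar values per iteration. The worst case per lane is therefore 128 * (255 + 255) = 65280 <= 65535, so the 16-bit partial sums cannot wrap before being widened to 32 bits and flushed into v_sum. The same bound keeps the schar variant in the next hunk inside int16 range: 128 * (127 + 127) = 32512 <= 32767, and the negative extreme 128 * (-128 - 128) = -32768 lands exactly on INT16_MIN.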
@@ -249,49 +254,64 @@ struct SumSqr_SIMD<schar, int, int>
 {
     int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
     {
-        if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
+        if (mask || (cn != 1 && cn != 2 && cn != 4))
             return 0;
+        len *= cn;

         int x = 0;
-        __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
-        const int len_16 = len & ~15;
-        for ( ; x <= len_16 - 16; )
+        v_int32 v_sum = vx_setzero_s32();
+        v_int32 v_sqsum = vx_setzero_s32();
+
+        const int len0 = len & -v_int8::nlanes;
+        while (x < len0)
         {
-            const int len_tmp = min(x + 2048, len_16);
-            __m128i v_sum_tmp = v_zero;
-            for ( ; x <= len_tmp - 16; x += 16)
+            const int len_tmp = min(x + 256 * v_int16::nlanes, len0);
+            v_int16 v_sum16 = vx_setzero_s16();
+            for (; x < len_tmp; x += v_int8::nlanes)
             {
-                __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
-                __m128i v_half_0 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
-                __m128i v_half_1 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
-                v_sum_tmp = _mm_add_epi16(v_sum_tmp, _mm_add_epi16(v_half_0, v_half_1));
-                __m128i v_half_2 = _mm_unpacklo_epi16(v_half_0, v_half_1);
-                __m128i v_half_3 = _mm_unpackhi_epi16(v_half_0, v_half_1);
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_2, v_half_2));
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_3, v_half_3));
+                v_int16 v_src0 = vx_load_expand(src0 + x);
+                v_int16 v_src1 = vx_load_expand(src0 + x + v_int16::nlanes);
+                v_sum16 += v_src0 + v_src1;
+                v_int16 v_tmp0, v_tmp1;
+                v_zip(v_src0, v_src1, v_tmp0, v_tmp1);
+                v_sqsum += v_dotprod(v_tmp0, v_tmp0) + v_dotprod(v_tmp1, v_tmp1);
             }
-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_sum_tmp), 16));
-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_sum_tmp), 16));
+            v_int32 v_half0, v_half1;
+            v_expand(v_sum16, v_half0, v_half1);
+            v_sum += v_half0 + v_half1;
         }
-
-        for ( ; x <= len - 8; x += 8)
+        if (x <= len - v_int16::nlanes)
         {
-            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
-            __m128i v_half_0 = _mm_unpackhi_epi64(v_src, v_src);
-            __m128i v_sum_tmp = _mm_add_epi16(v_src, v_half_0);
-            __m128i v_half_1 = _mm_unpacklo_epi16(v_src, v_half_0);
+            v_int16 v_src = vx_load_expand(src0 + x);
+            v_int16 v_half = v_combine_high(v_src, v_src);

-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_sum_tmp), 16));
-            v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_1, v_half_1));
-        }
+            v_int32 v_tmp0, v_tmp1;
+            v_expand(v_src + v_half, v_tmp0, v_tmp1);
+            v_sum += v_tmp0;

-        int CV_DECL_ALIGNED(16) ar[8];
-        _mm_store_si128((__m128i*)ar, v_sum);
-        _mm_store_si128((__m128i*)(ar + 4), v_sqsum);
+            v_int16 v_tmp2, v_tmp3;
+            v_zip(v_src, v_half, v_tmp2, v_tmp3);
+            v_sqsum += v_dotprod(v_tmp2, v_tmp2);
+            x += v_int16::nlanes;
+        }

-        addSqrChannels(sum, sqsum, ar, cn);
+        if (cn == 1)
+        {
+            *sum += v_reduce_sum(v_sum);
+            *sqsum += v_reduce_sum(v_sqsum);
+        }
+        else
+        {
+            int CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * v_int32::nlanes];
+            v_store(ar, v_sum);
+            v_store(ar + v_int32::nlanes, v_sqsum);
+            for (int i = 0; i < v_int32::nlanes; ++i)
+            {
+                sum[i % cn] += ar[i];
+                sqsum[i % cn] += ar[v_int32::nlanes + i];
+            }
+        }
+        v_cleanup();
         return x / cn;
     }
 };
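
For reference, these SumSqr_SIMD specializations back cv::meanStdDev(); a typical call of the public API (the size and random fill here are arbitrary):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat img(480, 640, CV_8UC3);
    cv::randu(img, cv::Scalar::all(0), cv::Scalar::all(256));  // fill with random bytes

    cv::Scalar mean, stddev;
    cv::meanStdDev(img, mean, stddev);  // per-channel mean and standard deviation

    std::cout << "mean: " << mean << " stddev: " << stddev << std::endl;
    return 0;
}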