submodule / opencv / Commits / cecb8789

Commit cecb8789 authored Sep 14, 2018 by Alexander Alekhin
Merge pull request #12527 from terfendail:mean_wintr
parents 29c10b67 95502242
Showing 1 changed file with 93 additions and 73 deletions
modules/core/src/mean.cpp  (+93 / -73)  @ cecb8789
@@ -180,66 +180,71 @@ struct SumSqr_SIMD
     }
 };

 template <typename T>
 inline void addSqrChannels(T * sum, T * sqsum, T * buf, int cn)
 {
     for (int i = 0; i < 4; ++i)
     {
         sum[i % cn] += buf[i];
         sqsum[i % cn] += buf[4 + i];
     }
 }

-#if CV_SSE2
+#if CV_SIMD

 template <>
 struct SumSqr_SIMD<uchar, int, int>
 {
     int operator () (const uchar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
     {
-        if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
+        if (mask || (cn != 1 && cn != 2 && cn != 4))
             return 0;

         len *= cn;
         int x = 0;

-        __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
-        const int len_16 = len & ~15;
+        v_int32 v_sum = vx_setzero_s32();
+        v_int32 v_sqsum = vx_setzero_s32();

-        for ( ; x <= len_16 - 16; )
+        const int len0 = len & -v_uint8::nlanes;
+        while (x < len0)
         {
-            const int len_tmp = min(x + 2048, len_16);
-            __m128i v_sum_tmp = v_zero;
-            for ( ; x <= len_tmp - 16; x += 16)
+            const int len_tmp = min(x + 256 * v_uint16::nlanes, len0);
+            v_uint16 v_sum16 = vx_setzero_u16();
+            for ( ; x < len_tmp; x += v_uint8::nlanes)
             {
-                __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
-                __m128i v_half_0 = _mm_unpacklo_epi8(v_src, v_zero);
-                __m128i v_half_1 = _mm_unpackhi_epi8(v_src, v_zero);
-                v_sum_tmp = _mm_add_epi16(v_sum_tmp, _mm_add_epi16(v_half_0, v_half_1));
-                __m128i v_half_2 = _mm_unpacklo_epi16(v_half_0, v_half_1);
-                __m128i v_half_3 = _mm_unpackhi_epi16(v_half_0, v_half_1);
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_2, v_half_2));
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_3, v_half_3));
+                v_uint16 v_src0 = vx_load_expand(src0 + x);
+                v_uint16 v_src1 = vx_load_expand(src0 + x + v_uint16::nlanes);
+                v_sum16 += v_src0 + v_src1;
+                v_int16 v_tmp0, v_tmp1;
+                v_zip(v_reinterpret_as_s16(v_src0), v_reinterpret_as_s16(v_src1), v_tmp0, v_tmp1);
+                v_sqsum += v_dotprod(v_tmp0, v_tmp0) + v_dotprod(v_tmp1, v_tmp1);
             }
-            v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_sum_tmp, v_zero));
-            v_sum = _mm_add_epi32(v_sum, _mm_unpackhi_epi16(v_sum_tmp, v_zero));
+            v_uint32 v_half0, v_half1;
+            v_expand(v_sum16, v_half0, v_half1);
+            v_sum += v_reinterpret_as_s32(v_half0 + v_half1);
         }

-        for ( ; x <= len - 8; x += 8)
+        if (x <= len - v_uint16::nlanes)
         {
-            __m128i v_src = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i const *)(src0 + x)), v_zero);
-            __m128i v_half_0 = _mm_unpackhi_epi64(v_src, v_src);
-            __m128i v_sum_tmp = _mm_add_epi16(v_src, v_half_0);
-            __m128i v_half_1 = _mm_unpacklo_epi16(v_src, v_half_0);
-            v_sum = _mm_add_epi32(v_sum, _mm_unpacklo_epi16(v_sum_tmp, v_zero));
-            v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_1, v_half_1));
+            v_uint16 v_src = vx_load_expand(src0 + x);
+            v_uint16 v_half = v_combine_high(v_src, v_src);
+            v_uint32 v_tmp0, v_tmp1;
+            v_expand(v_src + v_half, v_tmp0, v_tmp1);
+            v_sum += v_reinterpret_as_s32(v_tmp0);
+            v_int16 v_tmp2, v_tmp3;
+            v_zip(v_reinterpret_as_s16(v_src), v_reinterpret_as_s16(v_half), v_tmp2, v_tmp3);
+            v_sqsum += v_dotprod(v_tmp2, v_tmp2);
+            x += v_uint16::nlanes;
         }

-        int CV_DECL_ALIGNED(16) ar[8];
-        _mm_store_si128((__m128i *)ar, v_sum);
-        _mm_store_si128((__m128i *)(ar + 4), v_sqsum);
-        addSqrChannels(sum, sqsum, ar, cn);
+        if (cn == 1)
+        {
+            *sum += v_reduce_sum(v_sum);
+            *sqsum += v_reduce_sum(v_sqsum);
+        }
+        else
+        {
+            int CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * v_int32::nlanes];
+            v_store(ar, v_sum);
+            v_store(ar + v_int32::nlanes, v_sqsum);
+            for (int i = 0; i < v_int32::nlanes; ++i)
+            {
+                sum[i % cn] += ar[i];
+                sqsum[i % cn] += ar[v_int32::nlanes + i];
+            }
+        }
+        v_cleanup();

         return x / cn;
     }
 };
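
Both sides of the hunk above implement the same contract: accumulate a per-channel sum and sum of squares over an interleaved row of uchar pixels and report how many pixels were handled. A minimal scalar sketch of that contract follows; the parameter names mirror the operator above, but the helper itself (sumSqrScalarRef) is illustrative only and not part of this patch.

// Hedged scalar reference for what SumSqr_SIMD<uchar, int, int> computes;
// illustrative only, not code from the commit.
static int sumSqrScalarRef(const unsigned char* src0, int* sum, int* sqsum, int len, int cn)
{
    len *= cn;                       // len pixels, cn interleaved channels
    for (int x = 0; x < len; ++x)
    {
        int v = src0[x];
        sum[x % cn]   += v;          // per-channel running sum
        sqsum[x % cn] += v * v;      // per-channel running sum of squares
    }
    return len / cn;                 // pixels processed, like 'return x / cn'
}

The vectorized replacement reaches the same totals in blocks: the inner loop is capped at 256 * v_uint16::nlanes elements, so each 16-bit lane of v_sum16 accumulates at most 256 values of at most 255 and cannot overflow before being widened into the 32-bit v_sum accumulator.
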
@@ -249,49 +254,64 @@ struct SumSqr_SIMD<schar, int, int>
 {
     int operator () (const schar * src0, const uchar * mask, int * sum, int * sqsum, int len, int cn) const
     {
-        if (mask || (cn != 1 && cn != 2) || !USE_SSE2)
+        if (mask || (cn != 1 && cn != 2 && cn != 4))
             return 0;

         len *= cn;
         int x = 0;

-        __m128i v_zero = _mm_setzero_si128(), v_sum = v_zero, v_sqsum = v_zero;
-        const int len_16 = len & ~15;
+        v_int32 v_sum = vx_setzero_s32();
+        v_int32 v_sqsum = vx_setzero_s32();

-        for ( ; x <= len_16 - 16; )
+        const int len0 = len & -v_int8::nlanes;
+        while (x < len0)
         {
-            const int len_tmp = min(x + 2048, len_16);
-            __m128i v_sum_tmp = v_zero;
-            for ( ; x <= len_tmp - 16; x += 16)
+            const int len_tmp = min(x + 256 * v_int16::nlanes, len0);
+            v_int16 v_sum16 = vx_setzero_s16();
+            for ( ; x < len_tmp; x += v_int8::nlanes)
             {
-                __m128i v_src = _mm_loadu_si128((const __m128i *)(src0 + x));
-                __m128i v_half_0 = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, v_src), 8);
-                __m128i v_half_1 = _mm_srai_epi16(_mm_unpackhi_epi8(v_zero, v_src), 8);
-                v_sum_tmp = _mm_add_epi16(v_sum_tmp, _mm_add_epi16(v_half_0, v_half_1));
-                __m128i v_half_2 = _mm_unpacklo_epi16(v_half_0, v_half_1);
-                __m128i v_half_3 = _mm_unpackhi_epi16(v_half_0, v_half_1);
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_2, v_half_2));
-                v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_3, v_half_3));
+                v_int16 v_src0 = vx_load_expand(src0 + x);
+                v_int16 v_src1 = vx_load_expand(src0 + x + v_int16::nlanes);
+                v_sum16 += v_src0 + v_src1;
+                v_int16 v_tmp0, v_tmp1;
+                v_zip(v_src0, v_src1, v_tmp0, v_tmp1);
+                v_sqsum += v_dotprod(v_tmp0, v_tmp0) + v_dotprod(v_tmp1, v_tmp1);
             }
-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_sum_tmp), 16));
-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_sum_tmp), 16));
+            v_int32 v_half0, v_half1;
+            v_expand(v_sum16, v_half0, v_half1);
+            v_sum += v_half0 + v_half1;
         }

-        for ( ; x <= len - 8; x += 8)
+        if (x <= len - v_int16::nlanes)
         {
-            __m128i v_src = _mm_srai_epi16(_mm_unpacklo_epi8(v_zero, _mm_loadl_epi64((__m128i const *)(src0 + x))), 8);
-            __m128i v_half_0 = _mm_unpackhi_epi64(v_src, v_src);
-            __m128i v_sum_tmp = _mm_add_epi16(v_src, v_half_0);
-            __m128i v_half_1 = _mm_unpacklo_epi16(v_src, v_half_0);
-            v_sum = _mm_add_epi32(v_sum, _mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_sum_tmp), 16));
-            v_sqsum = _mm_add_epi32(v_sqsum, _mm_madd_epi16(v_half_1, v_half_1));
+            v_int16 v_src = vx_load_expand(src0 + x);
+            v_int16 v_half = v_combine_high(v_src, v_src);
+            v_int32 v_tmp0, v_tmp1;
+            v_expand(v_src + v_half, v_tmp0, v_tmp1);
+            v_sum += v_tmp0;
+            v_int16 v_tmp2, v_tmp3;
+            v_zip(v_src, v_half, v_tmp2, v_tmp3);
+            v_sqsum += v_dotprod(v_tmp2, v_tmp2);
+            x += v_int16::nlanes;
         }

-        int CV_DECL_ALIGNED(16) ar[8];
-        _mm_store_si128((__m128i *)ar, v_sum);
-        _mm_store_si128((__m128i *)(ar + 4), v_sqsum);
-        addSqrChannels(sum, sqsum, ar, cn);
+        if (cn == 1)
+        {
+            *sum += v_reduce_sum(v_sum);
+            *sqsum += v_reduce_sum(v_sqsum);
+        }
+        else
+        {
+            int CV_DECL_ALIGNED(CV_SIMD_WIDTH) ar[2 * v_int32::nlanes];
+            v_store(ar, v_sum);
+            v_store(ar + v_int32::nlanes, v_sqsum);
+            for (int i = 0; i < v_int32::nlanes; ++i)
+            {
+                sum[i % cn] += ar[i];
+                sqsum[i % cn] += ar[v_int32::nlanes + i];
+            }
+        }
+        v_cleanup();

         return x / cn;
     }
 };
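
Both specializations now lean on the same universal-intrinsics building blocks: vx_load_expand to load and widen, v_zip plus v_dotprod to square and accumulate into 32-bit lanes, and v_reduce_sum / v_store to fold the vector accumulators back into the scalar outputs. A self-contained sketch of that pattern, reduced to a single channel, is below; it assumes a CV_SIMD-enabled OpenCV build, and the function name sumSqrU8 and its scalar tail are illustrative rather than part of the patch.

#include <opencv2/core/hal/intrin.hpp>

// Hedged sketch of the universal-intrinsics pattern used by the patch,
// reduced to a single-channel sum / sum-of-squares kernel.
static void sumSqrU8(const uchar* src, int len, int& sum, int& sqsum)
{
    using namespace cv;
    int x = 0;
#if CV_SIMD
    v_int32 v_sum = vx_setzero_s32(), v_sqsum = vx_setzero_s32();
    for ( ; x <= len - v_uint16::nlanes; x += v_uint16::nlanes)
    {
        v_uint16 v_src = vx_load_expand(src + x);    // widen u8 -> u16
        v_int16 v_s = v_reinterpret_as_s16(v_src);
        v_uint32 v_lo, v_hi;
        v_expand(v_src, v_lo, v_hi);                 // widen u16 -> u32
        v_sum += v_reinterpret_as_s32(v_lo + v_hi);  // accumulate sums
        v_sqsum += v_dotprod(v_s, v_s);              // accumulate squares
    }
    sum += v_reduce_sum(v_sum);
    sqsum += v_reduce_sum(v_sqsum);
    v_cleanup();
#endif
    for ( ; x < len; ++x)                            // scalar tail
    {
        sum += src[x];
        sqsum += src[x] * src[x];
    }
}

Unlike the patch, this sketch widens to 32 bits on every iteration instead of batching partial sums in 16-bit lanes, trading some throughput for simplicity.
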