opencv · Commit e11213dc, authored Feb 25, 2019 by Alexander Alekhin

Merge pull request #13900 from alalek:core_dispatch_merge

Parents: 3bc9912f, fd49ee5f

Showing 3 changed files with 240 additions and 188 deletions (+240 / -188)
modules/core/CMakeLists.txt          +1    -0
modules/core/src/merge.dispatch.cpp  +20   -188
modules/core/src/merge.simd.hpp      +219  -0
modules/core/CMakeLists.txt

@@ -8,6 +8,7 @@ ocv_add_dispatched_file(convert_scale SSE2 AVX2)
 ocv_add_dispatched_file(count_non_zero SSE2 AVX2)
 ocv_add_dispatched_file(matmul SSE2 AVX2)
 ocv_add_dispatched_file(mean SSE2 AVX2)
+ocv_add_dispatched_file(merge SSE2 AVX2)
 ocv_add_dispatched_file(split SSE2 AVX2)
 ocv_add_dispatched_file(sum SSE2 AVX2)
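Background on this mechanism (not part of the diff): ocv_add_dispatched_file(merge SSE2 AVX2) tells the build to compile modules/core/src/merge.simd.hpp once for the baseline ISA and once per listed extension, and to generate merge.simd_declarations.hpp so that a CV_CPU_DISPATCH stub can pick the widest variant the host CPU supports at run time. Below is a minimal sketch of that dispatch pattern; the namespaces and the feature check are illustrative placeholders, not the generated code.

#include <cstdio>

// Hypothetical per-ISA namespaces; the real ones are produced by
// CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN/END when merge.simd.hpp is
// compiled with different compiler flags.
namespace opt_baseline { void merge8u() { std::puts("baseline merge8u"); } }
namespace opt_avx2     { void merge8u() { std::puts("AVX2 merge8u"); } }

// Stand-in for the runtime CPU feature check used by the real dispatcher.
static bool haveAVX2() { return false; }  // assume a non-AVX2 host for the sketch

// What a CV_CPU_DISPATCH-style stub boils down to: try the widest ISA
// first, otherwise fall back to the baseline build of the same code.
void merge8u_dispatch()
{
    if (haveAVX2())
        return opt_avx2::merge8u();
    return opt_baseline::merge8u();
}

int main()
{
    merge8u_dispatch();  // prints "baseline merge8u" under the assumption above
    return 0;
}

The real stubs appear in merge.dispatch.cpp below; their selection logic is generated by CMake rather than written by hand.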
modules/core/src/merge.cpp → modules/core/src/merge.dispatch.cpp
@@ -6,208 +6,44 @@
 #include "precomp.hpp"
 #include "opencl_kernels_core.hpp"
 
-namespace cv { namespace hal {
-
-#if CV_SIMD
-/*
-  The trick with STORE_UNALIGNED/STORE_ALIGNED_NOCACHE is the following:
-  on IA there are instructions movntps and such to which
-  v_store_interleave(...., STORE_ALIGNED_NOCACHE) is mapped.
-  Those instructions write directly into memory w/o touching cache
-  that results in dramatic speed improvements, especially on
-  large arrays (FullHD, 4K etc.).
-
-  Those intrinsics require the destination address to be aligned
-  by 16/32 bits (with SSE2 and AVX2, respectively).
-  So we potentially split the processing into 3 stages:
-  1) the optional prefix part [0:i0), where we use simple unaligned stores.
-  2) the optional main part [i0:len - VECSZ], where we use "nocache" mode.
-     But in some cases we have to use unaligned stores in this part.
-  3) the optional suffix part (the tail) (len - VECSZ:len) where we switch back to "unaligned" mode
-     to process the remaining len - VECSZ elements.
-
-  In principle there can be very poorly aligned data where there is no main part.
-  For that we set i0=0 and use unaligned stores for the whole array.
-*/
-template<typename T, typename VecT> static void
-vecmerge_( const T** src, T* dst, int len, int cn )
-{
-    const int VECSZ = VecT::nlanes;
-    int i, i0 = 0;
-    const T* src0 = src[0];
-    const T* src1 = src[1];
-
-    const int dstElemSize = cn*sizeof(T);
-    int r = (int)((size_t)(void*)dst % (VECSZ*sizeof(T)));
-    hal::StoreMode mode = hal::STORE_ALIGNED_NOCACHE;
-    if( r != 0 )
-    {
-        mode = hal::STORE_UNALIGNED;
-        if( r % dstElemSize == 0 && len > VECSZ*2 )
-            i0 = VECSZ - (r / dstElemSize);
-    }
+#include "merge.simd.hpp"
+#include "merge.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
 
-    if( cn == 2 )
-    {
-        for( i = 0; i < len; i += VECSZ )
-        {
-            if( i > len - VECSZ )
-            {
-                i = len - VECSZ;
-                mode = hal::STORE_UNALIGNED;
-            }
-            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
-            v_store_interleave(dst + i*cn, a, b, mode);
-            if( i < i0 )
-            {
-                i = i0 - VECSZ;
-                mode = hal::STORE_ALIGNED_NOCACHE;
-            }
-        }
-    }
-    else if( cn == 3 )
-    {
-        const T* src2 = src[2];
-        for( i = 0; i < len; i += VECSZ )
-        {
-            if( i > len - VECSZ )
-            {
-                i = len - VECSZ;
-                mode = hal::STORE_UNALIGNED;
-            }
-            VecT a = vx_load(src0 + i), b = vx_load(src1 + i), c = vx_load(src2 + i);
-            v_store_interleave(dst + i*cn, a, b, c, mode);
-            if( i < i0 )
-            {
-                i = i0 - VECSZ;
-                mode = hal::STORE_ALIGNED_NOCACHE;
-            }
-        }
-    }
-    else
-    {
-        CV_Assert( cn == 4 );
-        const T* src2 = src[2];
-        const T* src3 = src[3];
-        for( i = 0; i < len; i += VECSZ )
-        {
-            if( i > len - VECSZ )
-            {
-                i = len - VECSZ;
-                mode = hal::STORE_UNALIGNED;
-            }
-            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
-            VecT c = vx_load(src2 + i), d = vx_load(src3 + i);
-            v_store_interleave(dst + i*cn, a, b, c, d, mode);
-            if( i < i0 )
-            {
-                i = i0 - VECSZ;
-                mode = hal::STORE_ALIGNED_NOCACHE;
-            }
-        }
-    }
-    vx_cleanup();
-}
-#endif
-
-template<typename T> static void
-merge_( const T** src, T* dst, int len, int cn )
-{
-    int k = cn % 4 ? cn % 4 : 4;
-    int i, j;
-    if( k == 1 )
-    {
-        const T* src0 = src[0];
-        for( i = j = 0; i < len; i++, j += cn )
-            dst[j] = src0[i];
-    }
-    else if( k == 2 )
-    {
-        const T *src0 = src[0], *src1 = src[1];
-        i = j = 0;
-        for( ; i < len; i++, j += cn )
-        {
-            dst[j] = src0[i];
-            dst[j+1] = src1[i];
-        }
-    }
-    else if( k == 3 )
-    {
-        const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
-        i = j = 0;
-        for( ; i < len; i++, j += cn )
-        {
-            dst[j] = src0[i];
-            dst[j+1] = src1[i];
-            dst[j+2] = src2[i];
-        }
-    }
-    else
-    {
-        const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
-        i = j = 0;
-        for( ; i < len; i++, j += cn )
-        {
-            dst[j] = src0[i];
-            dst[j+1] = src1[i];
-            dst[j+2] = src2[i];
-            dst[j+3] = src3[i];
-        }
-    }
-
-    for( ; k < cn; k += 4 )
-    {
-        const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
-        for( i = 0, j = k; i < len; i++, j += cn )
-        {
-            dst[j] = src0[i];
-            dst[j+1] = src1[i];
-            dst[j+2] = src2[i];
-            dst[j+3] = src3[i];
-        }
-    }
-}
+namespace cv { namespace hal {
 
 void merge8u(const uchar** src, uchar* dst, int len, int cn)
 {
     CV_INSTRUMENT_REGION();
     CALL_HAL(merge8u, cv_hal_merge8u, src, dst, len, cn)
-#if CV_SIMD
-    if( len >= v_uint8::nlanes && 2 <= cn && cn <= 4 )
-        vecmerge_<uchar, v_uint8>(src, dst, len, cn);
-    else
-#endif
-        merge_(src, dst, len, cn);
+    CV_CPU_DISPATCH(merge8u, (src, dst, len, cn),
+        CV_CPU_DISPATCH_MODES_ALL);
 }
 
 void merge16u(const ushort** src, ushort* dst, int len, int cn)
 {
     CV_INSTRUMENT_REGION();
     CALL_HAL(merge16u, cv_hal_merge16u, src, dst, len, cn)
-#if CV_SIMD
-    if( len >= v_uint16::nlanes && 2 <= cn && cn <= 4 )
-        vecmerge_<ushort, v_uint16>(src, dst, len, cn);
-    else
-#endif
-        merge_(src, dst, len, cn);
+    CV_CPU_DISPATCH(merge16u, (src, dst, len, cn),
+        CV_CPU_DISPATCH_MODES_ALL);
 }
 
 void merge32s(const int** src, int* dst, int len, int cn)
 {
     CV_INSTRUMENT_REGION();
     CALL_HAL(merge32s, cv_hal_merge32s, src, dst, len, cn)
-#if CV_SIMD
-    if( len >= v_int32::nlanes && 2 <= cn && cn <= 4 )
-        vecmerge_<int, v_int32>(src, dst, len, cn);
-    else
-#endif
-        merge_(src, dst, len, cn);
+    CV_CPU_DISPATCH(merge32s, (src, dst, len, cn),
+        CV_CPU_DISPATCH_MODES_ALL);
 }
 
 void merge64s(const int64** src, int64* dst, int len, int cn)
 {
     CV_INSTRUMENT_REGION();
     CALL_HAL(merge64s, cv_hal_merge64s, src, dst, len, cn)
-#if CV_SIMD
-    if( len >= v_int64::nlanes && 2 <= cn && cn <= 4 )
-        vecmerge_<int64, v_int64>(src, dst, len, cn);
-    else
-#endif
-        merge_(src, dst, len, cn);
+    CV_CPU_DISPATCH(merge64s, (src, dst, len, cn),
+        CV_CPU_DISPATCH_MODES_ALL);
 }
 
-}} // cv::hal::
+} // namespace cv::hal::
 
 typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);
@@ -225,7 +61,6 @@ static MergeFunc getMergeFunc(int depth)
 #ifdef HAVE_IPP
 
-namespace cv {
 static bool ipp_merge(const Mat* mv, Mat& dst, int channels)
 {
 #ifdef HAVE_IPP_IW_LL
@@ -274,10 +109,9 @@ static bool ipp_merge(const Mat* mv, Mat& dst, int channels)
     return false;
 #endif
 }
-}
 #endif
 
-void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
+void merge(const Mat* mv, size_t n, OutputArray _dst)
 {
     CV_INSTRUMENT_REGION();
@@ -361,8 +195,6 @@ void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
 #ifdef HAVE_OPENCL
 
-namespace cv {
-
 static bool ocl_merge( InputArrayOfArrays _mv, OutputArray _dst )
 {
     std::vector<UMat> src, ksrc;
@@ -421,11 +253,9 @@ static bool ocl_merge( InputArrayOfArrays _mv, OutputArray _dst )
     return k.run(2, globalsize, NULL, false);
 }
-}
 #endif
 
-void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
+void merge(InputArrayOfArrays _mv, OutputArray _dst)
 {
     CV_INSTRUMENT_REGION();
@@ -436,3 +266,5 @@ void cv::merge(InputArrayOfArrays _mv, OutputArray _dst)
     _mv.getMatVector(mv);
     merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
 }
+
+} // namespace
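As a usage note (not part of the commit): the public cv::merge overloads kept in this file are what eventually route into the hal::merge8u/16u/32s/64s stubs above. A minimal example for CV_8U data, assuming a standard OpenCV build:

#include <opencv2/core.hpp>

int main()
{
    // Three single-channel 8-bit planes of the same size...
    cv::Mat planes[3] = {
        cv::Mat(480, 640, CV_8UC1, cv::Scalar(10)),
        cv::Mat(480, 640, CV_8UC1, cv::Scalar(20)),
        cv::Mat(480, 640, CV_8UC1, cv::Scalar(30)),
    };

    // ...interleaved into one 3-channel image; for CV_8U this ends up in
    // cv::hal::merge8u(), which the commit routes through CV_CPU_DISPATCH.
    cv::Mat interleaved;
    cv::merge(planes, 3, interleaved);

    CV_Assert(interleaved.type() == CV_8UC3);
    return 0;
}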
modules/core/src/merge.simd.hpp (new file, mode 100644)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "precomp.hpp"

namespace cv { namespace hal {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN

void merge8u(const uchar** src, uchar* dst, int len, int cn);
void merge16u(const ushort** src, ushort* dst, int len, int cn);
void merge32s(const int** src, int* dst, int len, int cn);
void merge64s(const int64** src, int64* dst, int len, int cn);

#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY

#if CV_SIMD
/*
  The trick with STORE_UNALIGNED/STORE_ALIGNED_NOCACHE is the following:
  on IA there are instructions movntps and such to which
  v_store_interleave(...., STORE_ALIGNED_NOCACHE) is mapped.
  Those instructions write directly into memory w/o touching cache
  that results in dramatic speed improvements, especially on
  large arrays (FullHD, 4K etc.).

  Those intrinsics require the destination address to be aligned
  by 16/32 bits (with SSE2 and AVX2, respectively).
  So we potentially split the processing into 3 stages:
  1) the optional prefix part [0:i0), where we use simple unaligned stores.
  2) the optional main part [i0:len - VECSZ], where we use "nocache" mode.
     But in some cases we have to use unaligned stores in this part.
  3) the optional suffix part (the tail) (len - VECSZ:len) where we switch back to "unaligned" mode
     to process the remaining len - VECSZ elements.

  In principle there can be very poorly aligned data where there is no main part.
  For that we set i0=0 and use unaligned stores for the whole array.
*/
template<typename T, typename VecT> static void
vecmerge_( const T** src, T* dst, int len, int cn )
{
    const int VECSZ = VecT::nlanes;
    int i, i0 = 0;
    const T* src0 = src[0];
    const T* src1 = src[1];

    const int dstElemSize = cn*sizeof(T);
    int r = (int)((size_t)(void*)dst % (VECSZ*sizeof(T)));
    hal::StoreMode mode = hal::STORE_ALIGNED_NOCACHE;
    if( r != 0 )
    {
        mode = hal::STORE_UNALIGNED;
        if( r % dstElemSize == 0 && len > VECSZ*2 )
            i0 = VECSZ - (r / dstElemSize);
    }

    if( cn == 2 )
    {
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
            v_store_interleave(dst + i*cn, a, b, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else if( cn == 3 )
    {
        const T* src2 = src[2];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i), c = vx_load(src2 + i);
            v_store_interleave(dst + i*cn, a, b, c, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else
    {
        CV_Assert( cn == 4 );
        const T* src2 = src[2];
        const T* src3 = src[3];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a = vx_load(src0 + i), b = vx_load(src1 + i);
            VecT c = vx_load(src2 + i), d = vx_load(src3 + i);
            v_store_interleave(dst + i*cn, a, b, c, d, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    vx_cleanup();
}
#endif

template<typename T> static void
merge_( const T** src, T* dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        const T* src0 = src[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst[j] = src0[i];
    }
    else if( k == 2 )
    {
        const T *src0 = src[0], *src1 = src[1];
        i = j = 0;
        for( ; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
        }
    }
    else if( k == 3 )
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
        i = j = 0;
        for( ; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
        }
    }
    else
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
        i = j = 0;
        for( ; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
            dst[j+3] = src3[i];
        }
    }

    for( ; k < cn; k += 4 )
    {
        const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
            dst[j+3] = src3[i];
        }
    }
}

void merge8u(const uchar** src, uchar* dst, int len, int cn)
{
    CV_INSTRUMENT_REGION();
#if CV_SIMD
    if( len >= v_uint8::nlanes && 2 <= cn && cn <= 4 )
        vecmerge_<uchar, v_uint8>(src, dst, len, cn);
    else
#endif
        merge_(src, dst, len, cn);
}

void merge16u(const ushort** src, ushort* dst, int len, int cn)
{
    CV_INSTRUMENT_REGION();
#if CV_SIMD
    if( len >= v_uint16::nlanes && 2 <= cn && cn <= 4 )
        vecmerge_<ushort, v_uint16>(src, dst, len, cn);
    else
#endif
        merge_(src, dst, len, cn);
}

void merge32s(const int** src, int* dst, int len, int cn)
{
    CV_INSTRUMENT_REGION();
#if CV_SIMD
    if( len >= v_int32::nlanes && 2 <= cn && cn <= 4 )
        vecmerge_<int, v_int32>(src, dst, len, cn);
    else
#endif
        merge_(src, dst, len, cn);
}

void merge64s(const int64** src, int64* dst, int len, int cn)
{
    CV_INSTRUMENT_REGION();
#if CV_SIMD
    if( len >= v_int64::nlanes && 2 <= cn && cn <= 4 )
        vecmerge_<int64, v_int64>(src, dst, len, cn);
    else
#endif
        merge_(src, dst, len, cn);
}

#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
}} // namespace
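To make the prefix/main/tail reasoning in the vecmerge_ comment concrete, here is a small standalone model of the i0 computation (plain C++ without the OpenCV intrinsics; the address, lane count, and lengths are made-up illustrative values):

#include <cstdio>
#include <cstdint>

// Models the alignment bookkeeping at the top of vecmerge_:
//   r  = misalignment of dst with respect to the vector store size,
//   i0 = number of leading elements handled with unaligned stores before
//        the destination becomes aligned for STORE_ALIGNED_NOCACHE.
int main()
{
    const int VECSZ = 16;            // e.g. v_uint8::nlanes with 128-bit SSE2
    const int cn = 2;                // two interleaved channels
    const int elemSize = 1;          // sizeof(uchar)
    const int dstElemSize = cn * elemSize;
    const int len = 640;             // per-channel element count

    const std::uintptr_t dstAddr = 0x1006;              // made-up destination address
    const int r = (int)(dstAddr % (VECSZ * elemSize));  // here: 6 bytes off alignment

    int i0 = 0;
    if (r != 0 && r % dstElemSize == 0 && len > VECSZ * 2)
        i0 = VECSZ - r / dstElemSize;                    // here: 16 - 3 = 13

    // After i0 elements, dst + i0*cn = 0x1006 + 26 = 0x1020, which is 16-byte
    // aligned, so the main loop can switch to the "nocache" store mode.
    std::printf("misalignment r = %d bytes, unaligned prefix i0 = %d elements\n", r, i0);
    return 0;
}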