opencv: Commit 619180df
Authored Mar 06, 2020 by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Parents: 6271192a 6d113bd0

Showing 19 changed files with 518 additions and 391 deletions (+518, -391)
modules/core/include/opencv2/core/cvstd.inl.hpp   (+1, -0)
modules/core/src/minmax.cpp                       (+1, -0)
modules/core/src/norm.cpp                         (+116, -93)
modules/core/src/ocl.cpp                          (+12, -9)
modules/dnn/src/layers/blank_layer.cpp            (+14, -11)
modules/dnn/src/layers/const_layer.cpp            (+4, -1)
modules/dnn/src/layers/flatten_layer.cpp          (+15, -11)
modules/dnn/src/layers/normalize_bbox_layer.cpp   (+30, -26)
modules/dnn/src/layers/permute_layer.cpp          (+25, -21)
modules/dnn/src/layers/pooling_layer.cpp          (+45, -45)
modules/dnn/src/layers/prior_box_layer.cpp        (+53, -50)
modules/dnn/src/layers/reorg_layer.cpp            (+15, -11)
modules/dnn/src/layers/reshape_layer.cpp          (+15, -11)
modules/dnn/src/layers/resize_layer.cpp           (+23, -20)
modules/dnn/src/layers/scale_layer.cpp            (+30, -19)
modules/dnn/src/layers/slice_layer.cpp            (+24, -21)
modules/dnn/src/onnx/onnx_importer.cpp            (+63, -18)
modules/dnn/test/test_onnx_importer.cpp           (+17, -24)
modules/imgcodecs/src/grfmt_jpeg.cpp              (+15, -0)
modules/core/include/opencv2/core/cvstd.inl.hpp
@@ -46,6 +46,7 @@
 #include <complex>
 #include <ostream>
+#include <sstream>
 
 //! @cond IGNORED
modules/core/src/minmax.cpp
@@ -1089,6 +1089,7 @@ bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* minLoc
         getMinMaxRes<double>
     };
 
+    CV_Assert(ddepth <= CV_64F);
     getMinMaxResFunc func = functab[ddepth];
 
     int locTemp[2];
modules/core/src/norm.cpp
@@ -710,67 +710,78 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
     result;
     result.d = 0;
     NAryMatIterator it(arrays, ptrs);
-    int j, total = (int)it.size, blockSize = total;
-    bool blockSum = depth == CV_16F || (normType == NORM_L1 && depth <= CV_16S) ||
-            ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
-    int isum = 0;
-    int *ibuf = &result.i;
-    AutoBuffer<float> fltbuf_;
-    float* fltbuf = 0;
-    size_t esz = 0;
-    if( blockSum )
-    {
-        esz = src.elemSize();
-        if( depth == CV_16F )
-        {
-            blockSize = std::min(blockSize, 1024);
-            fltbuf_.allocate(blockSize);
-            fltbuf = fltbuf_.data();
-        }
-        else
-        {
-            int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
-            blockSize = std::min(blockSize, intSumBlockSize);
-            ibuf = &isum;
-        }
-    }
-    for( size_t i = 0; i < it.nplanes; i++, ++it )
-    {
-        for( j = 0; j < total; j += blockSize )
-        {
-            int bsz = std::min(total - j, blockSize);
-            const uchar* data = ptrs[0];
-            if( depth == CV_16F )
-            {
-                hal::cvt16f32f((const float16_t*)ptrs[0], fltbuf, bsz);
-                data = (const uchar*)fltbuf;
-            }
-            func( data, ptrs[1], (uchar*)ibuf, bsz, cn );
-            if( blockSum && depth != CV_16F )
-            {
-                result.d += isum;
-                isum = 0;
-            }
-            ptrs[0] += bsz*esz;
-            if( ptrs[1] )
-                ptrs[1] += bsz;
-        }
-    }
+    CV_CheckLT((size_t)it.size, (size_t)INT_MAX, "");
+
+    if ((normType == NORM_L1 && depth <= CV_16S) ||
+        ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S))
+    {
+        // special case to handle "integer" overflow in accumulator
+        const size_t esz = src.elemSize();
+        const int total = (int)it.size;
+        const int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15)) / cn;
+        const int blockSize = std::min(total, intSumBlockSize);
+        int isum = 0;
+        int count = 0;
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            for (int j = 0; j < total; j += blockSize)
+            {
+                int bsz = std::min(total - j, blockSize);
+                func(ptrs[0], ptrs[1], (uchar*)&isum, bsz, cn);
+                count += bsz;
+                if (count + blockSize >= intSumBlockSize || (i + 1 >= it.nplanes && j + bsz >= total))
+                {
+                    result.d += isum;
+                    isum = 0;
+                    count = 0;
+                }
+                ptrs[0] += bsz * esz;
+                if (ptrs[1])
+                    ptrs[1] += bsz;
+            }
+        }
+    }
+    else if (depth == CV_16F)
+    {
+        const size_t esz = src.elemSize();
+        const int total = (int)it.size;
+        const int blockSize = std::min(total, divUp(1024, cn));
+        AutoBuffer<float, 1024> fltbuf(blockSize);
+        float* data0 = fltbuf.data();
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            for (int j = 0; j < total; j += blockSize)
+            {
+                int bsz = std::min(total - j, blockSize);
+                hal::cvt16f32f((const float16_t*)ptrs[0], data0, bsz * cn);
+                func((uchar*)data0, ptrs[1], (uchar*)&result.d, bsz, cn);
+                ptrs[0] += bsz * esz;
+                if (ptrs[1])
+                    ptrs[1] += bsz;
+            }
+        }
+    }
+    else
+    {
+        // generic implementation
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            func(ptrs[0], ptrs[1], (uchar*)&result, (int)it.size, cn);
+        }
+    }
 
     if( normType == NORM_INF )
     {
-        if( depth == CV_64F )
-            ;
+        if (depth == CV_64F || depth == CV_16F)
+            return result.d;
         else if( depth == CV_32F )
-            result.d = result.f;
+            return result.f;
         else
-            result.d = result.i;
+            return result.i;
     }
     else if( normType == NORM_L2 )
-        result.d = std::sqrt(result.d);
+        return std::sqrt(result.d);
 
     return result.d;

@@ -1186,70 +1197,82 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
     result;
     result.d = 0;
     NAryMatIterator it(arrays, ptrs);
-    int j, total = (int)it.size, blockSize = total;
-    bool blockSum = depth == CV_16F || (normType == NORM_L1 && depth <= CV_16S) ||
-            ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
-    unsigned isum = 0;
-    unsigned *ibuf = &result.u;
-    AutoBuffer<float> fltbuf_;
-    float* fltbuf = 0;
-    size_t esz = 0;
-    if( blockSum )
-    {
-        esz = src1.elemSize();
-        if( depth == CV_16F )
-        {
-            blockSize = std::min(blockSize, 1024);
-            fltbuf_.allocate(blockSize * 2);
-            fltbuf = fltbuf_.data();
-        }
-        else
-        {
-            int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
-            blockSize = std::min(blockSize, intSumBlockSize);
-            ibuf = &isum;
-        }
-    }
-    for( size_t i = 0; i < it.nplanes; i++, ++it )
-    {
-        for( j = 0; j < total; j += blockSize )
-        {
-            int bsz = std::min(total - j, blockSize);
-            const uchar *data0 = ptrs[0], *data1 = ptrs[1];
-            if( depth == CV_16F )
-            {
-                hal::cvt16f32f((const float16_t*)ptrs[0], fltbuf, bsz);
-                hal::cvt16f32f((const float16_t*)ptrs[1], fltbuf + bsz, bsz);
-                data0 = (const uchar*)fltbuf;
-                data1 = (const uchar*)(fltbuf + bsz);
-            }
-            func( data0, data1, ptrs[2], (uchar*)ibuf, bsz, cn );
-            if( blockSum && depth != CV_16F )
-            {
-                result.d += isum;
-                isum = 0;
-            }
-            ptrs[0] += bsz*esz;
-            ptrs[1] += bsz*esz;
-            if( ptrs[2] )
-                ptrs[2] += bsz;
-        }
-    }
+    CV_CheckLT((size_t)it.size, (size_t)INT_MAX, "");
+
+    if ((normType == NORM_L1 && depth <= CV_16S) ||
+        ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S))
+    {
+        // special case to handle "integer" overflow in accumulator
+        const size_t esz = src1.elemSize();
+        const int total = (int)it.size;
+        const int intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
+        const int blockSize = std::min(total, intSumBlockSize);
+        int isum = 0;
+        int count = 0;
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            for (int j = 0; j < total; j += blockSize)
+            {
+                int bsz = std::min(total - j, blockSize);
+                func(ptrs[0], ptrs[1], ptrs[2], (uchar*)&isum, bsz, cn);
+                count += bsz;
+                if (count + blockSize >= intSumBlockSize || (i + 1 >= it.nplanes && j + bsz >= total))
+                {
+                    result.d += isum;
+                    isum = 0;
+                    count = 0;
+                }
+                ptrs[0] += bsz * esz;
+                ptrs[1] += bsz * esz;
+                if (ptrs[2])
+                    ptrs[2] += bsz;
+            }
+        }
+    }
+    else if (depth == CV_16F)
+    {
+        const size_t esz = src1.elemSize();
+        const int total = (int)it.size;
+        const int blockSize = std::min(total, divUp(512, cn));
+        AutoBuffer<float, 1024> fltbuf(blockSize * 2);
+        float* data0 = fltbuf.data();
+        float* data1 = fltbuf.data() + blockSize * cn;
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            for (int j = 0; j < total; j += blockSize)
+            {
+                int bsz = std::min(total - j, blockSize);
+                hal::cvt16f32f((const float16_t*)ptrs[0], data0, bsz * cn);
+                hal::cvt16f32f((const float16_t*)ptrs[1], data1, bsz * cn);
+                func((uchar*)data0, (uchar*)data1, ptrs[2], (uchar*)&result.d, bsz, cn);
+                ptrs[0] += bsz * esz;
+                ptrs[1] += bsz * esz;
+                if (ptrs[2])
+                    ptrs[2] += bsz;
+            }
+        }
+    }
+    else
+    {
+        // generic implementation
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            func(ptrs[0], ptrs[1], ptrs[2], (uchar*)&result, (int)it.size, cn);
+        }
+    }
 
     if( normType == NORM_INF )
    {
-        if( depth == CV_64F )
-            ;
+        if (depth == CV_64F || depth == CV_16F)
+            return result.d;
         else if( depth == CV_32F )
-            result.d = result.f;
+            return result.f;
         else
-            result.d = result.u;
+            return result.u;
     }
     else if( normType == NORM_L2 )
-        result.d = std::sqrt(result.d);
+        return std::sqrt(result.d);
 
     return result.d;
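Note on the rewrite above: the old single blockSum flag mixed three concerns that the new code separates into explicit branches. Small integer depths accumulate into an int that is flushed into result.d before it can overflow, CV_16F data is converted to float block by block via hal::cvt16f32f, and all other depths take the generic per-plane path. Below is a standalone sketch of the overflow-avoidance idea (illustrative names, plain C++, not the OpenCV internals). Since 2^23 values of at most 255 sum to about 2.14e9, just under INT_MAX, that is where the (1 << 23) block size comes from.

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: L1-accumulate 8-bit data without overflowing a 32-bit partial
// sum, flushing it into a double periodically. Mirrors the "integer
// overflow in accumulator" special case in the diff; names are
// illustrative, not OpenCV API.
double l1_norm_u8(const std::vector<uint8_t>& data)
{
    const int blockSize = 1 << 23;  // 2^23 * 255 < INT_MAX
    double total = 0.0;
    int isum = 0;
    int count = 0;
    for (size_t i = 0; i < data.size(); ++i)
    {
        isum += data[i];
        if (++count == blockSize)   // flush before the int can overflow
        {
            total += isum;
            isum = 0;
            count = 0;
        }
    }
    return total + isum;            // flush the final partial block
}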
modules/core/src/ocl.cpp
@@ -6451,16 +6451,19 @@ struct Image2D::Impl
                                          CL_MEM_OBJECT_IMAGE2D, numFormats,
                                          NULL, &numFormats);
         CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, NULL)");
-        AutoBuffer<cl_image_format> formats(numFormats);
-        err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE,
-                                         CL_MEM_OBJECT_IMAGE2D, numFormats,
-                                         formats.data(), NULL);
-        CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, formats)");
-        for (cl_uint i = 0; i < numFormats; ++i)
-        {
-            if (!memcmp(&formats[i], &format, sizeof(format)))
-            {
-                return true;
-            }
-        }
+        if (numFormats > 0)
+        {
+            AutoBuffer<cl_image_format> formats(numFormats);
+            err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE,
+                                             CL_MEM_OBJECT_IMAGE2D, numFormats,
+                                             formats.data(), NULL);
+            CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, formats)");
+            for (cl_uint i = 0; i < numFormats; ++i)
+            {
+                if (!memcmp(&formats[i], &format, sizeof(format)))
+                {
+                    return true;
+                }
+            }
+        }
         return false;
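The ocl.cpp fix guards the second format query so that a zero-length buffer is never allocated or handed to clGetSupportedImageFormats when the device reports no 2D image formats. For context, a hedged sketch of the standard OpenCL count-then-fetch pattern this code follows (error checks trimmed; not the OpenCV wrapper itself):

#include <CL/cl.h>
#include <vector>

// Sketch of the two-call OpenCL query pattern: first ask for the count,
// then fetch the list only when it is non-empty.
bool imageFormatSupported(cl_context context, const cl_image_format& wanted)
{
    cl_uint numFormats = 0;
    clGetSupportedImageFormats(context, CL_MEM_READ_WRITE, CL_MEM_OBJECT_IMAGE2D,
                               0, NULL, &numFormats);
    if (numFormats == 0)
        return false;  // the fix: skip the fetch for an empty list

    std::vector<cl_image_format> formats(numFormats);
    clGetSupportedImageFormats(context, CL_MEM_READ_WRITE, CL_MEM_OBJECT_IMAGE2D,
                               numFormats, formats.data(), NULL);
    for (cl_uint i = 0; i < numFormats; ++i)
        if (formats[i].image_channel_order == wanted.image_channel_order &&
            formats[i].image_channel_data_type == wanted.image_channel_data_type)
            return true;
    return false;
}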
modules/dnn/src/layers/blank_layer.cpp
@@ -115,17 +115,6 @@ public:
             inputs[i].copyTo(outputs[i]);
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
@@ -163,6 +152,20 @@ public:
         return Ptr<BackendNode>(new InfEngineNgraphNode(blank));
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
+    }
+#endif
+
 };
 
 Ptr<Layer> BlankLayer::create(const LayerParams& params)
modules/dnn/src/layers/const_layer.cpp
@@ -75,6 +75,7 @@ public:
         blobs[0].copyTo(outputs[0]);
     }
 
+
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
@@ -84,6 +85,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -93,7 +95,8 @@ public:
                                                              blobs[0].data);
         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
     }
-#endif  // HAVE_DNN_IE_NN_BUILDER_2019
+#endif  // HAVE_DNN_NGRAPH
+
 #ifdef HAVE_CUDA
     Ptr<BackendNode> initCUDA(
modules/dnn/src/layers/flatten_layer.cpp
@@ -171,17 +171,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
@@ -197,6 +186,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -224,6 +214,20 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
+    }
+#endif
+
     int _startAxis;
     int _endAxis;
 };
modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -268,32 +268,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-
-        if(pnorm != 1 && pnorm != 2)
-            CV_Error(Error::StsNotImplemented, "Unsupported normalization mode");
-
-        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto input_shape = input_wrapper->getShape();
-
-        NormalizeConfiguration<float> config;
-        config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
-        config.axis_start = clamp(startAxis, input_shape.size());
-        config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
-        config.norm = pnorm;
-        config.eps = epsilon;
-
-        const auto& weightsMat = blobs.empty() ? Mat() : blobs[0];
-        return make_cuda_node<cuda4dnn::NormalizeOp>(preferableTarget, std::move(context->stream), weightsMat, config);
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
@@ -346,6 +320,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -384,6 +359,35 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+
+        if(pnorm != 1 && pnorm != 2)
+            CV_Error(Error::StsNotImplemented, "Unsupported normalization mode");
+
+        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
+        auto input_shape = input_wrapper->getShape();
+
+        NormalizeConfiguration<float> config;
+        config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
+        config.axis_start = clamp(startAxis, input_shape.size());
+        config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
+        config.norm = pnorm;
+        config.eps = epsilon;
+
+        const auto& weightsMat = blobs.empty() ? Mat() : blobs[0];
+        return make_cuda_node<cuda4dnn::NormalizeOp>(preferableTarget, std::move(context->stream), weightsMat, config);
+    }
+#endif
+
 private:
     int startAxis, endAxis;
 };
modules/dnn/src/layers/permute_layer.cpp
@@ -381,27 +381,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::PermuteOp>(preferableTarget, std::move(context->stream), _order);
-    }
-#endif
-
-    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
-    {
-#ifdef HAVE_VULKAN
-        CV_Assert(!_order.empty());
-        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPermute(_order));
-        return Ptr<BackendNode>(new VkComBackendNode(input, op));
-#endif // HAVE_VULKAN
-        return Ptr<BackendNode>();
-    }
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
@@ -412,6 +391,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -424,6 +404,30 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+        return make_cuda_node<cuda4dnn::PermuteOp>(preferableTarget, std::move(context->stream), _order);
+    }
+#endif
+
+#ifdef HAVE_VULKAN
+    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
+    {
+        CV_Assert(!_order.empty());
+        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPermute(_order));
+        return Ptr<BackendNode>(new VkComBackendNode(input, op));
+    }
+#endif // HAVE_VULKAN
+
     size_t _count;
     std::vector<size_t> _order;
modules/dnn/src/layers/pooling_layer.cpp
@@ -189,7 +189,7 @@ public:
         return type == MAX || type == AVE || type == ROI;
     }
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (computeMaxIdx)
             return false;
@@ -207,11 +207,11 @@ public:
         return type != STOCHASTIC;
     }
 #endif
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         return !computeMaxIdx && type != STOCHASTIC;
     }
-    else if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_VKCOM)
+    if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_VKCOM)
     {
         if (kernel_size.size() == 3)
             return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU);
@@ -409,9 +409,10 @@ public:
     }
 #endif
 
-    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
-    {
 #ifdef HAVE_VULKAN
+    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
+    {
         int padding_mode;
         vkcom::PoolType pool_type;
         int filter_size[2] = {kernel.height, kernel.width};
@@ -440,9 +441,9 @@ public:
                                                stride_size, padding_mode,
                                                pool_type, avePoolPaddedArea));
         return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
-#endif
-        return Ptr<BackendNode>();
     }
+#endif
 
     virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
     {
@@ -503,47 +504,46 @@ public:
(This hunk is essentially whitespace/brace re-formatting of the nGraph block; the old and new lines are otherwise identical:)
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert_N((inputs.size() == 1 && (type == MAX || type == AVE)) || inputs.size() == 2, nodes.size() == inputs.size());
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
 
         ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT;
         if (!padMode.empty())
             pad_type = padMode == "VALID" ? ngraph::op::PadType::VALID : ngraph::op::PadType::SAME_UPPER;
 
         auto rounding_type = ceilMode ? ngraph::op::RoundingType::CEIL : ngraph::op::RoundingType::FLOOR;
         if (type == AVE) {
             auto exclude_pad = !avePoolPaddedArea;
             auto ave_pool = std::make_shared<ngraph::op::v1::AvgPool>(ieInpNode, ngraph::Strides(strides),
                             ngraph::Shape(pads_begin), ngraph::Shape(pads_end), ngraph::Shape(kernel_size),
                             exclude_pad, rounding_type, pad_type);
             return Ptr<BackendNode>(new InfEngineNgraphNode(ave_pool));
         }
         else if (type == MAX) {
             auto max_pool = std::make_shared<ngraph::op::v1::MaxPool>(ieInpNode, ngraph::Strides(strides),
                             ngraph::Shape(pads_begin), ngraph::Shape(pads_end), ngraph::Shape(kernel_size),
                             rounding_type, pad_type);
             return Ptr<BackendNode>(new InfEngineNgraphNode(max_pool));
         }
         else if (type == ROI) {
             auto& coords = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
             auto roi = std::make_shared<ngraph::op::ROIPooling>(ieInpNode, coords,
                        ngraph::Shape{(size_t)pooledSize.height, (size_t)pooledSize.width}, spatialScale, "max");
             return Ptr<BackendNode>(new InfEngineNgraphNode(roi));
         }
         else if (type == PSROI) {
             auto& coords = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
             auto psroi = std::make_shared<ngraph::op::PSROIPooling>(ieInpNode, coords,
                          (size_t)psRoiOutChannels, (size_t)pooledSize.width, spatialScale, 1, 1, "average");
             return Ptr<BackendNode>(new InfEngineNgraphNode(psroi));
         }
         else
             CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
     }
 #endif  // HAVE_DNN_NGRAPH
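Besides moving blocks around, pooling_layer.cpp turns "else if" backend checks into independent "if"s: since every branch returns, the chaining was redundant, and independent "if"s keep each preprocessor-guarded branch self-contained. A minimal sketch of the pattern (illustrative backend IDs and flags, not the real dnn enum):

// Sketch of the supportBackend() restructuring: once every branch
// returns, "else if" chains can become independent "if"s, which lets
// #ifdef blocks enclose whole branches cleanly.
enum { BACKEND_DEFAULT = 0, BACKEND_IE = 2, BACKEND_NGRAPH = 5 };

bool supportBackend(int backendId, bool computeMaxIdx)
{
    if (backendId == BACKEND_DEFAULT)
        return true;                 // always available
#ifdef HAVE_IE
    if (backendId == BACKEND_IE)     // "if", not "else if": the previous
        return !computeMaxIdx;       // branch already returned
#endif
    if (backendId == BACKEND_NGRAPH)
        return !computeMaxIdx;
    return false;
}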
modules/dnn/src/layers/prior_box_layer.cpp
@@ -504,56 +504,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-
-        auto feature_map_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto feature_map_shape = feature_map_wrapper->getShape();
-
-        auto image_wrapper = inputs[1].dynamicCast<CUDABackendWrapper>();
-        auto image_shape = image_wrapper->getShape();
-
-        PriorBoxConfiguration config;
-        config.feature_map_width = feature_map_shape.rbegin()[0];
-        config.feature_map_height = feature_map_shape.rbegin()[1];
-        config.image_width = image_shape.rbegin()[0];
-        config.image_height = image_shape.rbegin()[1];
-
-        config.num_priors = _numPriors;
-        config.box_widths = _boxWidths;
-        config.box_heights = _boxHeights;
-        config.offsets_x = _offsetsX;
-        config.offsets_y = _offsetsY;
-        config.stepX = _stepX;
-        config.stepY = _stepY;
-
-        config.variance = _variance;
-
-        config.clip = _clip;
-        config.normalize = _bboxesNormalized;
-
-        return make_cuda_node<cuda4dnn::PriorBoxOp>(preferableTarget, std::move(context->stream), config);
-    }
-#endif
-
-    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
-    {
-#ifdef HAVE_VULKAN
-        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPriorBox(_stepX, _stepY,
-                                                                _clip, _numPriors,
-                                                                _variance, _offsetsX,
-                                                                _offsetsY, _boxWidths,
-                                                                _boxHeights));
-        return Ptr<BackendNode>(new VkComBackendNode(input, op));
-#endif // HAVE_VULKAN
-        return Ptr<BackendNode>();
-    }
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
@@ -617,6 +567,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
@@ -679,6 +630,58 @@ public:
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+
+        auto feature_map_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
+        auto feature_map_shape = feature_map_wrapper->getShape();
+
+        auto image_wrapper = inputs[1].dynamicCast<CUDABackendWrapper>();
+        auto image_shape = image_wrapper->getShape();
+
+        PriorBoxConfiguration config;
+        config.feature_map_width = feature_map_shape.rbegin()[0];
+        config.feature_map_height = feature_map_shape.rbegin()[1];
+        config.image_width = image_shape.rbegin()[0];
+        config.image_height = image_shape.rbegin()[1];
+
+        config.num_priors = _numPriors;
+        config.box_widths = _boxWidths;
+        config.box_heights = _boxHeights;
+        config.offsets_x = _offsetsX;
+        config.offsets_y = _offsetsY;
+        config.stepX = _stepX;
+        config.stepY = _stepY;
+
+        config.variance = _variance;
+
+        config.clip = _clip;
+        config.normalize = _bboxesNormalized;
+
+        return make_cuda_node<cuda4dnn::PriorBoxOp>(preferableTarget, std::move(context->stream), config);
+    }
+#endif
+
+#ifdef HAVE_VULKAN
+    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
+    {
+        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPriorBox(_stepX, _stepY,
+                                                                _clip, _numPriors,
+                                                                _variance, _offsetsX,
+                                                                _offsetsY, _boxWidths,
+                                                                _boxHeights));
+        return Ptr<BackendNode>(new VkComBackendNode(input, op));
+    }
+#endif // HAVE_VULKAN
+
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
     {
modules/dnn/src/layers/reorg_layer.cpp
@@ -193,17 +193,6 @@ public:
         permute->forward(inputs, outputs, internals_arr);
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReorgOp>(preferableTarget, std::move(context->stream), reorgStride);
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
@@ -214,6 +203,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -224,6 +214,20 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+        return make_cuda_node<cuda4dnn::ReorgOp>(preferableTarget, std::move(context->stream), reorgStride);
+    }
+#endif
+
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
     {
modules/dnn/src/layers/reshape_layer.cpp
@@ -267,17 +267,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
@@ -289,6 +278,7 @@ public:
     }
 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -304,6 +294,20 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+        return make_cuda_node<cuda4dnn::ReshapeOp>(preferableTarget, std::move(context->stream));
+    }
+#endif
+
 private:
     std::vector<MatShape> outShapes;
 };
modules/dnn/src/layers/resize_layer.cpp
@@ -170,26 +170,6 @@ public:
             CV_Error(Error::StsNotImplemented, "Unknown interpolation: " + interpolation);
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-
-        cuda4dnn::InterpolationType itype;
-        if (interpolation == "nearest")
-            itype = InterpolationType::NEAREST_NEIGHBOUR;
-        else if (interpolation == "bilinear")
-            itype = InterpolationType::BILINEAR;
-        else
-            CV_Error(Error::StsNotImplemented, "Requested interpolation mode is not available in resize layer.");
-
-        return make_cuda_node<cuda4dnn::ResizeOp>(preferableTarget, std::move(context->stream), itype, scaleHeight, scaleWidth);
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
@@ -251,6 +231,29 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+
+        cuda4dnn::InterpolationType itype;
+        if (interpolation == "nearest")
+            itype = InterpolationType::NEAREST_NEIGHBOUR;
+        else if (interpolation == "bilinear")
+            itype = InterpolationType::BILINEAR;
+        else
+            CV_Error(Error::StsNotImplemented, "Requested interpolation mode is not available in resize layer.");
+
+        return make_cuda_node<cuda4dnn::ResizeOp>(preferableTarget, std::move(context->stream), itype, scaleHeight, scaleWidth);
+    }
+#endif
+
 protected:
     int outWidth, outHeight;
     const int zoomFactorWidth, zoomFactorHeight;
modules/dnn/src/layers/scale_layer.cpp
@@ -52,7 +52,7 @@ public:
     {
         std::vector<Mat> inputs;
         inputs_arr.getMatVector(inputs);
-        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
+        hasWeights = blobs.size() == 2 || (blobs.size() <= 1 && !hasBias);
         CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias);
     }
@@ -86,10 +86,9 @@ public:
         Mat &outBlob = outputs[0];
         // There is a mode when we multiply a first blob by a second one
         // instead of trainable weights.
-        Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
-        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
-        if (!weights.empty())
-            weights = weights.reshape(1, 1);
+        Mat weights = hasWeights ? (blobs.empty() ? inputs[1] : blobs[0]).reshape(1, 1) : Mat();
+        Mat bias = hasBias ? (blobs.empty() ? inputs[1] : blobs.back()).reshape(1, 1) : Mat();
         MatShape inpShape = shape(inpBlob);
         const int numWeights = !weights.empty() ? weights.total() : bias.total();
         CV_Assert(numWeights != 0);
@@ -259,28 +258,40 @@ public:
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(!blobs.empty());
-        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        const size_t numChannels = blobs[0].total();
-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        auto ieInpNode0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode1 = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+
+        size_t numChannels = 1;
+        if (blobs.empty())
+            for (const size_t& dim : ieInpNode1->get_shape())
+                numChannels *= dim;
+        else
+            numChannels = blobs[0].total();
+
+        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
         int cAxis = clamp(axis, shape.size());
         shape[cAxis] = numChannels;
 
-        auto node = ieInpNode;
+        auto node = ieInpNode0;
         if (hasWeights)
         {
-            auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                                 ngraph::Shape(shape), blobs[0].data);
+            auto weight = blobs.empty() ? ieInpNode1 :
+                          std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                 ngraph::Shape(shape), blobs[0].data);
             node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
         }
         if (hasBias || !hasWeights)
         {
-            auto bias = hasBias ?
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), blobs.back().data) :
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
+            std::shared_ptr<ngraph::Node> bias;
+            if (hasBias)
+            {
+                bias = blobs.empty() ? ieInpNode1 :
+                       std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), blobs.back().data);
+            }
+            else
+                bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
             node = std::make_shared<ngraph::op::v1::Add>(node, bias, ngraph::op::AutoBroadcastType::NUMPY);
         }
         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
@@ -289,8 +300,8 @@ public:
     void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
     {
-        scale = hasWeights ? blobs[0] : Mat();
-        shift = hasBias ? blobs.back() : Mat();
+        scale = (hasWeights && !blobs.empty()) ? blobs[0] : Mat();
+        shift = (hasBias && !blobs.empty()) ? blobs.back() : Mat();
     }
 
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
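The scale_layer.cpp changes let both the weights and the bias come from a second runtime input when no trained blobs are present. As a rough model of the layer's semantics (per-channel out = in * weight + bias), here is a simplified sketch; scaleForward is an illustrative helper, not the layer's code, and real broadcasting is more general:

#include <opencv2/core.hpp>
using namespace cv;

// Simplified model: each row of a CV_32F matrix is treated as a
// "channel" and gets its own w*x + b; empty weights mean w = 1,
// empty bias means b = 0 (mirroring hasWeights / hasBias).
Mat scaleForward(const Mat& in, const Mat& weights, const Mat& bias)
{
    CV_Assert(in.type() == CV_32F && (!weights.empty() || !bias.empty()));
    Mat out = in.clone();
    for (int r = 0; r < out.rows; ++r)
    {
        int wi = weights.empty() ? 0 : r % (int)weights.total();
        int bi = bias.empty() ? 0 : r % (int)bias.total();
        float w = weights.empty() ? 1.f : weights.at<float>(wi);
        float b = bias.empty() ? 0.f : bias.at<float>(bi);
        out.row(r) = out.row(r) * w + b;  // per-channel affine transform
    }
    return out;
}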
modules/dnn/src/layers/slice_layer.cpp
@@ -273,27 +273,6 @@ public:
         }
     }
 
-#ifdef HAVE_CUDA
-    Ptr<BackendNode> initCUDA(
-        void *context_,
-        const std::vector<Ptr<BackendWrapper>>& inputs,
-        const std::vector<Ptr<BackendWrapper>>& outputs
-    ) override
-    {
-        auto context = reinterpret_cast<csl::CSLContext*>(context_);
-
-        std::vector<std::vector<std::size_t>> offsets;
-        for (const auto& ranges : sliceRanges)
-        {
-            std::vector<std::size_t> offsets_i;
-            for (const auto& range : ranges)
-                offsets_i.push_back(range.start);
-            offsets.push_back(std::move(offsets_i));
-        }
-
-        return make_cuda_node<cuda4dnn::SliceOp>(preferableTarget, std::move(context->stream), std::move(offsets));
-    }
-#endif
-
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
@@ -352,6 +331,7 @@ public:
 #endif
 #endif
 
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -381,6 +361,29 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_CUDA
+    Ptr<BackendNode> initCUDA(
+        void *context_,
+        const std::vector<Ptr<BackendWrapper>>& inputs,
+        const std::vector<Ptr<BackendWrapper>>& outputs
+    ) override
+    {
+        auto context = reinterpret_cast<csl::CSLContext*>(context_);
+
+        std::vector<std::vector<std::size_t>> offsets;
+        for (const auto& ranges : sliceRanges)
+        {
+            std::vector<std::size_t> offsets_i;
+            for (const auto& range : ranges)
+                offsets_i.push_back(range.start);
+            offsets.push_back(std::move(offsets_i));
+        }
+
+        return make_cuda_node<cuda4dnn::SliceOp>(preferableTarget, std::move(context->stream), std::move(offsets));
+    }
+#endif
+
 };
 
 class CropLayerImpl CV_FINAL : public SliceLayerImpl
modules/dnn/src/onnx/onnx_importer.cpp
@@ -427,24 +427,57 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         layerParams.type = "Slice";
     }
-    else if (layer_type == "Add" || layer_type == "Sum")
+    else if (layer_type == "Add" || layer_type == "Sum" || layer_type == "Sub")
     {
+        bool isSub = layer_type == "Sub";
+        CV_CheckEQ(node_proto.input_size(), 2, "");
         if (layer_id.find(node_proto.input(1)) == layer_id.end())
         {
             Mat blob = getBlob(node_proto, constBlobs, 1);
             blob = blob.reshape(1, 1);
             if (blob.total() == 1) {
                 layerParams.type = "Power";
-                layerParams.set("shift", blob.at<float>(0));
+                layerParams.set("shift", (isSub ? -1 : 1) * blob.at<float>(0));
             }
             else {
                 layerParams.type = "Scale";
                 layerParams.set("bias_term", true);
-                layerParams.blobs.push_back(blob);
+                layerParams.blobs.push_back((isSub ? -1 : 1) * blob);
             }
         }
-        else {
+        else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
+        {
             layerParams.type = "Eltwise";
+            if (isSub)
+            {
+                static float subCoeffs[] = {1.f, -1.f};
+                layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
+            }
+        }
+        else
+        {
+            if (isSub)
+            {
+                LayerParams powerParams;
+                powerParams.name = layerParams.name + "/neg";
+                powerParams.type = "Power";
+                powerParams.set("scale", -1);
+
+                //Create Power layer
+                int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                //Connect to input
+                layerId = layer_id.find(node_proto.input(1));
+                CV_Assert(layerId != layer_id.end());
+                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                //Add shape
+                layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
+                outShapes[powerParams.name] = outShapes[node_proto.input(1)];
+
+                //Replace input to Power
+                node_proto.set_input(1, powerParams.name);
+            }
+            layerParams.type = "Scale";
+            layerParams.set("bias_term", true);
         }
     }
     else if (layer_type == "Max")
@@ -452,19 +485,6 @@ void ONNXImporter::populateNet(Net dstNet)
         layerParams.type = "Eltwise";
         layerParams.set("operation", "max");
     }
-    else if (layer_type == "Sub")
-    {
-        Mat blob = getBlob(node_proto, constBlobs, 1);
-        if (blob.total() == 1) {
-            layerParams.type = "Power";
-            layerParams.set("shift", -blob.at<float>(0));
-        }
-        else {
-            layerParams.type = "Scale";
-            layerParams.set("has_bias", true);
-            layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
-        }
-    }
     else if (layer_type == "Neg")
     {
         layerParams.type = "Power";
@@ -643,10 +663,35 @@ void ONNXImporter::populateNet(Net dstNet)
             layerParams.type = "Scale";
         }
     }
-    else {
+    else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
+    {
         layerParams.type = "Eltwise";
         layerParams.set("operation", isDiv ? "div" : "prod");
     }
+    else
+    {
+        if (isDiv)
+        {
+            LayerParams powerParams;
+            powerParams.name = layerParams.name + "/inv";
+            powerParams.type = "Power";
+            powerParams.set("power", -1);
+
+            //Create Power layer
+            int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+            //Connect to input
+            layerId = layer_id.find(node_proto.input(1));
+            CV_Assert(layerId != layer_id.end());
+            dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+            //Add shape
+            layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
+            outShapes[powerParams.name] = outShapes[node_proto.input(1)];
+
+            //Replace input to Power
+            node_proto.set_input(1, powerParams.name);
+        }
+        layerParams.type = "Scale";
+    }
 
     if (!haveVariables)
     {
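The importer change generalizes Sub and Div with two variable inputs: when shapes match they lower to an Eltwise layer (with coefficients {1, -1} for Sub), and when shapes differ the second input is first negated via Power(scale=-1) or inverted via Power(power=-1) and then fed to a broadcasting Scale layer. A tiny numeric sanity check of the identities behind these rewrites (plain arithmetic, not the dnn API):

#include <cmath>
#include <cstdio>

// a - b == a + (-1 * b), and a / b == a * b^(-1):
// "Power" with scale=-1 negates, "Power" with power=-1 inverts.
int main()
{
    double a = 6.0, b = 3.0;
    double sub = a + (-1.0 * b);          // Sub via negate-then-add
    double div = a * std::pow(b, -1.0);   // Div via invert-then-multiply
    std::printf("sub=%g div=%g\n", sub, div);  // prints sub=3 div=2
    return 0;
}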
modules/dnn/test/test_onnx_importer.cpp
@@ -32,29 +32,33 @@ public:
     void testONNXModels(const String& basename, const Extension ext = npy,
                         const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
-                        bool checkNoFallbacks = true)
+                        bool checkNoFallbacks = true, int numInps = 1)
     {
         String onnxmodel = _tf("models/" + basename + ".onnx", required);
-        Mat inp, ref;
+        std::vector<Mat> inps(numInps);
+        Mat ref;
         if (ext == npy) {
-            inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
+            for (int i = 0; i < numInps; ++i)
+                inps[i] = blobFromNPY(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".npy"));
             ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
         }
         else if (ext == pb) {
-            inp = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
+            for (int i = 0; i < numInps; ++i)
+                inps[i] = readTensorFromONNX(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".pb"));
             ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
         }
         else
             CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");
 
-        checkBackend(&inp, &ref);
+        checkBackend(&inps[0], &ref);
         Net net = readNetFromONNX(onnxmodel);
         ASSERT_FALSE(net.empty());
 
         net.setPreferableBackend(backend);
         net.setPreferableTarget(target);
 
-        net.setInput(inp);
+        for (int i = 0; i < numInps; ++i)
+            net.setInput(inps[i], numInps > 1 ? format("%d", i) : "");
         Mat out = net.forward("");
 
         if (useSoftmax)
@@ -352,25 +356,14 @@ TEST_P(Test_ONNX_layers, ResizeUnfused)
 TEST_P(Test_ONNX_layers, MultyInputs)
 {
-    const String model =  _tf("models/multy_inputs.onnx");
-
-    Net net = readNetFromONNX(model);
-    ASSERT_FALSE(net.empty());
-
-    net.setPreferableBackend(backend);
-    net.setPreferableTarget(target);
-
-    Mat inp1 = blobFromNPY(_tf("data/input_multy_inputs_0.npy"));
-    Mat inp2 = blobFromNPY(_tf("data/input_multy_inputs_1.npy"));
-    Mat ref  = blobFromNPY(_tf("data/output_multy_inputs.npy"));
-    checkBackend(&inp1, &ref);
-
-    net.setInput(inp1, "0");
-    net.setInput(inp2, "1");
-    Mat out = net.forward();
-
-    normAssert(ref, out, "", default_l1,  default_lInf);
-    expectNoFallbacksFromIE(net);
+    testONNXModels("multy_inputs", npy, 0, 0, false, true, 2);
+}
+
+TEST_P(Test_ONNX_layers, Broadcast)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    testONNXModels("channel_broadcast", npy, 0, 0, false, true, 2);
 }
 
 TEST_P(Test_ONNX_layers, Div)
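With the refactored helper, multi-input tests collapse to a single call such as testONNXModels("multy_inputs", npy, 0, 0, false, true, 2): input file i becomes "data/input_<base>_<i>.npy" and is bound to the network input named "<i>". A standalone sketch of just that naming convention (demo code, not part of the test suite):

#include <opencv2/core.hpp>
#include <iostream>

// Demonstrates the file/input naming used by testONNXModels when
// numInps > 1; with numInps == 1 both suffixes stay empty.
int main()
{
    const cv::String basename = "multy_inputs";
    const int numInps = 2;
    for (int i = 0; i < numInps; ++i)
    {
        cv::String file = "data/input_" + basename +
                          (numInps > 1 ? cv::format("_%d", i) : cv::String()) + ".npy";
        cv::String inputName = numInps > 1 ? cv::format("%d", i) : cv::String();
        std::cout << file << " -> input '" << inputName << "'\n";
    }
    return 0;
}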
modules/imgcodecs/src/grfmt_jpeg.cpp
@@ -75,6 +75,17 @@ extern "C" {
 #include "jpeglib.h"
 }
 
+#ifndef CV_MANUAL_JPEG_STD_HUFF_TABLES
+#if defined(LIBJPEG_TURBO_VERSION_NUMBER) && LIBJPEG_TURBO_VERSION_NUMBER >= 1003090
+#define CV_MANUAL_JPEG_STD_HUFF_TABLES 0  // libjpeg-turbo handles standard huffman tables itself (jstdhuff.c)
+#else
+#define CV_MANUAL_JPEG_STD_HUFF_TABLES 1
+#endif
+#endif
+#if CV_MANUAL_JPEG_STD_HUFF_TABLES == 0
+#undef CV_MANUAL_JPEG_STD_HUFF_TABLES
+#endif
+
 namespace cv
 {
@@ -252,6 +263,7 @@ bool JpegDecoder::readHeader()
     return result;
 }
 
+#ifdef CV_MANUAL_JPEG_STD_HUFF_TABLES
 /***************************************************************************
  * following code is for supporting MJPEG image files
  * based on a message of Laurent Pinchart on the video4linux mailing list
@@ -385,6 +397,7 @@ int my_jpeg_load_dht (struct jpeg_decompress_struct *info, unsigned char *dht,
  * end of code for supportting MJPEG image files
  * based on a message of Laurent Pinchart on the video4linux mailing list
  ***************************************************************************/
+#endif // CV_MANUAL_JPEG_STD_HUFF_TABLES
 
 bool JpegDecoder::readData( Mat& img )
 {
@@ -400,6 +413,7 @@ bool JpegDecoder::readData( Mat& img )
     if( setjmp( jerr->setjmp_buffer ) == 0 )
     {
+#ifdef CV_MANUAL_JPEG_STD_HUFF_TABLES
         /* check if this is a mjpeg image format */
         if ( cinfo->ac_huff_tbl_ptrs[0] == NULL &&
              cinfo->ac_huff_tbl_ptrs[1] == NULL &&
@@ -413,6 +427,7 @@ bool JpegDecoder::readData( Mat& img )
                               cinfo->ac_huff_tbl_ptrs,
                               cinfo->dc_huff_tbl_ptrs );
         }
+#endif
 
         if( color )
         {
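Worth noting in the hunk above is the define-then-undef idiom: CV_MANUAL_JPEG_STD_HUFF_TABLES is first normalized to a 0/1 value, then #undef'd when 0, so that all the later guards can use plain #ifdef to mean "manual tables enabled". A generic sketch of the trick (FEATURE_X and SOME_LIB_VERSION are illustrative names, not real configuration macros):

// Normalize a feature macro to 0/1, then strip it when disabled so
// downstream code can test it with #ifdef instead of #if.
#ifndef FEATURE_X
#  if defined(SOME_LIB_VERSION) && SOME_LIB_VERSION >= 1003090
#    define FEATURE_X 0  // the library provides this itself
#  else
#    define FEATURE_X 1
#  endif
#endif
#if FEATURE_X == 0
#  undef FEATURE_X       // now "#ifdef FEATURE_X" means "enabled"
#endif

#ifdef FEATURE_X
// fallback implementation would be compiled only in this case
#endif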