opencv_contrib: commit 4b1834ac
Authored May 23, 2017 by Vadim Pisarevsky

    Merge pull request #1156 from arrybn:layers_shapes

Parents: 3e9b1f66, 9b73fee2
Showing 37 changed files with 672 additions and 663 deletions.
modules/dnn/include/opencv2/dnn/all_layers.hpp       +2   -16
modules/dnn/include/opencv2/dnn/dnn.hpp              +59  -11
modules/dnn/include/opencv2/dnn/shape_utils.hpp      +68  -20
modules/dnn/misc/python/pyopencv_dnn.hpp             +2   -0
modules/dnn/perf/perf_convolution.cpp                +21  -7
modules/dnn/src/caffe/caffe_importer.cpp             +2   -2
modules/dnn/src/dnn.cpp                              +0   -0
modules/dnn/src/layers/batch_norm_layer.cpp          +5   -18
modules/dnn/src/layers/blank_layer.cpp               +1   -1
modules/dnn/src/layers/concat_layer.cpp              +20  -19
modules/dnn/src/layers/convolution_layer.cpp         +0   -0
modules/dnn/src/layers/crop_layer.cpp                +24  -9
modules/dnn/src/layers/detection_output_layer.cpp    +21  -20
modules/dnn/src/layers/elementwise_layers.cpp        +14  -18
modules/dnn/src/layers/eltwise_layer.cpp             +12  -8
modules/dnn/src/layers/flatten_layer.cpp             +33  -28
modules/dnn/src/layers/fully_connected_layer.cpp     +21  -28
modules/dnn/src/layers/layers_common.cpp             +25  -13
modules/dnn/src/layers/layers_common.hpp             +7   -3
modules/dnn/src/layers/lrn_layer.cpp                 +19  -29
modules/dnn/src/layers/max_unpooling_layer.cpp       +11  -8
modules/dnn/src/layers/mvn_layer.cpp                 +1   -12
modules/dnn/src/layers/normalize_bbox_layer.cpp      +51  -63
modules/dnn/src/layers/padding_layer.cpp             +15  -14
modules/dnn/src/layers/permute_layer.cpp             +35  -24
modules/dnn/src/layers/pooling_layer.cpp             +37  -34
modules/dnn/src/layers/prior_box_layer.cpp           +23  -27
modules/dnn/src/layers/recurrent_layers.cpp          +0   -0
modules/dnn/src/layers/reshape_layer.cpp             +35  -25
modules/dnn/src/layers/scale_layer.cpp               +1   -11
modules/dnn/src/layers/shift_layer.cpp               +11  -35
modules/dnn/src/layers/slice_layer.cpp               +31  -28
modules/dnn/src/layers/softmax_layer.cpp             +17  -21
modules/dnn/src/layers/split_layer.cpp               +8   -7
modules/dnn/src/tensorflow/tf_importer.cpp           +4   -4
modules/dnn/test/npy_blob.hpp                        +0   -69
modules/dnn/test/test_layers.cpp                     +36  -31
modules/dnn/include/opencv2/dnn/all_layers.hpp

@@ -121,21 +121,7 @@ namespace dnn
      * @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used,
      * where `Wh` is parameter from setWeights().
      */
-    virtual void setOutShape(const std::vector<int> &outTailShape = std::vector<int>()) = 0;
-
-    /** @brief Set @f$ h_{t-1} @f$ value that will be used in next forward() calls.
-      * @details By-default @f$ h_{t-1} @f$ is inited by zeros and updated after each forward() call.
-      */
-    virtual void setH(const Mat &H) = 0;
-    /** @brief Returns current @f$ h_{t-1} @f$ value (deep copy). */
-    virtual Mat getH() const = 0;
-
-    /** @brief Set @f$ c_{t-1} @f$ value that will be used in next forward() calls.
-      * @details By-default @f$ c_{t-1} @f$ is inited by zeros and updated after each forward() call.
-      */
-    virtual void setC(const Mat &C) = 0;
-    /** @brief Returns current @f$ c_{t-1} @f$ value (deep copy). */
-    virtual Mat getC() const = 0;
+    virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;

     /** @brief Specifies either interpet first dimension of input blob as timestamp dimenion either as sample.
      *

@@ -289,7 +275,7 @@ namespace dnn
     class CV_EXPORTS ReshapeLayer : public Layer
     {
     public:
-        std::vector<int> newShapeDesc;
+        MatShape newShapeDesc;
         Range newShapeRange;

         static Ptr<ReshapeLayer> create(const LayerParams& params);
modules/dnn/include/opencv2/dnn/dnn.hpp

@@ -53,6 +53,8 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
 //! @addtogroup dnn
 //! @{

+    typedef std::vector<int> MatShape;
+
     /** @brief Initialize dnn module and built-in layers.
      *
      * This function automatically called on most of OpenCV builds,

@@ -87,33 +89,35 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
         //! List of learned parameters must be stored here to allow read them by using Net::getParam().
         CV_PROP_RW std::vector<Mat> blobs;

-        /** @brief Allocates internal buffers and output blobs with respect to the shape of inputs.
+        /** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
          *  @param[in]  input  vector of already allocated input blobs
-         *  @param[out] output vector of output blobs, which must be allocated
+         *  @param[out] output vector of already allocated output blobs
          *
-         * This method must create each produced blob according to shape of @p input blobs and internal layer params.
-         * If this method is called first time then @p output vector consists from empty blobs and its size determined by number of output connections.
-         * This method can be called multiple times if size of any @p input blob was changed.
+         * If this method is called after network has allocated all memory for input and output blobs
+         * and before inferencing.
          */
-        virtual void allocate(const std::vector<Mat*> &input, std::vector<Mat> &output) = 0;
+        virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);

         /** @brief Given the @p input blobs, computes the output @p blobs.
          *  @param[in]  input  the input blobs.
          *  @param[out] output allocated output blobs, which will store results of the computation.
+         *  @param[out] internals allocated internal blobs
          */
-        virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output) = 0;
+        virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) = 0;

         /** @brief @overload */
-        CV_WRAP void allocate(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
+        CV_WRAP void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);

         /** @brief @overload */
-        CV_WRAP std::vector<Mat> allocate(const std::vector<Mat> &inputs);
+        CV_WRAP std::vector<Mat> finalize(const std::vector<Mat> &inputs);

         /** @brief @overload */
-        CV_WRAP void forward(const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs);
+        CV_WRAP void forward(const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs,
+                             CV_IN_OUT std::vector<Mat> &internals);

         /** @brief Allocates layer and computes output. */
-        CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
+        CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
+                         CV_IN_OUT std::vector<Mat> &internals);

         /** @brief Returns index of input blob into the input array.
          *  @param inputName label of input blob

@@ -127,6 +131,11 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
          */
         virtual int outputNameToIndex(String outputName);

+        virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                                     const int requiredOutputs,
+                                     std::vector<MatShape> &outputs,
+                                     std::vector<MatShape> &internals) const;
+
         CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
         CV_PROP String type; //!< Type name which was used for creating layer by layer factory.

@@ -275,6 +284,45 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
         /** @brief Returns indexes of layers with unconnected outputs.
          */
         CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
+
+        /** @brief Returns input and output shapes for all layers in loaded model;
+          * preliminary inferencing isn't necessary.
+          * @param netInputShapes shapes for all input blobs in net input layer.
+          * @param layersIds output parameter for layer IDs.
+          * @param inLayersShapes output parameter for input layers shapes;
+          * order is the same as in layersIds
+          * @param outLayersShapes output parameter for output layers shapes;
+          * order is the same as in layersIds
+          */
+        CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
+                                     std::vector<int>* layersIds,
+                                     std::vector<std::vector<MatShape> >* inLayersShapes,
+                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+        /** @overload */
+        CV_WRAP void getLayersShapes(const MatShape& netInputShape,
+                                     std::vector<int>* layersIds,
+                                     std::vector<std::vector<MatShape> >* inLayersShapes,
+                                     std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+        /** @brief Returns input and output shapes for layer with specified
+          * id in loaded model; preliminary inferencing isn't necessary.
+          * @param netInputShape shape input blob in net input layer.
+          * @param layerId id for layer.
+          * @param inLayerShapes output parameter for input layers shapes;
+          * order is the same as in layersIds
+          * @param outLayerShapes output parameter for output layers shapes;
+          * order is the same as in layersIds
+          */
+        CV_WRAP void getLayerShapes(const MatShape& netInputShape,
+                                    const int layerId,
+                                    std::vector<MatShape>* inLayerShapes,
+                                    std::vector<MatShape>* outLayerShapes) const;
+
+        /** @overload */
+        CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
+                                    const int layerId,
+                                    std::vector<MatShape>* inLayerShapes,
+                                    std::vector<MatShape>* outLayerShapes) const;

     private:
         struct Impl;
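The new getLayersShapes/getLayerShapes accessors make shape inference available without running the network. A minimal sketch of how they could be called (not part of the commit; the printLayerShapes helper and the 1x3x224x224 input geometry are illustrative assumptions):

    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/shape_utils.hpp>

    // Query every layer's input/output shapes for an assumed NCHW input,
    // without allocating blobs or calling forward().
    void printLayerShapes(cv::dnn::Net& net)
    {
        using namespace cv::dnn;
        MatShape inputShape = shape(1, 3, 224, 224); // assumed input geometry

        std::vector<int> layersIds;
        std::vector<std::vector<MatShape> > inShapes, outShapes;
        net.getLayersShapes(inputShape, &layersIds, &inShapes, &outShapes);

        for (size_t i = 0; i < layersIds.size(); i++)
        {
            // Each layer may have several inputs/outputs; print the first of each.
            if (!inShapes[i].empty())  print(inShapes[i][0], "in");
            if (!outShapes[i].empty()) print(outShapes[i][0], "out");
        }
    }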
modules/dnn/include/opencv2/dnn/shape_utils.hpp

@@ -55,22 +55,6 @@ inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
     return s << "[" << r.start << ", " << r.end << ")";
 }

-//Reshaping
-//TODO: add -1 specifier for automatic size inferring
-/*template<typename Mat>
-void reshape(Mat &m, const BlobShape &shape)
-{
-    m = m.reshape(1, shape.dims(), shape.ptr());
-}
-
-template<typename Mat>
-Mat reshaped(const Mat &m, const BlobShape &shape)
-{
-    return m.reshape(1, shape.dims(), shape.ptr());
-}*/
-
 //Slicing
 struct _Range : public cv::Range

@@ -139,12 +123,76 @@ static inline Mat getPlane(const Mat &m, int n, int cn)
     return m(range).reshape(1, m.dims-2, sz);
 }

-static inline size_t shapeTotal(const std::vector<int>& shape)
+static inline MatShape shape(const int* dims, const int n = 4)
 {
-    size_t i, n = shape.size(), p = 1;
-    for( i = 0; i < n; i++ ) p *= shape[i];
-    return p;
+    MatShape shape;
+    shape.assign(dims, dims + n);
+    return shape;
 }

+static inline MatShape shape(const MatSize& size)
+{
+    return shape((const int*)size, size.dims());
+}
+
+static inline MatShape shape(const Mat& mat)
+{
+    return shape(mat.size);
+}
+
+namespace {inline bool is_neg(int i) { return i < 0; }}
+
+static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1)
+{
+    int dims[] = {a0, a1, a2, a3};
+    MatShape s = shape(dims);
+    s.erase(std::remove_if(s.begin(), s.end(), is_neg), s.end());
+    return s;
+}
+
+static inline int total(const MatShape& shape, int start = -1, int end = -1)
+{
+    if (start == -1) start = 0;
+    if (end == -1) end = shape.size();
+
+    if (shape.empty())
+        return 0;
+
+    int elems = 1;
+    CV_Assert(start < shape.size() && end <= shape.size() &&
+              start <= end);
+    for (int i = start; i < end; i++)
+    {
+        elems *= shape[i];
+    }
+    return elems;
+}
+
+static inline MatShape concat(const MatShape& a, const MatShape& b)
+{
+    MatShape c = a;
+    c.insert(c.end(), b.begin(), b.end());
+    return c;
+}
+
+inline void print(const MatShape& shape, const String& name = "")
+{
+    printf("%s: [", name.c_str());
+    size_t i, n = shape.size();
+    for (i = 0; i < n; i++)
+        printf(" %d", shape[i]);
+    printf(" ]\n");
+}
+
+inline int clamp(int ax, int dims)
+{
+    return ax < 0 ? ax + dims : ax;
+}
+
+inline int clamp(int ax, const MatShape& shape)
+{
+    return clamp(ax, (int)shape.size());
+}
+
 }
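Taken together, these helpers replace the ad-hoc std::vector<int> manipulation used across the layers. A short sketch of their semantics (not from the commit; the values in comments follow from the definitions above):

    #include <opencv2/dnn/shape_utils.hpp>
    using namespace cv::dnn;

    void shapeUtilsDemo()
    {
        MatShape s = shape(2, 3, 4, 5);   // {2, 3, 4, 5}; -1 defaults are stripped by is_neg
        int all  = total(s);              // 2*3*4*5 = 120 (whole shape)
        int tail = total(s, 2);           // 4*5 = 20 (axes [2, end))
        int ax   = clamp(-1, s);          // -1 + 4 = 3: negative axes count from the end
        MatShape c = concat(shape(2, 3), shape(4, 5)); // {2, 3, 4, 5}
        print(c, "c");                    // prints "c: [ 2 3 4 5 ]"
        (void)all; (void)tail; (void)ax;  // silence unused-variable warnings
    }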
modules/dnn/misc/python/pyopencv_dnn.hpp

 #ifdef HAVE_OPENCV_DNN
 typedef dnn::DictValue LayerId;
+typedef std::vector<dnn::MatShape> vector_MatShape;
+typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;

 template<>
 bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
modules/dnn/perf/perf_convolution.cpp

 #include "perf_precomp.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cvtest
 {

@@ -21,14 +22,14 @@ CV_ENUM(GroupSize, GROUP_OFF, GROUP_2);
 //Squared Size
 #define SSZ(n) cv::Size(n, n)

-typedef std::pair<std::vector<int>, int> InpShapeNumOut;
+typedef std::pair<MatShape, int> InpShapeNumOut;
 typedef tuple<Size, InpShapeNumOut, GroupSize, StrideSize> ConvParam; //kernel_size, inp shape, groups, stride
 typedef TestBaseWithParam<ConvParam> ConvolutionPerfTest;

-static inline std::vector<int> blobShape(int count, int nplanes, int height, int width)
+static inline MatShape blobShape(int count, int nplanes, int height, int width)
 {
     int data[] = {count, nplanes, height, width};
-    return std::vector<int>(data, data+4);
+    return MatShape(data, data+4);
 }

 PERF_TEST_P( ConvolutionPerfTest, perf, Combine(

@@ -44,7 +45,7 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     ConvParam params = GetParam();
     int ksz     = get<0>(params).width;
-    std::vector<int> inpShape = get<1>(params).first;
+    MatShape inpShape = get<1>(params).first;
     int outCn   = get<1>(params).second;
     int groups  = get<2>(params);
     int stride  = (ksz >= 11) ? 4 : (int)get<3>(params);

@@ -69,12 +70,25 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     lp.blobs.push_back(biasBlob);

     std::vector<Mat*> inpBlobs(1, &inpBlob);
-    std::vector<Mat> outBlobs;
+    std::vector<Mat> outBlobs, internalBlobs;

     cv::setNumThreads(cv::getNumberOfCPUs());
     Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
-    layer->allocate(inpBlobs, outBlobs);
+    std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
+    layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
+    for (int i = 0; i < outShapes.size(); i++)
+    {
+        outBlobs.push_back(Mat(outShapes[i], CV_32F));
+    }
+    for (int i = 0; i < internals.size(); i++)
+    {
+        internalBlobs.push_back(Mat());
+        if (total(internals[i]))
+            internalBlobs.back().create(internals[i], CV_32F);
+    }
+    layer->finalize(inpBlobs, outBlobs);

     Mat inpBlob2D = inpBlob.reshape(1, outCn);
     Mat wgtBlob2D = wgtBlob.reshape(1, outCn*(inpCn/groups));

@@ -83,7 +97,7 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     TEST_CYCLE_N(10)
     {
-        layer->forward(inpBlobs, outBlobs);
+        layer->forward(inpBlobs, outBlobs, internalBlobs);
     }

     SANITY_CHECK_NOTHING();
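The test above follows the allocation protocol that replaces allocate() throughout this commit: query shapes, create the blobs yourself, then finalize() and forward(). A condensed sketch of that protocol for a standalone layer (not from the commit; layer, inpBlob and inpBlobs stand for caller-provided objects):

    std::vector<cv::dnn::MatShape> inShapes(1, cv::dnn::shape(inpBlob)), outShapes, intShapes;
    layer->getMemoryShapes(inShapes, 0, outShapes, intShapes);  // 1. infer shapes

    std::vector<cv::Mat> outBlobs, internalBlobs;               // 2. allocate blobs
    for (size_t i = 0; i < outShapes.size(); i++)
        outBlobs.push_back(cv::Mat(outShapes[i], CV_32F));
    for (size_t i = 0; i < intShapes.size(); i++)
        internalBlobs.push_back(cv::dnn::total(intShapes[i]) ? cv::Mat(intShapes[i], CV_32F)
                                                             : cv::Mat());

    layer->finalize(inpBlobs, outBlobs);                        // 3. set internal params
    layer->forward(inpBlobs, outBlobs, internalBlobs);          // 4. run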
modules/dnn/src/caffe/caffe_importer.cpp

@@ -192,7 +192,7 @@ public:
         }
     }

-    void blobShapeFromProto(const caffe::BlobProto &pbBlob, std::vector<int>& shape)
+    void blobShapeFromProto(const caffe::BlobProto &pbBlob, MatShape& shape)
     {
         shape.clear();
         if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())

@@ -215,7 +215,7 @@ public:
     void blobFromProto(const caffe::BlobProto &pbBlob, cv::Mat &dstBlob)
     {
-        std::vector<int> shape;
+        MatShape shape;
         blobShapeFromProto(pbBlob, shape);

         dstBlob.create((int)shape.size(), &shape[0], CV_32F);
modules/dnn/src/dnn.cpp

(diff collapsed; not expanded on this page)
modules/dnn/src/layers/batch_norm_layer.cpp

@@ -29,32 +29,20 @@ public:
         epsilon = params.get<float>("eps", 1E-5);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(blobs.size() >= 2);
         CV_Assert(inputs.size() == 1);

-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            CV_Assert(blobs[0].total() == inputs[i]->size[1]);
-            CV_Assert(blobs[1].total() == inputs[i]->size[1]);
-            Mat* inp = inputs[i];
-            outputs[i].create(inp->dims, &inp->size.p[0], inp->type());
-        }
-
-        varMeanScale = 1.f;
+        float varMeanScale = 1.f;
         if (!hasWeights && !hasBias) {
             varMeanScale = *blobs[2].ptr<float>();
             if (varMeanScale != 0)
                 varMeanScale = 1/varMeanScale;
         }

         Mat invStdMat;
         cv::pow(blobs[1]*varMeanScale + epsilon, -0.5, invStdMat);
-    }
-
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
-        CV_Assert(inputs.size() == 1);

         Mat &inpBlob = *inputs[0];

@@ -91,8 +79,7 @@ public:
     }

     bool hasWeights, hasBias;
-    float epsilon, varMeanScale;
-    Mat invStdMat;
+    float epsilon;
 };

 Ptr<BatchNormLayer> BatchNormLayer::create(const LayerParams& params)
modules/dnn/src/layers/blank_layer.cpp

@@ -56,7 +56,7 @@ public:
             outputs[i] = *inputs[i];
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t i = 0; i < inputs.size(); i++)
             outputs[i] = *inputs[i];
modules/dnn/src/layers/concat_layer.cpp

@@ -56,49 +56,50 @@ public:
         axis = params.get<int>("axis", 1);
     }

-    void allocate(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs)
+    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                                 const int requiredOutputs,
+                                 std::vector<MatShape> &outputs,
+                                 std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() > 0);

-        int dims = inputs[0]->dims, dtype = inputs[0]->type();
-        std::vector<int> refShape(inputs[0]->size.p, inputs[0]->size.p + dims);
-        axisIdx = axis < 0 ? axis + dims : axis;
+        outputs.clear();
+        outputs.push_back(inputs[0]);
+        int cAxis = clamp(axis, inputs[0]);

         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            CV_Assert(inputs[i]->type() == dtype);
-            for (int curAxis = 0; curAxis < dims; curAxis++)
+            MatShape curShape = inputs[i];
+
+            CV_Assert(curShape.size() == outputs.back().size());
+            for (int curAxis = 0; curAxis < outputs.back().size(); curAxis++)
             {
-                if (curAxis != axisIdx && inputs[0]->size[curAxis] != inputs[i]->size[curAxis])
+                if (curAxis != cAxis && outputs.back()[curAxis] != curShape[curAxis])
                     CV_Error(Error::StsBadSize, "Inconsitent shape for ConcatLayer");
             }

-            axisSum += inputs[i]->size[axisIdx];
+            axisSum += curShape[cAxis];
         }
-        refShape[axisIdx] = axisSum;
-        outputs.resize(1);
-        outputs[0].create(dims, &refShape[0], dtype);
+        outputs.back()[cAxis] = axisSum;
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
+        int cAxis = clamp(axis, inputs[0]->dims);
         Mat& outMat = outputs[0];
         std::vector<Range> ranges(outputs[0].dims, Range::all());

-        ranges[axisIdx].start = 0;
+        ranges[cAxis].start = 0;
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            ranges[axisIdx].end = ranges[axisIdx].start + inputs[i]->size[axisIdx];
+            ranges[cAxis].end = ranges[cAxis].start + inputs[i]->size[cAxis];
             inputs[i]->copyTo(outMat(&ranges[0]));
-            ranges[axisIdx].start = ranges[axisIdx].end;
+            ranges[cAxis].start = ranges[cAxis].end;
         }
     }
-
-    int axisIdx;
 };

 Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)
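A worked example of the shape logic above (not from the commit): concatenating {1, 16, 32, 32} and {1, 8, 32, 32} with axis = 1 gives cAxis = clamp(1, 4) = 1, axisSum = 16 + 8 = 24, and a single output shape of {1, 24, 32, 32}; getMemoryShapes returns false because the output cannot alias an input. As a standalone sketch:

    #include <opencv2/dnn/all_layers.hpp>
    #include <opencv2/dnn/shape_utils.hpp>
    using namespace cv::dnn;

    void concatShapeDemo()
    {
        LayerParams lp;
        lp.set("axis", 1);
        Ptr<ConcatLayer> layer = ConcatLayer::create(lp);

        std::vector<MatShape> in, out, internals;
        in.push_back(shape(1, 16, 32, 32));
        in.push_back(shape(1, 8, 32, 32));
        layer->getMemoryShapes(in, 1, out, internals); // no data allocated
        print(out[0], "concat");                       // concat: [ 1 24 32 32 ]
    }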
modules/dnn/src/layers/convolution_layer.cpp

(diff collapsed; not expanded on this page)
modules/dnn/src/layers/crop_layer.cpp

@@ -63,7 +63,26 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
+    {
+        CV_Assert(inputs.size() == 2);
+
+        MatShape dstShape = inputs[0];
+        int start = clamp(startAxis, dstShape);
+        for (int i = start; i < dstShape.size(); i++)
+        {
+            dstShape[i] = inputs[1][i];
+        }
+
+        outputs.resize(1, dstShape);
+
+        return false;
+    }
+
+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
     {
         CV_Assert(2 == inputs.size());

@@ -71,7 +90,7 @@ public:
         const Mat &inpSzBlob = *inputs[1];

         int dims = inpBlob.dims;
-        int start_axis = startAxis < 0 ? startAxis + dims : startAxis;
+        int start_axis = clamp(startAxis, dims);

         std::vector<int> offset_final(dims, 0);
         if (offset.size() == 1)

@@ -82,17 +101,16 @@ public:
         else if (offset.size() > 1)
         {
             if ((int)offset.size() != dims - start_axis)
-                CV_Error(Error::StsBadArg, "number of offset values specified must be equal to the number of dimensions following axis.");
+                CV_Error(Error::StsBadArg, "number of offset values specified must be "
+                         "equal to the number of dimensions following axis.");

             for (int i = start_axis; i < dims; i++)
                 offset_final[i] = offset[i - start_axis];
         }

-        std::vector<int> dstShape(dims);
         crop_ranges.resize(dims, Range::all());
         for (int i = 0; i < dims; i++)
         {
-            dstShape[i] = inpSzBlob.size[i];
             if (i < start_axis)
                 continue;

@@ -112,12 +130,9 @@ public:
                 crop_ranges[i] = Range(cur_crop, cur_crop + inpSzBlob.size[i]);
             }
         }
-
-        outputs.resize(1);
-        outputs[0].create(dims, &dstShape[0], inpBlob.type());
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         Mat &input = *inputs[0];
         Mat &output = outputs[0];
modules/dnn/src/layers/detection_output_layer.cpp

@@ -94,9 +94,6 @@ public:
     int _keepTopK;
     float _confidenceThreshold;

-    int _num;
-    int _numPriors;
-
     float _nmsThreshold;
     int _topK;

@@ -184,58 +181,62 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() > 0);
-        CV_Assert(inputs[0]->size[0] == inputs[1]->size[0]);
-        _num = inputs[0]->size[0];
+        CV_Assert(inputs[0][0] == inputs[1][0]);

-        _numPriors = inputs[2]->size[2] / 4;
-        CV_Assert((_numPriors * _numLocClasses * 4) == inputs[0]->size[1]);
-        CV_Assert(int(_numPriors * _numClasses) == inputs[1]->size[1]);
+        int numPriors = inputs[2][2] / 4;
+        CV_Assert((numPriors * _numLocClasses * 4) == inputs[0][1]);
+        CV_Assert(int(numPriors * _numClasses) == inputs[1][1]);

         // num() and channels() are 1.
         // Since the number of bboxes to be kept is unknown before nms, we manually
         // set it to (fake) 1.
         // Each row is a 7 dimension std::vector, which stores
         // [image_id, label, confidence, xmin, ymin, xmax, ymax]
-        int outputShape[] = {1, 1, 1, 7};
-        outputs[0].create(4, outputShape, CV_32F);
+        outputs.resize(1, shape(1, 1, 1, 7));
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         const float* locationData = inputs[0]->ptr<float>();
         const float* confidenceData = inputs[1]->ptr<float>();
         const float* priorData = inputs[2]->ptr<float>();
+        int num = inputs[0]->size[0];
+        int numPriors = inputs[2]->size[2] / 4;

         // Retrieve all location predictions.
         std::vector<LabelBBox> allLocationPredictions;
-        GetLocPredictions(locationData, _num, _numPriors, _numLocClasses,
+        GetLocPredictions(locationData, num, numPriors, _numLocClasses,
                           _shareLocation, &allLocationPredictions);

         // Retrieve all confidences.
         std::vector<std::map<int, std::vector<float> > > allConfidenceScores;
-        GetConfidenceScores(confidenceData, _num, _numPriors, _numClasses,
+        GetConfidenceScores(confidenceData, num, numPriors, _numClasses,
                             &allConfidenceScores);

         // Retrieve all prior bboxes. It is same within a batch since we assume all
         // images in a batch are of same dimension.
         std::vector<caffe::NormalizedBBox> priorBBoxes;
         std::vector<std::vector<float> > priorVariances;
-        GetPriorBBoxes(priorData, _numPriors, &priorBBoxes, &priorVariances);
+        GetPriorBBoxes(priorData, numPriors, &priorBBoxes, &priorVariances);

         // Decode all loc predictions to bboxes.
         std::vector<LabelBBox> allDecodedBBoxes;
-        DecodeBBoxesAll(allLocationPredictions, priorBBoxes, priorVariances, _num,
+        DecodeBBoxesAll(allLocationPredictions, priorBBoxes, priorVariances, num,
                         _shareLocation, _numLocClasses, _backgroundLabelId,
                         _codeType, _varianceEncodedInTarget, &allDecodedBBoxes);

         int numKept = 0;
         std::vector<std::map<int, std::vector<int> > > allIndices;
-        for (int i = 0; i < _num; ++i)
+        for (int i = 0; i < num; ++i)
         {
             const LabelBBox& decodeBBoxes = allDecodedBBoxes[i];
             const std::map<int, std::vector<float> >& confidenceScores =

@@ -324,7 +325,7 @@ public:
         float* outputsData = outputs[0].ptr<float>();

         int count = 0;
-        for (int i = 0; i < _num; ++i)
+        for (int i = 0; i < num; ++i)
         {
             const std::map<int, std::vector<float> >& confidenceScores =
                 allConfidenceScores[i];
modules/dnn/src/layers/elementwise_layers.cpp

@@ -36,16 +36,16 @@ public:
     ElementWiseLayer(bool run_parallel_=false, const Func &f=Func()) : func(f), run_parallel(run_parallel_) {}

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            outputs[i] = *inputs[i];
-        }
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t i = 0; i < inputs.size(); i++)
         {

@@ -169,20 +169,16 @@ public:
         setParamsFrom(params);
     }

     ////////////////////////////////////////////////////////////////////////////

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(blobs.size() == 1);
-
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            outputs[i].create(inputs[i]->dims, inputs[i]->size.p, inputs[i]->type());
-        }
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(inputs.size() == 1);
         Mat &inpBlob = *inputs[0];
modules/dnn/src/layers/eltwise_layer.cpp

@@ -41,7 +41,6 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
-
 namespace cv
 {
 namespace dnn

@@ -82,21 +81,26 @@ public:
         }
     }

-    void allocate(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        CV_Assert(2 <= inputs.size());
+        CV_Assert(inputs.size() >= 2);
         CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
         CV_Assert(op == SUM || coeffs.size() == 0);

-        for (size_t i = 1; i < inputs.size(); ++i)
+        for (int i = 1; i < inputs.size(); i++)
         {
-            CV_Assert(inputs[i]->size == inputs[0]->size);
+            CV_Assert(inputs[0] == inputs[i]);
         }

-        outputs.resize(1);
-        outputs[0].create(inputs[0]->dims, inputs[0]->size.p, inputs[0]->type());
+        outputs.assign(1, inputs[0]);
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         Mat& output = outputs[0];
         switch (op)
modules/dnn/src/layers/flatten_layer.cpp

@@ -43,6 +43,7 @@
 #include "layers_common.hpp"
 #include <float.h>
 #include <algorithm>
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {

@@ -59,56 +60,60 @@ public:
         setParamsFrom(params);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        size_t i, ninputs = inputs.size();
-        CV_Assert(ninputs > 0);
-        const Mat& inp0 = *inputs[0];
+        CV_Assert(inputs.size() > 0);
+        for (size_t i = 1; i < inputs.size(); i++)
+        {
+            CV_Assert(inputs[i] == inputs[0]);
+        }

-        for (i = 1; i < ninputs; i++)
-        {
-            CV_Assert(inputs[i]->size == inp0.size);
-        }
+        int numAxes = inputs[0].size();
+        int startAxis = clamp(_startAxis, numAxes);
+        int endAxis = clamp(_endAxis, numAxes);

-        _numAxes = inp0.dims;
-        _endAxis = _endAxis < 0 ? _endAxis + _numAxes : _endAxis;
-        CV_Assert(_startAxis >= 0);
-        CV_Assert(_endAxis >= _startAxis && _endAxis < (int)_numAxes);
+        CV_Assert(startAxis >= 0);
+        CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);

-        size_t flattenedDimensionSize = inp0.total(_startAxis, _endAxis+1);
+        size_t flattenedDimensionSize = total(inputs[0], startAxis, endAxis);

-        resultShape.clear();
-        for (int j = 0; j < _startAxis; j++)
+        MatShape outputShapeVec;
+        for (int i = 0; i < startAxis; i++)
         {
-            resultShape.push_back(inp0.size[j]);
+            outputShapeVec.push_back(inputs[0][i]);
         }
-        resultShape.push_back(flattenedDimensionSize);
-        for (int j = _endAxis + 1; j < _numAxes; j++)
+        outputShapeVec.push_back(flattenedDimensionSize);
+        for (size_t i = endAxis + 1; i < numAxes; i++)
         {
-            resultShape.push_back(inp0.size[j]);
+            outputShapeVec.push_back(inputs[0][i]);
         }
-        CV_Assert(resultShape.size() <= 4);
+        CV_Assert(outputShapeVec.size() <= 4);

-        for (i = 0; i < ninputs; i++)
-        {
-            //in-place
-            outputs[i] = inputs[i]->reshape(1, (int)resultShape.size(), &resultShape[0]);
-        }
+        outputs.resize(inputs.size(), outputShapeVec);
+
+        return true;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            outputs[i] = inputs[i]->reshape(1, (int)resultShape.size(), &resultShape[0]);
+            MatShape outShape = shape(outputs[i]);
+            outputs[i] = inputs[i]->reshape(1, (int)outShape.size(), &outShape[0]);
         }
     }

     int _startAxis;
     int _endAxis;
-    size_t _numAxes;
-    std::vector<int> resultShape;
 };

 Ptr<FlattenLayer> FlattenLayer::create(const LayerParams& params)
modules/dnn/src/layers/fully_connected_layer.cpp

@@ -57,8 +57,8 @@ public:
         setParamsFrom(params);
         CV_Assert(1 <= blobs.size() && blobs.size() <= 2);

-        numOutput = params.get<int>("num_output");
-        innerSize = (int)blobs[0].total() / numOutput;
+        int numOutput = params.get<int>("num_output");
+        int innerSize = (int)blobs[0].total() / numOutput;

         bias = params.get<bool>("bias_term", true);
         axis = params.get<int>("axis", 1);

@@ -70,43 +70,39 @@ public:
         blobs[1] = blobs[1].reshape(1, 1);
     }

-    void allocate(const std::vector<Mat*> &input, std::vector<Mat> &output)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        CV_Assert(input.size() > 0);
-        const Mat& inp0 = *input[0];
+        CV_Assert(inputs.size() > 0);
+        CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
+        CV_Assert(blobs[0].dims == 2);

-        bias = (blobs.size() >= 1);
-        axisCan = axis < 0 ? axis + inp0.dims : axis;
-        dtype = inp0.type();
-        numOutput = blobs[0].size[0];
-        innerSize = blobs[0].size[1];
-        outerSize = inp0.total(0, axisCan);
-        size_t innerSize0 = inp0.total(axisCan);
+        int cAxis = clamp(axis, inputs[0]);
+        int outerSize = total(inputs[0], 0, cAxis);
+        int numOutput = blobs[0].size[0];
+        outputs.resize(inputs.size(), shape(outerSize, numOutput));

-        CV_Assert((size_t)innerSize == innerSize0);
-        CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
+        internals.push_back(shape(outerSize, 1));

-        biasOnesBlob.create(outerSize, 1, dtype);
-        biasOnesBlob.setTo(1.);
+        CV_Assert(!bias || (size_t)numOutput == blobs[1].total());

-        output.resize(input.size());
-        for (size_t i = 0; i < input.size(); i++)
-        {
-            CV_Assert(i == 0 || (input[i]->size == input[0]->size && input[i]->type() == dtype));
-            output[i].create(outerSize, numOutput, dtype);
-        }
+        return false;
     }

-    void forward(std::vector<Mat*> &input, std::vector<Mat> &output)
+    void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
     {
+        internals[0].setTo(1.);
         const Mat &weight = blobs[0];
         const Mat *biasMat = NULL, *biasOnesMat = NULL;
+        int axisCan = clamp(axis, input[0]->dims);
+        int outerSize = input[0]->total(0, axisCan);
+
         if (bias)
         {
-            biasOnesMat = &biasOnesBlob;
+            biasOnesMat = &internals[0];
             biasMat = &blobs[1];
         }

@@ -121,10 +117,7 @@ public:
         }
     }

-    int axisCan, dtype;
-    int numOutput, innerSize, outerSize;
     bool bias;
-    Mat biasOnesBlob;
 };

 Ptr<InnerProductLayer> InnerProductLayer::create(const LayerParams& params)
modules/dnn/src/layers/layers_common.cpp

@@ -163,25 +163,19 @@ void getConvolutionKernelParams(const LayerParams &params, int &kernelH, int &ke
 // We pad Pr/2 on the left and Pr - Pr/2 on the right, Pc/2 on the top
 // and Pc - Pc/2 on the bottom.  When Pr or Pc is odd, this means
 // we pad more on the right and bottom than on the top and left.
-void getConvPoolOutParams(const int inputH, const int inputW, const cv::Size &kernel,
-                          const cv::Size &stride, cv::Size &pad, const cv::String &padMode,
-                          int &outH, int &outW)
+void getConvPoolOutParams(const Size &inp, const Size &kernel,
+                          const Size &stride, const String &padMode,
+                          Size &out)
 {
     if (padMode == "VALID")
     {
-        outH = (inputH - kernel.height + stride.height) / stride.height;
-        outW = (inputW - kernel.width + stride.width) / stride.width;
-        pad = cv::Size(0, 0);
+        out.height = (inp.height - kernel.height + stride.height) / stride.height;
+        out.width = (inp.width - kernel.width + stride.width) / stride.width;
     }
     else if (padMode == "SAME")
     {
-        outH = (inputH - 1 + stride.height) / stride.height;
-        outW = (inputW - 1 + stride.width) / stride.width;
-        int Ph = std::max(0, (outH - 1) * stride.height + kernel.height - inputH);
-        int Pw = std::max(0, (outW - 1) * stride.width + kernel.width - inputW);
-        // For odd values of total padding, add more padding at the 'right'
-        // side of the given dimension.
-        pad = cv::Size(Pw / 2, Ph / 2);
+        out.height = (inp.height - 1 + stride.height) / stride.height;
+        out.width = (inp.width - 1 + stride.width) / stride.width;
     }
     else
     {

@@ -189,5 +183,23 @@ void getConvPoolOutParams(const int inputH, const int inputW, const cv::Size &ke
     }
 }

+void getConvPoolPaddings(const Size &inp, const Size &out,
+                         const Size &kernel, const Size &stride,
+                         const String &padMode, Size &pad)
+{
+    if (padMode == "VALID")
+    {
+        pad = cv::Size(0, 0);
+    }
+    else if (padMode == "SAME")
+    {
+        int Ph = std::max(0, (out.height - 1) * stride.height + kernel.height - inp.height);
+        int Pw = std::max(0, (out.width - 1) * stride.width + kernel.width - inp.width);
+        // For odd values of total padding, add more padding at the 'right'
+        // side of the given dimension.
+        pad = cv::Size(Pw / 2, Ph / 2);
+    }
+}
+
 }
 }
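To see the two halves of the split in action, a worked example (not from the commit) for inp = 224x224, kernel = 3x3, stride = 2x2, padMode = "SAME":

    out.height = (224 - 1 + 2) / 2 = 112          // getConvPoolOutParams
    Ph = max(0, (112 - 1)*2 + 3 - 224) = 1        // getConvPoolPaddings
    pad = Size(Pw / 2, Ph / 2) = Size(0, 0)       // integer division; the odd
                                                  // extra pixel goes right/bottom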
modules/dnn/src/layers/layers_common.hpp

@@ -44,6 +44,7 @@
 #include <opencv2/dnn.hpp>
 #include "op_blas.hpp"
 #include "op_im2col.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {

@@ -56,10 +57,13 @@ void getConvolutionKernelParams(const LayerParams &params, int &kernelH, int &ke
 void getPoolingKernelParams(const LayerParams &params, int &kernelH, int &kernelW, bool &globalPooling,
                             int &padH, int &padW, int &strideH, int &strideW, cv::String &padMode);

-void getConvPoolOutParams(const int inputH, const int inputW, const cv::Size &kernel,
-                          const cv::Size &stride, cv::Size &pad, const cv::String &padMode,
-                          int &outH, int &outW);
+void getConvPoolOutParams(const Size &inp, const Size &kernel,
+                          const Size &stride, const String &padMode,
+                          Size &out);
+
+void getConvPoolPaddings(const Size &inp, const Size &out,
+                         const Size &kernel, const Size &stride,
+                         const String &padMode, Size &pad);
 }
 }
modules/dnn/src/layers/lrn_layer.cpp

@@ -75,36 +75,28 @@ public:
         normBySize = params.get<bool>("norm_by_size", true);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
-        CV_Assert(inputs.size() == 1 && inputs[0]->dims == 4);
         CV_Assert(type == CHANNEL_NRM || type == SPATIAL_NRM);
-        const Mat& inp0 = *inputs[0];

-        if (type == SPATIAL_NRM)
-            buf.create(inp0.size[2], inp0.size[3], inp0.type());
-
-        outputs.resize(1);
-        outputs[0].create(inp0.dims, inp0.size.p, inp0.type());
-    }
-
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
-        Mat &src = *inputs[0];
-        Mat &dst = outputs[0];
-
-        switch (type)
+        CV_Assert(inputs.size() == outputs.size());
+        for (int i = 0; i < inputs.size(); i++)
         {
-            case CHANNEL_NRM:
-                channelNormalization(src, dst);
-                break;
-            case SPATIAL_NRM:
-                spatialNormalization(src, dst);
-                break;
-            default:
-                CV_Error(Error::StsNotImplemented, "Unimplemented mode of LRN layer");
-                break;
+            CV_Assert(inputs[i]->dims == 4);
+
+            Mat &src = *inputs[i];
+            Mat &dst = outputs[i];
+
+            switch (type)
+            {
+                case CHANNEL_NRM:
+                    channelNormalization(src, dst);
+                    break;
+                case SPATIAL_NRM:
+                    spatialNormalization(src, dst);
+                    break;
+                default:
+                    CV_Error(Error::StsNotImplemented, "Unimplemented mode of LRN layer");
+                    break;
+            }
         }
     }

@@ -179,8 +171,6 @@ public:
         }
     }
-
-    Mat buf;
 };

 Ptr<LRNLayer> LRNLayer::create(const LayerParams& params)
modules/dnn/src/layers/max_unpooling_layer.cpp

@@ -29,22 +29,25 @@ public:
         poolStride = Size(params.get<int>("pool_stride_w"), params.get<int>("pool_stride_h"));
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() == 2);
-        const Mat& inp0 = *inputs[0];
-        CV_Assert(inp0.total() == inputs[1]->total());
-        CV_Assert(inp0.dims == 4);
+        CV_Assert(total(inputs[0]) == total(inputs[1]));

-        int outShape[] = { inp0.size[0], inp0.size[1], inp0.size[2], inp0.size[3] };
+        MatShape outShape = inputs[0];
         outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
         outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;

-        outputs.resize(1);
-        outputs[0].create(4, outShape, inp0.type());
+        outputs.clear();
+        outputs.push_back(outShape);
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(inputs.size() == 2);
         Mat& input = *inputs[0];
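Worked example of the output geometry above (not from the commit): for an input of {1, 64, 16, 16} with poolKernel = 2x2, poolStride = 2x2 and poolPad = 0x0:

    outShape[2] = (16 - 1)*2 + 2 - 2*0 = 32
    outShape[3] = (16 - 1)*2 + 2 - 2*0 = 32

so the layer reports {1, 64, 32, 32}, the shape the corresponding pooling layer downsampled from.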
modules/dnn/src/layers/mvn_layer.cpp

@@ -59,18 +59,7 @@ public:
         eps = params.get<double>("eps", 1e-9);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            int dims = inputs[i]->dims;
-            CV_Assert(!acrossChannels || dims >= 2);
-            outputs[i].create(dims, inputs[i]->size.p, inputs[i]->type());
-        }
-    }
-
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
         {
modules/dnn/src/layers/normalize_bbox_layer.cpp

@@ -51,31 +51,17 @@ namespace cv
 namespace dnn
 {

-class NormalizeBBoxLayerImpl : public NormalizeBBoxLayer
+namespace
 {
-public:
-    Mat _buffer;
-
-    Mat _sumChannelMultiplier;
-    Mat _sumSpatialMultiplier;
+    const std::string layerName = "NormalizeBBox";
+}

-    Mat _scale;
+class NormalizeBBoxLayerImpl : public NormalizeBBoxLayer
+{
     float _eps;
     bool _across_spatial;
     bool _channel_shared;
-
-    size_t _num;
-    size_t _channels;
-    size_t _rows;
-    size_t _cols;
-
-    size_t _channelSize;
-    size_t _imageSize;
-
-    static const size_t _numAxes = 4;
-    static const std::string _layerName;
-
 public:
     bool getParameterDict(const LayerParams &params,
                           const std::string &parameterName,
                           DictValue& result)

@@ -102,7 +88,7 @@ public:
     {
         if (required)
         {
-            std::string message = _layerName;
+            std::string message = layerName;
             message += " layer parameter does not contain ";
             message += parameterName;
             message += " parameter.";

@@ -127,60 +113,63 @@ public:
     void checkInputs(const std::vector<Mat*> &inputs)
     {
         CV_Assert(inputs.size() > 0);
-        CV_Assert(inputs[0]->dims == 4 && inputs[0]->type() == CV_32F);
         for (size_t i = 1; i < inputs.size(); i++)
         {
-            CV_Assert(inputs[i]->dims == 4 && inputs[i]->type() == CV_32F);
             CV_Assert(inputs[i]->size == inputs[0]->size);
         }
+        CV_Assert(inputs[0]->dims > 2);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        checkInputs(inputs);
-        const Mat& inp0 = *inputs[0];
-        CV_Assert(inp0.dims == 4 && inp0.type() == CV_32F);
+        bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        size_t channels = inputs[0][1];
+        size_t rows = inputs[0][2];
+        size_t cols = inputs[0][3];
+        size_t channelSize = rows * cols;

-        _num = inp0.size[0];
-        _channels = inp0.size[1];
-        _rows = inp0.size[2];
-        _cols = inp0.size[3];
+        internals.assign(1, shape(channels, channelSize));
+        internals.push_back(shape(channels, 1));
+        internals.push_back(shape(1, channelSize));

-        _channelSize = _rows * _cols;
-        _imageSize = _channelSize * _channels;
+        return inplace;
+    }

-        _buffer = Mat(_channels, _channelSize, CV_32F);
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+    {
+        checkInputs(inputs);

-        _sumChannelMultiplier = Mat(_channels, 1, CV_32F, Scalar(1.0));
-        _sumSpatialMultiplier = Mat(1, _channelSize, CV_32F, Scalar(1.0));
+        Mat& buffer = internals[0], sumChannelMultiplier = internals[1],
+            sumSpatialMultiplier = internals[2];

-        _scale = blobs[0];
-        size_t i, ninputs = inputs.size();
-        outputs.resize(ninputs);
+        sumChannelMultiplier.setTo(1.0);
+        sumSpatialMultiplier.setTo(1.0);

-        for (i = 0; i < ninputs; i++)
-        {
-            outputs[i].create(inp0.dims, inp0.size.p, inp0.type());
-        }
-    }
+        const Mat& inp0 = *inputs[0];
+        size_t num = inp0.size[0];
+        size_t channels = inp0.size[1];
+        size_t channelSize = inp0.size[2] * inp0.size[3];

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
-        Mat zeroBuffer(_channels, _channelSize, CV_32F, Scalar(0));
+        Mat zeroBuffer(channels, channelSize, CV_32F, Scalar(0));
         Mat absDiff;
+        Mat scale = blobs[0];
         for (size_t j = 0; j < inputs.size(); j++)
         {
-            for (size_t n = 0; n < _num; ++n)
+            for (size_t n = 0; n < num; ++n)
             {
-                Mat src = Mat(_channels, _channelSize, CV_32F, inputs[j]->ptr<float>(n));
-                Mat dst = Mat(_channels, _channelSize, CV_32F, outputs[j].ptr<float>(n));
+                Mat src = Mat(channels, channelSize, CV_32F, inputs[j]->ptr<float>(n));
+                Mat dst = Mat(channels, channelSize, CV_32F, outputs[j].ptr<float>(n));

-                _buffer = src.mul(src);
+                buffer = src.mul(src);

                 if (_across_spatial)
                 {
-                    absdiff(_buffer, zeroBuffer, absDiff);
+                    absdiff(buffer, zeroBuffer, absDiff);

                     // add eps to avoid overflow
                     double absSum = sum(absDiff)[0] + _eps;

@@ -190,34 +179,34 @@ public:
                 }
                 else
                 {
-                    Mat norm(_channelSize, 1, _buffer.type()); // 1 x _channelSize
+                    Mat norm(channelSize, 1, buffer.type()); // 1 x channelSize

-                    // (_channels x _channelSize)T * _channels x 1 -> _channelSize x 1
-                    gemmCPU(_buffer, _sumChannelMultiplier, 1, norm, 0, GEMM_1_T);
+                    // (_channels x channelSize)T * _channels x 1 -> channelSize x 1
+                    gemmCPU(buffer, sumChannelMultiplier, 1, norm, 0, GEMM_1_T);

                     // compute norm
                     pow(norm, 0.5f, norm);

                     // scale the layer
-                    // _channels x 1 * (_channelSize x 1)T -> _channels x _channelSize
-                    gemmCPU(_sumChannelMultiplier, norm, 1, _buffer, 0, GEMM_2_T);
+                    // _channels x 1 * (channelSize x 1)T -> _channels x channelSize
+                    gemmCPU(sumChannelMultiplier, norm, 1, buffer, 0, GEMM_2_T);

-                    dst = src / _buffer;
+                    dst = src / buffer;
                 }

                 // scale the output
                 if (_channel_shared)
                 {
                     // _scale: 1 x 1
-                    dst *= _scale.at<float>(0, 0);
+                    dst *= scale.at<float>(0, 0);
                 }
                 else
                 {
                     // _scale: _channels x 1
-                    // _channels x 1 * 1 x _channelSize -> _channels x _channelSize
-                    gemmCPU(_scale, _sumSpatialMultiplier, 1, _buffer, 0);
+                    // _channels x 1 * 1 x channelSize -> _channels x channelSize
+                    gemmCPU(scale, sumSpatialMultiplier, 1, buffer, 0);

-                    dst = dst.mul(_buffer);
+                    dst = dst.mul(buffer);
                 }
             }
         }

@@ -225,7 +214,6 @@ public:
 };

-const std::string NormalizeBBoxLayerImpl::_layerName = std::string("NormalizeBBox");

 Ptr<NormalizeBBoxLayer> NormalizeBBoxLayer::create(const LayerParams &params)
 {
modules/dnn/src/layers/padding_layer.cpp

@@ -33,25 +33,26 @@ public:
             CV_Error(cv::Error::StsNotImplemented, "Negative padding and dim aren't supported");
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        size_t i, ninputs = inputs.size();
-        outputs.resize(ninputs);
-
-        for( i = 0; i < ninputs; i++ )
+        outputs.clear();
+        for(int i = 0; i < inputs.size(); i++)
         {
-            const Mat& inp = *inputs[i];
-            int dims = inp.dims;
-            std::vector<int> shape(inp.size.p, inp.size.p + dims);
+            MatShape shape = inputs[i];
             int dim = getPadDim(shape);
-            CV_Assert(dim < dims);
+            CV_Assert(dim < shape.size());

             shape[dim] += padding;
-            outputs[i].create(dims, &shape[0], inp.type());
+            outputs.push_back(shape);
         }
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for(int i = 0; i < inputs.size(); i++)
         {

@@ -59,8 +60,8 @@ public:
             const Mat& inp = *inputs[i];
             Mat& out = outputs[i];
             int dims = inp.dims;
-            std::vector<int> inShape(inp.size.p, inp.size.p + dims);
-            std::vector<int> outShape(out.size.p, out.size.p + dims);
+            MatShape inShape(inp.size.p, inp.size.p + dims);
+            MatShape outShape(out.size.p, out.size.p + dims);
             int dim = getPadDim(inShape);

             int actualIndex = index;

@@ -88,7 +89,7 @@ public:
         }
     }

-    int getPadDim(const std::vector<int>& shape) const
+    int getPadDim(const MatShape& shape) const
     {
         return inputDims > 0 && (int)shape.size() > inputDims ? paddingDim + 1 : paddingDim;
     }
modules/dnn/src/layers/permute_layer.cpp

@@ -110,7 +110,35 @@ public:
         checkNeedForPermutation();
     }

-    void computeStrides()
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
+    {
+        if(!_needsPermute)
+            return true;
+
+        CV_Assert(inputs.size() > 0);
+        CV_Assert((int)_numAxes == inputs[0].size());
+
+        MatShape shapeBefore = inputs[0], shapeAfter;
+        for (size_t i = 0; i < _numAxes; i++)
+        {
+            shapeAfter[i] = shapeBefore[_order[i]];
+        }
+
+        outputs.clear();
+
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            CV_Assert(inputs[i][2] == shapeBefore[2] && inputs[i][3] == shapeBefore[3]);
+            outputs.push_back(shapeAfter);
+        }
+
+        return false;
+    }
+
+    void computeStrides(const MatShape &shapeBefore, const MatShape &shapeAfter)
     {
         _oldStride.resize(_numAxes);
         _newStride.resize(_numAxes);

@@ -120,14 +148,14 @@ public:
         for(int i = _numAxes - 2; i >= 0; i--)
         {
-            _oldStride[i] = _oldStride[i + 1] * _oldDimensionSize[i + 1];
-            _newStride[i] = _newStride[i + 1] * _newDimensionSize[i + 1];
+            _oldStride[i] = _oldStride[i + 1] * shapeBefore[i + 1];
+            _newStride[i] = _newStride[i + 1] * shapeAfter[i + 1];
         }

-        _count = _oldStride[0] * _oldDimensionSize[0];
+        _count = _oldStride[0] * shapeBefore[0];
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
     {
         if(!_needsPermute)
         {

@@ -138,27 +166,10 @@ public:
         const Mat& inp0 = *inputs[0];
         CV_Assert((int)_numAxes == inp0.dims);

-        outputs.resize(inputs.size());
-
-        _newDimensionSize.resize(_numAxes);
-        _oldDimensionSize.resize(_numAxes);
-
-        for (size_t i = 0; i < _numAxes; i++)
-        {
-            _oldDimensionSize[i] = inp0.size[i];
-            _newDimensionSize[i] = inp0.size[_order[i]];
-        }
-
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            CV_Assert(inputs[i]->size == inp0.size);
-            outputs[i].create(_numAxes, &_newDimensionSize[0], CV_32F);
-        }
-
-        computeStrides();
+        computeStrides(shape(*inputs[0]), shape(outputs[0]));
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         size_t k, ninputs = inputs.size();
         if(!_needsPermute)
modules/dnn/src/layers/pooling_layer.cpp
View file @ 4b1834ac
...
@@ -77,39 +77,22 @@ public:
         setParamsFrom(params);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
     {
         CV_Assert(inputs.size() == 1);
-        inp = Size(inputs[0]->size[3], inputs[0]->size[2]);
+        cv::Size inp(inputs[0]->size[3], inputs[0]->size[2]),
+                 out(outputs[0].size[3], outputs[0].size[2]);

         if (globalPooling)
         {
             kernel = inp;
         }

-        computeOutputShape(inp);
-        outputs.resize(type == MAX ? 2 * inputs.size() : inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            const Mat& inp_i = *inputs[i];
-            CV_Assert(inp_i.size[2] == inp.height && inp_i.size[3] == inp.width);
-            int outsz[] = { inp_i.size[0], inp_i.size[1], out.height, out.width };
-            if (type == MAX)
-            {
-                outputs[2 * i].create(4, outsz, CV_32F);
-                outputs[2 * i + 1].create(4, outsz, CV_32F);
-            }
-            else
-            {
-                outputs[i].create(4, outsz, CV_32F);
-            }
-        }
+        getConvPoolPaddings(inp, out, kernel, stride, padMode, pad);
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t ii = 0; ii < inputs.size(); ii++)
         {
...
@@ -130,7 +113,8 @@ public:
     void maxPooling(Mat &src, Mat &dst, Mat &mask)
     {
-        CV_DbgAssert(dst.size[2] == out.height && dst.size[3] == out.width);
+        Size inp(src.size[3], src.size[2]),
+             out(dst.size[3], dst.size[2]);
         for (int n = 0; n < src.size[0]; ++n)
         {
...
@@ -175,6 +159,8 @@ public:
     void avePooling(Mat &src, Mat &dst)
     {
+        Size inp(src.size[3], src.size[2]),
+             out(dst.size[3], dst.size[2]);
         for (int n = 0; n < src.size[0]; ++n)
         {
             for (int c = 0; c < src.size[1]; ++c)
...
@@ -209,35 +195,52 @@ public:
         }
     }

-    void computeOutputShape(Size inpSz)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
+        CV_Assert(inputs.size() != 0);
+        Size in(inputs[0][3], inputs[0][2]), out;
         if (padMode.empty())
         {
             //Yeah, something strange Caffe scheme-)
-            out.height = static_cast<int>(ceil(static_cast<float>(inpSz.height + 2 * pad.height -
+            out.height = static_cast<int>(ceil(static_cast<float>(in.height + 2 * pad.height -
                 kernel.height) / stride.height)) + 1;
-            out.width = static_cast<int>(ceil(static_cast<float>(inpSz.width + 2 * pad.width -
+            out.width = static_cast<int>(ceil(static_cast<float>(in.width + 2 * pad.width -
                 kernel.width) / stride.width)) + 1;

             if (pad.height || pad.width)
             {
                 // If we have padding, ensure that the last pooling starts strictly
                 // inside the image (instead of at the padding); otherwise clip the last.
-                if ((out.height - 1) * stride.height >= inpSz.height + pad.height)
+                if ((out.height - 1) * stride.height >= in.height + pad.height)
                     --out.height;
-                if ((out.width - 1) * stride.width >= inpSz.width + pad.width)
+                if ((out.width - 1) * stride.width >= in.width + pad.width)
                     --out.width;
-                CV_Assert((out.height - 1) * stride.height < inpSz.height + pad.height);
-                CV_Assert((out.width - 1) * stride.width < inpSz.width + pad.width);
+                CV_Assert((out.height - 1) * stride.height < in.height + pad.height);
+                CV_Assert((out.width - 1) * stride.width < in.width + pad.width);
             }
         }
         else
         {
-            getConvPoolOutParams(inpSz.height, inpSz.width, kernel, stride, pad, padMode, out.height, out.width);
+            getConvPoolOutParams(in, kernel, stride, padMode, out);
         }
-    }
-    Size inp, out;
+
+        outputs.resize(type == MAX ? 2 * inputs.size() : inputs.size());
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            size_t index = type == MAX ? 2 * i : i;
+            int dims[] = { inputs[i][0], inputs[i][1], out.height, out.width };
+            outputs[index] = shape(dims);
+            if (type == MAX)
+                outputs[index + 1] = shape(dims);
+        }
+
+        return false;
+    }
 };

 Ptr<PoolingLayer> PoolingLayer::create(const LayerParams& params)
...
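The Caffe-style branch above first rounds the output extent up, then clips the last window so that pooling never starts entirely inside the padding. A quick worked example of the same arithmetic (hypothetical sizes, a standalone sketch rather than layer code):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // Same formula as the padMode.empty() branch above:
        // out = ceil((in + 2*pad - kernel) / stride) + 1, then clip.
        int in = 7, pad = 1, kernel = 3, stride = 2;
        int out = (int)std::ceil((float)(in + 2 * pad - kernel) / stride) + 1; // ceil(6/2) + 1 = 4
        if ((out - 1) * stride >= in + pad)  // would the last window start in the padding?
            --out;                           // here 6 < 8, so out stays 4
        std::printf("output extent: %d\n", out);
        return 0;
    }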
modules/dnn/src/layers/prior_box_layer.cpp
View file @ 4b1834ac
...
@@ -185,34 +185,41 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() == 2);

-        _layerWidth = inputs[0]->size[3];
-        _layerHeight = inputs[0]->size[2];
-        _imageWidth = inputs[1]->size[3];
-        _imageHeight = inputs[1]->size[2];
-        _stepX = static_cast<float>(_imageWidth) / _layerWidth;
-        _stepY = static_cast<float>(_imageHeight) / _layerHeight;
+        int layerHeight = inputs[0][2];
+        int layerWidth = inputs[0][3];

         // Since all images in a batch has same height and width, we only need to
         // generate one set of priors which can be shared across all images.
-        int outNum = 1;
+        size_t outNum = 1;
         // 2 channels. First channel stores the mean of each prior coordinate.
         // Second channel stores the variance of each prior coordinate.
-        int outChannels = 2;
-        _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
+        size_t outChannels = 2;

-        int outsz[] = { outNum, outChannels, (int)_outChannelSize };
-        outputs[0].create(3, outsz, CV_32F);
+        outputs.resize(1, shape(outNum, outChannels,
+                                layerHeight * layerWidth * _numPriors * 4));
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
-        (void)inputs; // to suppress unused parameter warning
+        int _layerWidth = inputs[0]->size[3];
+        int _layerHeight = inputs[0]->size[2];
+
+        int _imageWidth = inputs[1]->size[3];
+        int _imageHeight = inputs[1]->size[2];
+
+        float _stepX = static_cast<float>(_imageWidth) / _layerWidth;
+        float _stepY = static_cast<float>(_imageHeight) / _layerHeight;
+
+        int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;

         float* outputPtr = outputs[0].ptr<float>();
...
@@ -305,17 +312,6 @@ public:
         }
     }

-    size_t _layerWidth;
-    size_t _layerHeight;
-
-    size_t _imageWidth;
-    size_t _imageHeight;
-
-    size_t _outChannelSize;
-
-    float _stepX;
-    float _stepY;
-
     float _minSize;
     float _maxSize;
...
modules/dnn/src/layers/recurrent_layers.cpp
View file @ 4b1834ac
This diff is collapsed.
modules/dnn/src/layers/reshape_layer.cpp
View file @ 4b1834ac
...
@@ -48,10 +48,10 @@ namespace cv
 namespace dnn
 {

-static void computeShapeByReshapeMask(const std::vector<int> &srcShape,
-                                      const std::vector<int> &maskShape,
+static void computeShapeByReshapeMask(const MatShape &srcShape,
+                                      const MatShape &maskShape,
                                       Range srcRange /*= Range::all()*/,
-                                      std::vector<int> &dstShape)
+                                      MatShape &dstShape)
 {
     int srcShapeSize = (int)srcShape.size();
     int maskShapeSize = (int)maskShape.size();
...
@@ -61,7 +61,7 @@ static void computeShapeByReshapeMask(const std::vector<int> &srcShape,
     else
     {
         int sz = srcRange.size();
-        srcRange.start = srcRange.start < 0 ? srcRange.start + srcShapeSize : srcRange.start;
+        srcRange.start = clamp(srcRange.start, srcShapeSize);
         srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
     }
...
@@ -96,8 +96,8 @@ static void computeShapeByReshapeMask(const std::vector<int> &srcShape,
             CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
     }

-    size_t srcTotal = shapeTotal(srcShape);
-    size_t dstTotal = shapeTotal(dstShape);
+    size_t srcTotal = total(srcShape);
+    size_t dstTotal = total(dstShape);

     if (inferDim != -1)
     {
...
@@ -116,7 +116,8 @@ static void computeShapeByReshapeMask(const std::vector<int> &srcShape,
 class ReshapeLayerImpl : public ReshapeLayer
 {
 public:
-    ReshapeLayerImpl(const LayerParams& params)
+    ReshapeLayerImpl(const LayerParams& params):
+        performReordering(false)
     {
         setParamsFrom(params);
         int axis = params.get<int>("axis", 0);
...
@@ -136,29 +137,40 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        outputs.resize(inputs.size());
-        outShapes.resize(inputs.size());
+        outputs.clear();
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            std::vector<int> inputShape(inputs[i]->size.p, inputs[i]->size.p + inputs[i]->dims);
-            computeShapeByReshapeMask(inputShape, newShapeDesc, newShapeRange, outShapes[i]);
-            outputs[i] = inputs[i]->reshape(1, outShapes[i]);
+            outputs.push_back(MatShape());
+            computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
         }
+
+        return true;
     }

+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    {
+        CV_Assert(inputs.size());
+        CV_Assert(outputs.size());
+        Mat srcBlob = *inputs[0];
+        int dims = srcBlob.dims;
+        MatShape inputShape = shape(srcBlob), outShape = shape(outputs[0]);
+
+        bool channelsReduced = dims > (int)outShape.size() ||
+                (dims == 4 && inputShape[1] > outShape[1]);
+        performReordering = enableReordering && dims == 4 && channelsReduced;
+    }
+
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
-        for (size_t i = 0; i < outputs.size(); i++)
+        for (size_t i = 0; i < inputs.size(); i++)
         {
             Mat srcBlob = *inputs[i];
-            int dims = srcBlob.dims;
-            std::vector<int> inputShape(srcBlob.size.p, srcBlob.size.p + dims);
-            bool channelsReduced = dims > (int)outShapes[i].size() ||
-                (dims == 4 && inputShape[1] > outShapes[i][1]);
-            bool performReordering = enableReordering && dims == 4 && channelsReduced;
+            MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);

             if (performReordering)
             {
...
@@ -185,16 +197,14 @@ public:
                 }
             }
-            srcBlob = reordered_blob;
+            outputs[i] = reordered_blob.reshape(1, outShape);
             }
             // TODO: we should not assign srcBlob if performReordering is true.
-            outputs[i] = srcBlob.reshape(1, outShapes[i]);
         }
     }

 private:
-    std::vector<std::vector<int> > outShapes;
-    bool enableReordering;
+    bool enableReordering, performReordering;
 };

 Ptr<ReshapeLayer> ReshapeLayer::create(const LayerParams& params)
...
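For reference, the mask convention computeShapeByReshapeMask implements is the Caffe one: a mask entry of 0 copies the corresponding source dimension, -1 marks the single dimension to be inferred from the total element count, and any positive value is taken literally. A minimal standalone sketch of that rule (whole-range case only, no validation, not the function from the patch):

    #include <vector>

    // Sketch of the reshape-mask rule: 0 copies the source dim, -1 is
    // inferred so the total number of elements is preserved.
    std::vector<int> applyReshapeMask(const std::vector<int>& src, const std::vector<int>& mask)
    {
        size_t srcTotal = 1;
        for (size_t i = 0; i < src.size(); i++) srcTotal *= src[i];

        std::vector<int> dst(mask.size());
        int inferDim = -1;
        size_t known = 1;
        for (size_t i = 0; i < mask.size(); i++)
        {
            if (mask[i] == 0)       dst[i] = src[i];               // copy from source
            else if (mask[i] == -1) { inferDim = (int)i; dst[i] = 1; }
            else                    dst[i] = mask[i];               // take literally
            known *= dst[i];
        }
        if (inferDim >= 0)
            dst[inferDim] = (int)(srcTotal / known);                // infer the rest
        return dst;
    }
    // applyReshapeMask({4, 3, 1, 2}, {0, -1}) -> {4, 6}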
modules/dnn/src/layers/scale_layer.cpp
View file @ 4b1834ac
...
@@ -27,20 +27,10 @@ public:
         hasBias = params.get<bool>("bias_term", false);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(blobs.size() == 1 + hasBias);
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-        {
-            const Mat& inp = *inputs[i];
-            outputs[i].create(inp.dims, inp.size.p, inp.type());
-        }
-    }
-
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
         for (size_t ii = 0; ii < outputs.size(); ii++)
         {
             Mat& inpBlob = *inputs[ii];
...
modules/dnn/src/layers/shift_layer.cpp
View file @ 4b1834ac
...
@@ -11,6 +11,7 @@ Implementation of shift layer, which adds up const values to blob.
 #include "../precomp.hpp"
 #include "op_blas.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {
...
@@ -35,42 +36,17 @@ public:
 #endif
     }

-    virtual void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() > 0);
         CV_Assert(blobs.size() > 0);

-        const Mat &inpBlob = *inputs[0];
-        CV_Assert(inpBlob.dims == 4 && inpBlob.type() == CV_32F);
-        const Mat &biasBlob = blobs[0];
-        outputs.resize(inputs.size());
-        if (inpBlob.dims == biasBlob.dims)
-        {
-            for (size_t i = 0; i < inputs.size(); i++)
-            {
-                CV_Assert(inputs[i]->type() == inpBlob.type());
-                CV_Assert(inputs[i]->dims == inpBlob.dims);
-                outputs[i] = *inputs[i];
-            }
-        }
-        else
-        {
-            CV_Assert(biasBlob.total() == (size_t)inpBlob.size[1]);
-            for (size_t i = 0; i < inputs.size(); i++)
-            {
-                CV_Assert(inputs[i]->type() == inpBlob.type());
-                CV_Assert(inputs[i]->dims == 4 && inputs[i]->size[1] == inpBlob.size[1]);
-                outputs[i] = *inputs[i];
-            }
-            biasOnesMat = Mat::ones(1, inpBlob.size[2] * inpBlob.size[3], inpBlob.type());
-        }
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        internals.assign(1, shape(1, total(inputs[0], 2)));
+        return true;
     }

-    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(inputs.size() > 0);
         CV_Assert(blobs.size() > 0);
...
@@ -87,6 +63,8 @@ public:
         }
         else
         {
+            Mat biasOnesMat = internals[0];
+            biasOnesMat.setTo(1);
             for (size_t ii = 0; ii < outputs.size(); ii++)
             {
                 Mat &inpBlob = *inputs[ii];
...
@@ -103,8 +81,6 @@ public:
             }
         }
     }

-    Mat biasOnesMat;
 };

 Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
...
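The internals[0] buffer requested above is a 1 x (H*W) row that the forward pass fills with ones; multiplying the C x 1 bias by it produces a C x (H*W) plane, so the per-channel shift can be applied with a single rank-1 matrix product instead of nested loops. A minimal sketch of that idea in plain cv::Mat arithmetic (a hypothetical helper; the layer itself goes through the op_blas wrappers):

    #include <opencv2/core.hpp>

    // src and dst viewed as C x (H*W), bias as C x 1, biasOnes as a
    // 1 x (H*W) row of ones; all CV_32F.
    void addChannelBias(const cv::Mat& src, const cv::Mat& bias,
                        const cv::Mat& biasOnes, cv::Mat& dst)
    {
        dst = src + bias * biasOnes; // (Cx1)*(1xHW) -> CxHW broadcast of the bias
    }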
modules/dnn/src/layers/slice_layer.cpp
View file @ 4b1834ac
...
@@ -66,66 +66,69 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() == 1);

-        const Mat &inpBlob = *inputs[0];
-        int dims = inpBlob.dims;
-        axisIdx = axis < 0 ? axis + dims : axis;
-        int axisSize = inpBlob.size[axisIdx];
-        std::vector<int> inpShape(inpBlob.size.p, inpBlob.size.p + dims);
+        outputs.clear();
+
+        MatShape inpShape = inputs[0];
+        int cAxis = clamp(axis, inpShape.size());
+        int axisSize = inpShape[cAxis];

         if (sliceIndices.size()) //divide blob with respect to passed parameters
         {
             std::vector<int> outAxisSize;
             int prevSlice = 0;

             for (size_t i = 0; i < sliceIndices.size(); i++)
             {
                 if (!(prevSlice < sliceIndices[i] && sliceIndices[i] < axisSize))
                     CV_Error(Error::StsBadArg, "Slice indices should be positive, increased and don't exceed size of sliced dimension");

                 outAxisSize.push_back(sliceIndices[i] - prevSlice);
                 prevSlice = sliceIndices[i];
             }
             outAxisSize.push_back(axisSize - prevSlice);

-            outputs.resize(outAxisSize.size());
             for (size_t i = 0; i < outAxisSize.size(); i++)
             {
-                inpShape[axisIdx] = outAxisSize[i];
-                outputs[i].create(inpShape, inpBlob.type());
+                inpShape[cAxis] = outAxisSize[i];
+                outputs.push_back(inpShape);
             }
         }
         else //divide blob with respect to count of output blobs
         {
-            CV_Assert(outputs.size() > 0 && axisSize % outputs.size() == 0);
-            int outAxisSize = axisSize / (int)outputs.size();
+            CV_Assert(requiredOutputs > 0 && axisSize % requiredOutputs == 0);
+            int outAxisSize = axisSize / (int)requiredOutputs;

-            for (size_t i = 0; i < outputs.size(); i++)
+            for (size_t i = 0; i < requiredOutputs; i++)
             {
-                inpShape[axisIdx] = outAxisSize;
-                outputs[i].create(inpShape, inpBlob.type());
+                inpShape[cAxis] = outAxisSize;
+                outputs.push_back(inpShape);
             }
         }

+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         const Mat& inpMat = *inputs[0];
         std::vector<Range> ranges(inpMat.dims, Range::all());
+        int cAxis = clamp(axis, inpMat.dims);

-        ranges[axisIdx].start = 0;
+        ranges[cAxis].start = 0;
         for (size_t i = 0; i < outputs.size(); i++)
         {
-            ranges[axisIdx].end = ranges[axisIdx].start + outputs[i].size[axisIdx];
+            ranges[cAxis].end = ranges[cAxis].start + outputs[i].size[cAxis];
             inpMat(&ranges[0]).copyTo(outputs[i]);
-            ranges[axisIdx].start = ranges[axisIdx].end;
+            ranges[cAxis].start = ranges[cAxis].end;
         }
     }

-    int axisIdx;
 };

 Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
...
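To make the two branches above concrete: with explicit sliceIndices the axis is cut at each index, otherwise it is divided evenly into requiredOutputs parts. A tiny standalone sketch of the index-based case (a hypothetical helper, validation omitted):

    #include <vector>

    // Sketch of the index-based split performed in getMemoryShapes above.
    std::vector<int> axisPieces(int axisSize, const std::vector<int>& sliceIndices)
    {
        std::vector<int> pieces;
        int prev = 0;
        for (size_t i = 0; i < sliceIndices.size(); i++)
        {
            pieces.push_back(sliceIndices[i] - prev); // piece before each cut
            prev = sliceIndices[i];
        }
        pieces.push_back(axisSize - prev);            // remainder after the last cut
        return pieces;
    }
    // axisPieces(10, {3, 7}) -> {3, 4, 3}: a 2 x 10 x 5 blob sliced on
    // axis 1 yields shapes 2x3x5, 2x4x5 and 2x3x5.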
modules/dnn/src/layers/softmax_layer.cpp
View file @ 4b1834ac
...
@@ -60,36 +60,34 @@ public:
         setParamsFrom(params);
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        CV_Assert(inputs.size() == 1);
-        const Mat& inp0 = *inputs[0];
-        int dims = inp0.dims;
-        axis = axisRaw < 0 ? axisRaw + dims : axisRaw;
-        outerSize = inp0.total(0, axis);
-        channels = inp0.size[axis];
-        innerSize = inp0.total(axis + 1);
-        std::vector<int> shape(inp0.size.p, inp0.size.p + dims);
-        shape[axis] = 1;
-        buf.create(shape, inp0.type());
-        outputs.resize(1);
-        outputs[0].create(inp0.dims, inp0.size.p, inp0.type());
+        bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        MatShape shape = inputs[0];
+        int cAxis = clamp(axisRaw, shape.size());
+        shape[cAxis] = 1;
+        internals.assign(1, shape);
+        return inplace;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         const Mat &src = *inputs[0];
         Mat &dst = outputs[0];

+        int axis = clamp(axisRaw, src.dims);
+        size_t outerSize = src.total(0, axis), channels = src.size[axis],
+               innerSize = src.total(axis + 1);
+
         CV_Assert(src.type() == CV_32F);
         CV_Assert(src.isContinuous() && dst.isContinuous());

         const float *srcPtr = src.ptr<float>();
         float *dstPtr = dst.ptr<float>();
-        float *bufPtr = buf.ptr<float>();
+        float *bufPtr = internals[0].ptr<float>();

         size_t outerStep = src.total(axis);
         size_t cnStep = src.total(axis + 1);
...
@@ -148,9 +146,7 @@ public:
         }
     }

-    int axis, axisRaw;
-    Mat buf;
-    size_t outerSize, channels, innerSize;
+    int axisRaw;
 };

 Ptr<SoftmaxLayer> SoftmaxLayer::create(const LayerParams& params)
...
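The scratch Mat that used to live in the member buf now arrives as internals[0]; its shape is the input shape with the softmax axis collapsed to 1, i.e. one float per (outer, inner) position to hold the running maximum and then the sum of exponentials. As a standalone illustration of the shape computation (a hypothetical helper mirroring the shape[cAxis] = 1 line above):

    #include <vector>

    // Scratch-buffer shape for softmax over cAxis: the input shape with
    // the reduced axis collapsed to extent 1.
    std::vector<int> softmaxBufferShape(std::vector<int> inputShape, int cAxis)
    {
        inputShape[cAxis] = 1;
        return inputShape;
    }
    // softmaxBufferShape({2, 10, 5, 5}, 1) -> {2, 1, 5, 5}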
modules/dnn/src/layers/split_layer.cpp
View file @ 4b1834ac
...
@@ -65,19 +65,20 @@ public:
         }
     }

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         CV_Assert(inputs.size() == 1);
-        const Mat& inp0 = *inputs[0];

-        if (outputsCount >= 0)
-            outputs.resize(outputsCount);
-
-        for (size_t i = 0; i < outputs.size(); i++)
-            outputs[i].create(inp0.dims, inp0.size.p, inp0.type());
+        outputs.resize(outputsCount >= 0 ? outputsCount : requiredOutputs,
+                       inputs[0]);
+
+        return false;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t i = 0; i < outputs.size(); i++)
         {
...
modules/dnn/src/tensorflow/tf_importer.cpp
View file @ 4b1834ac
...
@@ -51,7 +51,7 @@ struct Pin
     int blobIndex;
 };

-void blobShapeFromTensor(const tensorflow::TensorProto &tensor, std::vector<int>& shape)
+void blobShapeFromTensor(const tensorflow::TensorProto &tensor, MatShape& shape)
 {
     shape.clear();
     if (tensor.has_tensor_shape())
...
@@ -72,7 +72,7 @@ void blobShapeFromTensor(const tensorflow::TensorProto &tensor, std::vector<int>
 template <typename T>
 void parseTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
 {
-    std::vector<int> shape;
+    MatShape shape;
     blobShapeFromTensor(tensor, shape);
     int dims = (int)shape.size();
...
@@ -236,7 +236,7 @@ void setStrides(LayerParams &layerParams, const tensorflow::NodeDef &layer)
 }

 DictValue parseDims(const tensorflow::TensorProto &tensor)
 {
-    std::vector<int> shape;
+    MatShape shape;
     blobShapeFromTensor(tensor, shape);
     int dims = (int)shape.size();
...
@@ -396,7 +396,7 @@ TFImporter::TFImporter(const char *model)

 void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
 {
-    std::vector<int> shape;
+    MatShape shape;
     blobShapeFromTensor(tensor, shape);
     int dims = (int)shape.size();
...
modules/dnn/test/npy_blob.hpp
View file @ 4b1834ac
...
@@ -60,75 +60,6 @@ inline void saveBlobToNPY(const Mat &blob, const String &path)
     cnpy::npy_save(path.c_str(), blob.ptr<float>(), (unsigned*)&blob.size.p[0], blob.dims);
 }

-inline size_t shapeTotal(const std::vector<int>& shape)
-{
-    size_t p = 1, i, n = shape.size();
-    for (i = 0; i < n; i++)
-        p *= shape[i];
-    return p;
-}
-
-inline bool shapeEqual(const std::vector<int>& shape1, const std::vector<int>& shape2)
-{
-    size_t i, n1 = shape1.size(), n2 = shape2.size();
-    if (n1 != n2)
-        return false;
-    for (i = 0; i < n1; i++)
-        if (shape1[i] != shape2[i])
-            return false;
-    return true;
-}
-
-inline std::vector<int> getShape(const Mat& m)
-{
-    return m.empty() ? std::vector<int>() : std::vector<int>(&m.size.p[0], &m.size.p[0] + m.dims);
-}
-
-inline std::vector<int> makeShape(int a0, int a1=-1, int a2=-1, int a3=-1, int a4=-1, int a5=-1)
-{
-    std::vector<int> s;
-    s.push_back(a0);
-    if (a1 > 0)
-    {
-        s.push_back(a1);
-        if (a2 > 0)
-        {
-            s.push_back(a2);
-            if (a3 > 0)
-            {
-                s.push_back(a3);
-                if (a4 > 0)
-                {
-                    s.push_back(a4);
-                    if (a5 > 0)
-                        s.push_back(a5);
-                }
-            }
-        }
-    }
-    return s;
-}
-
-inline std::vector<int> concatShape(const std::vector<int>& a, const std::vector<int>& b)
-{
-    size_t na = a.size(), nb = b.size();
-    std::vector<int> c(na + nb);
-    std::copy(a.begin(), a.end(), c.begin());
-    std::copy(b.begin(), b.end(), c.begin() + na);
-    return c;
-}
-
-inline void printShape(const String& name, const std::vector<int>& shape)
-{
-    printf("%s: [", name.c_str());
-    size_t i, n = shape.size();
-    for (i = 0; i < n; i++)
-        printf(" %d", shape[i]);
-    printf(" ]\n");
-}

 }
 #endif
modules/dnn/test/test_layers.cpp
View file @ 4b1834ac
...
@@ -43,6 +43,7 @@
 #include <opencv2/core/ocl.hpp>
 #include <iostream>
 #include "npy_blob.hpp"
+#include <opencv2/dnn/shape_utils.hpp>
 #include <opencv2/dnn/all_layers.hpp>
 #include <opencv2/ts/ocl_test.hpp>
...
@@ -67,16 +68,28 @@ void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &ou
     size_t i, ninputs = inpBlobs.size();
     std::vector<Mat> inp_(ninputs);
     std::vector<Mat*> inp(ninputs);
-    std::vector<Mat> outp;
+    std::vector<Mat> outp, intp;
+    std::vector<MatShape> inputs, outputs, internals;

     for (i = 0; i < ninputs; i++)
     {
         inp_[i] = inpBlobs[i].clone();
         inp[i] = &inp_[i];
+        inputs.push_back(shape(inp_[i]));
     }

-    layer->allocate(inp, outp);
-    layer->forward(inp, outp);
+    layer->getMemoryShapes(inputs, 0, outputs, internals);
+    for (int i = 0; i < outputs.size(); i++)
+    {
+        outp.push_back(Mat(outputs[i], CV_32F));
+    }
+    for (int i = 0; i < internals.size(); i++)
+    {
+        intp.push_back(Mat(internals[i], CV_32F));
+    }
+
+    layer->finalize(inp, outp);
+    layer->forward(inp, outp, intp);

     size_t noutputs = outp.size();
     outBlobs.resize(noutputs);
...
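The rewritten runLayer is the clearest summary of the contract introduced by this merge: shapes are queried first, the caller allocates output and internal Mats from them, and only then are finalize and the three-argument forward invoked. A condensed restatement of that sequence (a sketch assuming layer, inp and inp_ are set up as in the test above):

    std::vector<MatShape> inShapes, outShapes, internalShapes;
    for (size_t i = 0; i < inp_.size(); i++)
        inShapes.push_back(shape(inp_[i]));                       // 1. describe the inputs

    layer->getMemoryShapes(inShapes, 0, outShapes, internalShapes); // 2. query the layer

    std::vector<Mat> outp, intp;
    for (size_t i = 0; i < outShapes.size(); i++)
        outp.push_back(Mat(outShapes[i], CV_32F));                // 3. allocate outputs
    for (size_t i = 0; i < internalShapes.size(); i++)
        intp.push_back(Mat(internalShapes[i], CV_32F));           // ... and scratch buffers

    layer->finalize(inp, outp);                                   // 4. layer caches derived state
    layer->forward(inp, outp, intp);                              // 5. run the computation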
@@ -165,18 +178,17 @@ TEST(Layer_Test_Reshape, squeeze)
     int sz[] = {4, 3, 1, 2};
     Mat inp(4, sz, CV_32F);
-    std::vector<Mat*> inpVec(1, &inp);
-    std::vector<Mat> outVec;
+    std::vector<Mat> inpVec(1, inp);
+    std::vector<Mat> outVec, intVec;

     Ptr<Layer> rl = LayerFactory::createLayerInstance("Reshape", params);
-    rl->allocate(inpVec, outVec);
-    rl->forward(inpVec, outVec);
+    runLayer(rl, inpVec, outVec);

     Mat& out = outVec[0];
-    std::vector<int> shape(out.size.p, out.size.p + out.dims);
+    MatShape shape(out.size.p, out.size.p + out.dims);
     int sh0[] = {4, 3, 2};
-    std::vector<int> shape0(sh0, sh0+3);
-    EXPECT_TRUE(shapeEqual(shape, shape0));
+    MatShape shape0(sh0, sh0+3);
+    EXPECT_EQ(shape, shape0);
 }

 TEST(Layer_Test_BatchNorm, Accuracy)
...
@@ -253,10 +265,10 @@ public:
     Layer_LSTM_Test() {}

-    void init(const std::vector<int> &inpShape_, const std::vector<int> &outShape_)
+    void init(const MatShape &inpShape_, const MatShape &outShape_)
     {
-        numInp = (int)shapeTotal(inpShape_);
-        numOut = (int)shapeTotal(outShape_);
+        numInp = total(inpShape_);
+        numOut = total(outShape_);

         Wh = Mat::ones(4 * numOut, numOut, CV_32F);
         Wx = Mat::ones(4 * numOut, numInp, CV_32F);
...
@@ -271,10 +283,10 @@ public:
 TEST_F(Layer_LSTM_Test, get_set_test)
 {
     const int TN = 4;
-    std::vector<int> inpShape = makeShape(5, 3, 2);
-    std::vector<int> outShape = makeShape(3, 1, 2);
-    std::vector<int> inpResShape = concatShape(makeShape(TN), inpShape);
-    std::vector<int> outResShape = concatShape(makeShape(TN), outShape);
+    MatShape inpShape = shape(5, 3, 2);
+    MatShape outShape = shape(3, 1, 2);
+    MatShape inpResShape = concat(shape(TN), inpShape);
+    MatShape outResShape = concat(shape(TN), outShape);

     init(inpShape, outShape);
     layer->setProduceCellOutput(true);
...
@@ -285,8 +297,6 @@ TEST_F(Layer_LSTM_Test, get_set_test)
     randu(C, -1., 1.);
     Mat H = C.clone();
     randu(H, -1., 1.);
-    layer->setC(C);
-    layer->setH(H);

     Mat inp((int)inpResShape.size(), &inpResShape[0], CV_32F);
     randu(inp, -1., 1.);
...
@@ -296,17 +306,12 @@ TEST_F(Layer_LSTM_Test, get_set_test)
     EXPECT_EQ(2u, outputs.size());

-    printShape("outResShape", outResShape);
-    printShape("out0", getShape(outputs[0]));
-    printShape("out1", getShape(outputs[0]));
-    printShape("C", getShape(layer->getC()));
-    printShape("H", getShape(layer->getH()));
-    EXPECT_TRUE(shapeEqual(outResShape, getShape(outputs[0])));
-    EXPECT_TRUE(shapeEqual(outResShape, getShape(outputs[1])));
-    EXPECT_TRUE(shapeEqual(outResShape, getShape(layer->getC())));
-    EXPECT_TRUE(shapeEqual(outResShape, getShape(layer->getH())));
+    print(outResShape, "outResShape");
+    print(shape(outputs[0]), "out0");
+    print(shape(outputs[0]), "out1");
+    EXPECT_EQ(outResShape, shape(outputs[0]));
+    EXPECT_EQ(outResShape, shape(outputs[1]));

     EXPECT_EQ(0, layer->inputNameToIndex("x"));
     EXPECT_EQ(0, layer->outputNameToIndex("h"));
...
@@ -387,8 +392,8 @@ TEST_F(Layer_RNN_Test, get_set_test)
     runLayer(layer, inputs, outputs);

     EXPECT_EQ(outputs.size(), 2u);
-    EXPECT_TRUE(shapeEqual(getShape(outputs[0]), makeShape(nT, nS, nO)));
-    EXPECT_TRUE(shapeEqual(getShape(outputs[1]), makeShape(nT, nS, nH)));
+    EXPECT_EQ(shape(outputs[0]), shape(nT, nS, nO));
+    EXPECT_EQ(shape(outputs[1]), shape(nT, nS, nH));
 }
 }