Commit ed150bd9 authored May 11, 2018 by Alexander Alekhin
Merge pull request #11461 from dkurt:dnn_reduce_mem_consumption

Parents: d9ddca04, c99c3e76
Showing 6 changed files with 55 additions and 35 deletions.
Changed files:

    modules/dnn/src/caffe/caffe_importer.cpp              +12  -8
    modules/dnn/src/caffe/caffe_io.cpp                    +20  -20
    modules/dnn/src/layers/convolution_layer.cpp          +6   -4
    modules/dnn/src/tensorflow/tf_graph_simplifier.cpp    +9   -1
    modules/dnn/src/tensorflow/tf_graph_simplifier.hpp    +2   -0
    modules/dnn/src/tensorflow/tf_importer.cpp            +6   -2
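Taken together, the six diffs below serve the goal named in the source branch (dnn_reduce_mem_consumption): buffers parsed from protobuf are swapped into their destination blobs or freed as soon as their contents have been copied into cv::Mat, and the convolution layer drops its double-precision shadow copy of the weights, so peak memory during model import stays close to one copy of the model instead of two.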
modules/dnn/src/caffe/caffe_importer.cpp
@@ -250,16 +250,13 @@ public:
         blobShapeFromProto(pbBlob, shape);
 
         dstBlob.create((int)shape.size(), &shape[0], CV_32F);
-        float *dstData = dstBlob.ptr<float>();
         if (pbBlob.data_size())
         {
             // Single precision floats.
             CV_Assert(pbBlob.data_size() == (int)dstBlob.total());
-            for (int i = 0; i < pbBlob.data_size(); i++)
-                dstData[i] = pbBlob.data(i);
+            CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
+            Mat(dstBlob.dims, &dstBlob.size[0], CV_32F, (void*)pbBlob.data().data()).copyTo(dstBlob);
         }
         else
         {
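The element-by-element copy of each Caffe blob is replaced by a single bulk copy: a temporary Mat header is wrapped around the protobuf's own float array, and one copyTo() transfers the data. A minimal sketch of the pattern, with a plain std::vector standing in for pbBlob.data():

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        std::vector<float> protoData(2 * 3 * 4, 1.f);  // stands in for pbBlob.data()
        int shape[] = {2, 3, 4};

        cv::Mat dstBlob;
        dstBlob.create(3, shape, CV_32F);

        // This Mat constructor allocates nothing: it only builds a header over
        // protoData's memory, so copyTo() performs the single real copy.
        cv::Mat(dstBlob.dims, &dstBlob.size[0], CV_32F, (void*)protoData.data()).copyTo(dstBlob);
        return 0;
    }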
@@ -288,11 +285,18 @@ public:
         if (li == netBinary.layer_size() || netBinary.layer(li).blobs_size() == 0)
             return;
 
-        const caffe::LayerParameter &binLayer = netBinary.layer(li);
-        layerParams.blobs.resize(binLayer.blobs_size());
-        for (int bi = 0; bi < binLayer.blobs_size(); bi++)
+        caffe::LayerParameter* binLayer = netBinary.mutable_layer(li);
+        const int numBlobs = binLayer->blobs_size();
+        layerParams.blobs.resize(numBlobs);
+        for (int bi = 0; bi < numBlobs; bi++)
         {
-            blobFromProto(binLayer.blobs(bi), layerParams.blobs[bi]);
+            blobFromProto(binLayer->blobs(bi), layerParams.blobs[bi]);
         }
+        binLayer->clear_blobs();
+        CV_Assert(numBlobs == binLayer->blobs().ClearedCount());
+        for (int bi = 0; bi < numBlobs; bi++)
+        {
+            delete binLayer->mutable_blobs()->ReleaseCleared();
+        }
     }
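Once a layer's blobs have been copied into layerParams.blobs, the protobuf copies are freed immediately instead of surviving until the whole NetParameter is destroyed. Note that clear_blobs() alone would not release memory: protobuf's RepeatedPtrField keeps cleared elements cached for reuse, and ReleaseCleared() must hand each cached element back to the caller before it can be deleted. A sketch of the idiom using the same generated caffe classes:

    #include "caffe.pb.h"  // protobuf classes bundled with OpenCV's dnn module

    // Free the weight blobs of layer li right after they have been consumed.
    void dropLayerBlobs(caffe::NetParameter& netBinary, int li)
    {
        caffe::LayerParameter* binLayer = netBinary.mutable_layer(li);
        const int numBlobs = binLayer->blobs_size();
        binLayer->clear_blobs();  // elements move to the field's "cleared" cache
        for (int bi = 0; bi < numBlobs; bi++)
            delete binLayer->mutable_blobs()->ReleaseCleared();  // actually free each blob
    }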
modules/dnn/src/caffe/caffe_io.cpp
@@ -132,7 +132,7 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
                             NetParameter* param_upgraded_pad);
 
 // Upgrade a single V0LayerConnection to the V1LayerParameter format.
-bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
+bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection,
                              V1LayerParameter* layer_param);
 
 V1LayerParameter_LayerType UpgradeV0LayerType(const string& type);
@@ -149,9 +149,9 @@ bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param);
 
 // Perform all necessary transformations to upgrade a NetParameter with
 // deprecated V1LayerParameters.
-bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param);
+bool UpgradeV1Net(NetParameter* net_param);
 
-bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
+bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param,
                              LayerParameter* layer_param);
 
 const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type);
@@ -194,7 +194,7 @@ bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers,
     net_param->set_name(v0_net_param.name());
   }
   for (int i = 0; i < v0_net_param.layers_size(); ++i) {
-    is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.layers(i),
+    is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.mutable_layers(i),
                                                    net_param->add_layers());
   }
   for (int i = 0; i < v0_net_param.input_size(); ++i) {
@@ -268,8 +268,10 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
   }
 }
 
-bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
+bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_,
                              V1LayerParameter* layer_param) {
+  CV_Assert(v0_layer_connection_ != NULL);
+  const V1LayerParameter& v0_layer_connection = *v0_layer_connection_;
   bool is_fully_compatible = true;
   layer_param->Clear();
   for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) {
@@ -287,9 +289,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
   if (v0_layer_param.has_type()) {
     layer_param->set_type(UpgradeV0LayerType(type));
   }
-  for (int i = 0; i < v0_layer_param.blobs_size(); ++i) {
-    layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i));
-  }
+  layer_param->mutable_blobs()->Swap(v0_layer_connection_->mutable_blobs());
   for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) {
     layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i));
   }
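The per-blob deep copy (add_blobs()->CopyFrom(...)) becomes a Swap() of the two repeated fields: the upgraded layer takes ownership of the existing BlobProto objects and the source layer is left empty, so the weights are never duplicated. A sketch of the difference; both message types expose the same repeated BlobProto blobs field:

    #include "caffe.pb.h"

    void moveBlobs(caffe::V1LayerParameter* src, caffe::LayerParameter* dst)
    {
        // Old behaviour -- deep copy, briefly doubling the blob memory:
        //   for (int i = 0; i < src->blobs_size(); ++i)
        //       dst->add_blobs()->CopyFrom(src->blobs(i));

        // New behaviour -- exchange internal pointers only; src ends up empty:
        dst->mutable_blobs()->Swap(src->mutable_blobs());
    }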
@@ -770,8 +770,7 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
   if (NetNeedsV1ToV2Upgrade(*param)) {
     LOG(ERROR) << "Attempting to upgrade input file specified using deprecated "
                << "V1LayerParameter: " << param_file;
-    NetParameter original_param(*param);
-    if (!UpgradeV1Net(original_param, param)) {
+    if (!UpgradeV1Net(param)) {
       success = false;
       LOG(ERROR) << "Warning: had one or more problems upgrading "
                  << "V1LayerParameter (see above); continuing anyway.";
@@ -791,23 +790,24 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
   return success;
 }
 
-bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
+bool UpgradeV1Net(NetParameter* net_param) {
+  // V1LayerParameter layers -> LayerParameter layer
+  CV_Assert(net_param != NULL);
   bool is_fully_compatible = true;
-  if (v1_net_param.layer_size() > 0) {
+  if (net_param->layer_size() > 0) {
     LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
                << "fields; these will be ignored for the upgrade.";
     is_fully_compatible = false;
   }
-  net_param->CopyFrom(v1_net_param);
-  net_param->clear_layers();
   net_param->clear_layer();
-  for (int i = 0; i < v1_net_param.layers_size(); ++i) {
-    if (!UpgradeV1LayerParameter(v1_net_param.layers(i),
+  for (int i = 0; i < net_param->layers_size(); ++i) {
+    if (!UpgradeV1LayerParameter(net_param->mutable_layers(i),
                                  net_param->add_layer())) {
       LOG(ERROR) << "Upgrade of input layer " << i << " failed.";
       is_fully_compatible = false;
     }
   }
+  net_param->clear_layers();
   return is_fully_compatible;
 }
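UpgradeV1Net now works in place: the full deep copy of the network that UpgradeNetAsNeeded used to make (NetParameter original_param(*param)) is gone. Each deprecated entry in layers is converted into a new entry appended to layer, and the old list is cleared only after the loop, so both lists briefly coexist but the heavy blob payloads are moved by the Swap() inside UpgradeV1LayerParameter rather than copied. A self-contained sketch of the same migration pattern on hypothetical plain structs:

    #include <string>
    #include <utility>
    #include <vector>

    struct V1Layer { std::string name; std::vector<float> blobs; };
    struct Layer   { std::string name; std::vector<float> blobs; };
    struct Net     { std::vector<V1Layer> layers; std::vector<Layer> layer; };

    void upgradeInPlace(Net* net)
    {
        for (V1Layer& old : net->layers)
        {
            Layer upgraded;
            upgraded.name  = old.name;
            upgraded.blobs = std::move(old.blobs);  // move, don't copy, the weights
            net->layer.push_back(std::move(upgraded));
        }
        net->layers.clear();  // the old entries are empty shells by now
    }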
@@ -834,8 +834,10 @@ void UpgradeNetBatchNorm(NetParameter* net_param) {
   }
 }
 
-bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
+bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_,
                              LayerParameter* layer_param) {
+  CV_Assert(v1_layer_param_ != NULL);
+  const V1LayerParameter& v1_layer_param = *v1_layer_param_;
   layer_param->Clear();
   bool is_fully_compatible = true;
   for (int i = 0; i < v1_layer_param.bottom_size(); ++i) {
@@ -856,9 +858,7 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
   if (v1_layer_param.has_type()) {
     layer_param->set_type(UpgradeV1LayerType(v1_layer_param.type()));
   }
-  for (int i = 0; i < v1_layer_param.blobs_size(); ++i) {
-    layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i));
-  }
+  layer_param->mutable_blobs()->Swap(v1_layer_param_->mutable_blobs());
   for (int i = 0; i < v1_layer_param.param_size(); ++i) {
     while (layer_param->param_size() <= i) {
       layer_param->add_param();
     }
     layer_param->mutable_param(i)->set_name(v1_layer_param.param(i));
modules/dnn/src/layers/convolution_layer.cpp
@@ -169,7 +169,8 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
 {
 public:
     enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
-    Mat weightsMat, weightsMat_doubles;
+    Mat weightsMat;
+    std::vector<double> weightsMultipliers;
     std::vector<float> biasvec;
     std::vector<float> reluslope;
     Ptr<ActivationLayer> activ;
@@ -259,7 +260,7 @@ public:
             wm = wm_aligned;
         }
         weightsMat = wm;
-        weightsMat.convertTo(weightsMat_doubles, CV_64F);
+        weightsMultipliers.assign(outCn, 1.0);
 
         Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
         biasvec.resize(outCn+2);
@@ -335,13 +336,14 @@ public:
             if (!w.empty())
             {
+                Mat originWeights = blobs[0].reshape(1, outCn);
                 for (int i = 0; i < outCn; ++i)
                 {
                     double wi = w.at<float>(i);
-                    cv::multiply(slice(weightsMat_doubles, i), wi, slice(weightsMat_doubles, i));
+                    weightsMultipliers[i] *= wi;
+                    cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
                     biasvec[i] *= wi;
                 }
-                weightsMat_doubles.convertTo(weightsMat, weightsMat.type());
             }
 
             if (!b.empty())
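Fusing per-channel scale factors into the convolution weights used to require a full CV_64F shadow copy of the weight matrix (weightsMat_doubles). The new code keeps only a vector of per-output-channel double multipliers and recomputes each fused row from the original blob, which removes an entire double-precision copy of the weights while staying numerically stable across repeated fusions: the multiplier accumulates in double, and each fused row is always original * accumulated multiplier rather than an already-rescaled row rescaled again. A minimal sketch of the scheme outside the layer class:

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        const int outCn = 4, inSz = 16;
        cv::Mat origin(outCn, inSz, CV_32F, cv::Scalar(1));  // pristine weights
        cv::Mat fused = origin.clone();                      // weights actually used
        std::vector<double> multipliers(outCn, 1.0);

        // Fold in a new per-channel scale w[i] (e.g. from a fused BatchNorm):
        std::vector<float> w(outCn, 0.5f);
        for (int i = 0; i < outCn; ++i)
        {
            multipliers[i] *= w[i];
            cv::multiply(origin.row(i), multipliers[i], fused.row(i));
        }
        return 0;
    }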
modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
@@ -612,7 +612,7 @@ void RemoveIdentityOps(tensorflow::GraphDef& net)
 
 Mat getTensorContent(const tensorflow::TensorProto &tensor)
 {
-    std::string content = tensor.tensor_content();
+    const std::string& content = tensor.tensor_content();
     switch (tensor.dtype())
     {
         case tensorflow::DT_FLOAT:
@@ -681,6 +681,14 @@ Mat getTensorContent(const tensorflow::TensorProto &tensor)
     return Mat();
 }
 
+void releaseTensor(tensorflow::TensorProto* tensor)
+{
+    if (!tensor->mutable_tensor_content()->empty())
+    {
+        delete tensor->release_tensor_content();
+    }
+}
+
 CV__DNN_EXPERIMENTAL_NS_END
 }}  // namespace dnn, namespace cv
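Two memory fixes here: getTensorContent() now binds tensor_content() to a const std::string& instead of copying the whole byte buffer on every call, and the new releaseTensor() helper lets importers free a tensor's raw bytes as soon as they have been converted. In protobuf's generated API, release_tensor_content() detaches the heap-allocated std::string holding the bytes and transfers ownership to the caller, so deleting the returned pointer is what actually frees the memory; the message itself remains valid with an empty field. A sketch (the generated header name may differ in your build):

    #include "tensor.pb.h"  // tensorflow::TensorProto, as generated for OpenCV's dnn module

    void dropTensorBytes(tensorflow::TensorProto* tensor)
    {
        if (!tensor->tensor_content().empty())
            delete tensor->release_tensor_content();  // frees the detached byte buffer
        // tensor->tensor_content() is now empty; other fields are untouched.
    }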
modules/dnn/src/tensorflow/tf_graph_simplifier.hpp
@@ -23,6 +23,8 @@ void simplifySubgraphs(tensorflow::GraphDef& net);
 
 Mat getTensorContent(const tensorflow::TensorProto &tensor);
 
+void releaseTensor(tensorflow::TensorProto* tensor);
+
 CV__DNN_EXPERIMENTAL_NS_END
 }}  // namespace dnn, namespace cv
modules/dnn/src/tensorflow/tf_importer.cpp
@@ -677,7 +677,9 @@ void TFImporter::populateNet(Net dstNet)
                 layers_to_ignore.insert(next_layers[0].first);
             }
 
-            kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
+            const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id);
+            kernelFromTensor(kernelTensor, layerParams.blobs[0]);
+            releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
 
             int* kshape = layerParams.blobs[0].size.p;
             if (type == "DepthwiseConv2dNative")
             {
@@ -788,7 +790,9 @@ void TFImporter::populateNet(Net dstNet)
             }
             int kernel_blob_index = -1;
-            blobFromTensor(getConstBlob(layer, value_id, -1, &kernel_blob_index), layerParams.blobs[0]);
+            const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernel_blob_index);
+            blobFromTensor(kernelTensor, layerParams.blobs[0]);
+            releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
             if (kernel_blob_index == 1)
             {
                 // In this case output is computed by x*W formula - W should be transposed
                 Mat data = layerParams.blobs[0].t();
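In both call sites the pattern is the same: getConstBlob() returns a const reference to a tensor inside the parsed GraphDef, kernelFromTensor()/blobFromTensor() deep-copies it into a cv::Mat, and releaseTensor() then frees the protobuf bytes so only one copy of the weights stays alive. The const_cast is needed because getConstBlob() exposes the tensor read-only; it is safe here only because the importer owns the GraphDef and never reads that tensor's content again.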