Commit ed150bd9 authored by Alexander Alekhin

Merge pull request #11461 from dkurt:dnn_reduce_mem_consumption

parents d9ddca04 c99c3e76
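Summary of the change (as reflected in the diff below): peak memory consumption during Caffe and TensorFlow model import is reduced by moving weight blobs out of the parsed protobuf messages (Swap / ReleaseCleared / release_tensor_content) instead of deep-copying them, by upgrading deprecated Caffe nets in place rather than on a full NetParameter copy, and by replacing the convolution layer's CV_64F shadow copy of its weights with per-channel double multipliers.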
@@ -250,16 +250,13 @@ public:
         blobShapeFromProto(pbBlob, shape);
         dstBlob.create((int)shape.size(), &shape[0], CV_32F);
-        float *dstData = dstBlob.ptr<float>();
         if (pbBlob.data_size())
         {
             // Single precision floats.
             CV_Assert(pbBlob.data_size() == (int)dstBlob.total());
             CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
-            for (int i = 0; i < pbBlob.data_size(); i++)
-                dstData[i] = pbBlob.data(i);
+            Mat(dstBlob.dims, &dstBlob.size[0], CV_32F, (void*)pbBlob.data().data()).copyTo(dstBlob);
         }
         else
         {
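The new one-liner does the same job as the removed element-wise loop: wrap the protobuf float array in a cv::Mat header (no allocation) and let copyTo() do one bulk copy. A minimal standalone sketch of the pattern, with a plain float buffer standing in for pbBlob.data():

#include <opencv2/core.hpp>
#include <vector>

int main()
{
    std::vector<float> src(2 * 3, 1.5f);   // stands in for pbBlob.data()
    int sizes[] = {2, 3};

    cv::Mat dst;
    dst.create(2, sizes, CV_32F);

    // Mat header over external memory: no allocation, no per-element loop.
    cv::Mat(2, sizes, CV_32F, (void*)src.data()).copyTo(dst);
    return 0;
}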
@@ -288,11 +285,18 @@ public:
         if (li == netBinary.layer_size() || netBinary.layer(li).blobs_size() == 0)
             return;
-        const caffe::LayerParameter &binLayer = netBinary.layer(li);
-        layerParams.blobs.resize(binLayer.blobs_size());
-        for (int bi = 0; bi < binLayer.blobs_size(); bi++)
-        {
-            blobFromProto(binLayer.blobs(bi), layerParams.blobs[bi]);
-        }
+        caffe::LayerParameter* binLayer = netBinary.mutable_layer(li);
+        const int numBlobs = binLayer->blobs_size();
+        layerParams.blobs.resize(numBlobs);
+        for (int bi = 0; bi < numBlobs; bi++)
+        {
+            blobFromProto(binLayer->blobs(bi), layerParams.blobs[bi]);
+        }
+        binLayer->clear_blobs();
+        CV_Assert(numBlobs == binLayer->blobs().ClearedCount());
+        for (int bi = 0; bi < numBlobs; bi++)
+        {
+            delete binLayer->mutable_blobs()->ReleaseCleared();
+        }
     }
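Why the extra clear_blobs() / ReleaseCleared() dance: in proto2 C++, Clear() on a repeated message field does not free the elements; it parks them in an internal "cleared objects" pool for reuse. ReleaseCleared() hands ownership of one pooled element back to the caller, so deleting it actually returns the memory. A hedged sketch of the idiom (these RepeatedPtrField methods exist in the protobuf releases this code targeted; newer protobuf deprecates and later removes them):

#include "caffe.pb.h"   // generated from caffe.proto

// Free a layer's blob payload instead of letting protobuf cache it.
void dropBlobs(caffe::LayerParameter* layer)
{
    const int n = layer->blobs_size();
    layer->clear_blobs();   // elements move to the internal "cleared" pool
    for (int i = 0; i < n; ++i)
        delete layer->mutable_blobs()->ReleaseCleared();   // take ownership, free
}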
...
@@ -132,7 +132,7 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
                             NetParameter* param_upgraded_pad);
 // Upgrade a single V0LayerConnection to the V1LayerParameter format.
-bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
+bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection,
                              V1LayerParameter* layer_param);
 V1LayerParameter_LayerType UpgradeV0LayerType(const string& type);
@@ -149,9 +149,9 @@ bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param);
 // Perform all necessary transformations to upgrade a NetParameter with
 // deprecated V1LayerParameters.
-bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param);
-bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
+bool UpgradeV1Net(NetParameter* net_param);
+bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param,
                              LayerParameter* layer_param);
 const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type);
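These signature changes are the point of the patch: taking mutable pointers instead of const references lets the upgrade code move blob data out of the deprecated messages (via Swap(), shown further down) rather than duplicating it with CopyFrom(), so each model's weights exist only once in memory during the upgrade.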
@@ -194,7 +194,7 @@ bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers,
     net_param->set_name(v0_net_param.name());
   }
   for (int i = 0; i < v0_net_param.layers_size(); ++i) {
-    is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.layers(i),
+    is_fully_compatible &= UpgradeV0LayerParameter(v0_net_param.mutable_layers(i),
                                                    net_param->add_layers());
   }
   for (int i = 0; i < v0_net_param.input_size(); ++i) {
@@ -268,8 +268,10 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
   }
 }
-bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
+bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_,
                              V1LayerParameter* layer_param) {
+  CV_Assert(v0_layer_connection_ != NULL);
+  const V1LayerParameter& v0_layer_connection = *v0_layer_connection_;
   bool is_fully_compatible = true;
   layer_param->Clear();
   for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) {
@@ -287,9 +289,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
   if (v0_layer_param.has_type()) {
     layer_param->set_type(UpgradeV0LayerType(type));
   }
-  for (int i = 0; i < v0_layer_param.blobs_size(); ++i) {
-    layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i));
-  }
+  layer_param->mutable_blobs()->Swap(v0_layer_connection_->mutable_blobs());
   for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) {
     layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i));
   }
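RepeatedPtrField::Swap() exchanges the two fields' internal pointer arrays in O(1), so the blobs migrate to the new layer without a deep copy. A minimal sketch under the same generated Caffe protos:

#include "caffe.pb.h"   // generated from caffe.proto

void moveBlobs(caffe::V1LayerParameter* from, caffe::LayerParameter* to)
{
    // After this call `to` owns the blob data and `from` holds `to`'s
    // (previously empty) list; nothing is copied.
    to->mutable_blobs()->Swap(from->mutable_blobs());
}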
@@ -770,8 +770,7 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
   if (NetNeedsV1ToV2Upgrade(*param)) {
     LOG(ERROR) << "Attempting to upgrade input file specified using deprecated "
                << "V1LayerParameter: " << param_file;
-    NetParameter original_param(*param);
-    if (!UpgradeV1Net(original_param, param)) {
+    if (!UpgradeV1Net(param)) {
       success = false;
       LOG(ERROR) << "Warning: had one or more problems upgrading "
                  << "V1LayerParameter (see above); continuing anyway.";
@@ -791,23 +790,24 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
   return success;
 }
-bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
+bool UpgradeV1Net(NetParameter* net_param) {
+  // V1LayerParameter layers -> LayerParameter layer
+  CV_Assert(net_param != NULL);
   bool is_fully_compatible = true;
-  if (v1_net_param.layer_size() > 0) {
+  if (net_param->layer_size() > 0) {
     LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
                << "fields; these will be ignored for the upgrade.";
     is_fully_compatible = false;
   }
-  net_param->CopyFrom(v1_net_param);
-  net_param->clear_layers();
   net_param->clear_layer();
-  for (int i = 0; i < v1_net_param.layers_size(); ++i) {
-    if (!UpgradeV1LayerParameter(v1_net_param.layers(i),
+  for (int i = 0; i < net_param->layers_size(); ++i) {
+    if (!UpgradeV1LayerParameter(net_param->mutable_layers(i),
                                  net_param->add_layer())) {
       LOG(ERROR) << "Upgrade of input layer " << i << " failed.";
       is_fully_compatible = false;
     }
   }
+  net_param->clear_layers();
   return is_fully_compatible;
 }
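Upgrading in place removes the NetParameter original_param(*param) snapshot the old call site needed, which by itself duplicated every layer's weights for the duration of the upgrade. Note the ordering: clear_layers() now runs only after the loop, once each deprecated layer's blobs have been swapped into the new 'layer' field.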
@@ -834,8 +834,10 @@ void UpgradeNetBatchNorm(NetParameter* net_param) {
   }
 }
-bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
+bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_,
                              LayerParameter* layer_param) {
+  CV_Assert(v1_layer_param_ != NULL);
+  const V1LayerParameter& v1_layer_param = *v1_layer_param_;
   layer_param->Clear();
   bool is_fully_compatible = true;
   for (int i = 0; i < v1_layer_param.bottom_size(); ++i) {
@@ -856,9 +858,7 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
   if (v1_layer_param.has_type()) {
     layer_param->set_type(UpgradeV1LayerType(v1_layer_param.type()));
   }
-  for (int i = 0; i < v1_layer_param.blobs_size(); ++i) {
-    layer_param->add_blobs()->CopyFrom(v1_layer_param.blobs(i));
-  }
+  layer_param->mutable_blobs()->Swap(v1_layer_param_->mutable_blobs());
   for (int i = 0; i < v1_layer_param.param_size(); ++i) {
     while (layer_param->param_size() <= i) { layer_param->add_param(); }
     layer_param->mutable_param(i)->set_name(v1_layer_param.param(i));
...
@@ -169,7 +169,8 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
 {
 public:
     enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
-    Mat weightsMat, weightsMat_doubles;
+    Mat weightsMat;
+    std::vector<double> weightsMultipliers;
     std::vector<float> biasvec;
     std::vector<float> reluslope;
     Ptr<ActivationLayer> activ;
@@ -259,7 +260,7 @@ public:
             wm = wm_aligned;
         }
         weightsMat = wm;
-        weightsMat.convertTo(weightsMat_doubles, CV_64F);
+        weightsMultipliers.assign(outCn, 1.0);
         Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
         biasvec.resize(outCn+2);
@@ -335,13 +336,14 @@ public:
         if (!w.empty())
         {
+            Mat originWeights = blobs[0].reshape(1, outCn);
             for (int i = 0; i < outCn; ++i)
             {
                 double wi = w.at<float>(i);
-                cv::multiply(slice(weightsMat_doubles, i), wi, slice(weightsMat_doubles, i));
+                weightsMultipliers[i] *= wi;
+                cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
                 biasvec[i] *= wi;
             }
-            weightsMat_doubles.convertTo(weightsMat, weightsMat.type());
         }
         if (!b.empty())
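Previously the layer kept weightsMat_doubles, a CV_64F copy of the entire weight matrix, purely so repeated scale fusions would not accumulate rounding error. The new scheme stores one double multiplier per output channel and rescales from the pristine blobs[0] each time a scale is folded in. A hedged standalone sketch of that strategy (the function name and parameters are illustrative, not the layer's API):

#include <opencv2/core.hpp>
#include <vector>

void fuseScale(const cv::Mat& originWeights,      // outCn x K, CV_32F, pristine
               cv::Mat& weightsMat,               // fused weights, same shape
               std::vector<double>& multipliers,  // one double per output channel
               const cv::Mat& w)                  // per-channel scale, CV_32F
{
    for (int i = 0; i < originWeights.rows; ++i)
    {
        multipliers[i] *= w.at<float>(i);
        // Recompute from the original weights so repeated fusions don't
        // accumulate single-precision rounding error.
        cv::multiply(originWeights.row(i), multipliers[i], weightsMat.row(i));
    }
}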
...
@@ -612,7 +612,7 @@ void RemoveIdentityOps(tensorflow::GraphDef& net)
 Mat getTensorContent(const tensorflow::TensorProto &tensor)
 {
-    std::string content = tensor.tensor_content();
+    const std::string& content = tensor.tensor_content();
     switch (tensor.dtype())
     {
     case tensorflow::DT_FLOAT:
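tensor_content holds the raw serialized bytes of the tensor, which for weight tensors can be the bulk of the model; binding a const reference instead of copy-constructing a std::string saves one full copy of that payload on every call.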
@@ -681,6 +681,14 @@ Mat getTensorContent(const tensorflow::TensorProto &tensor)
     return Mat();
 }
+void releaseTensor(tensorflow::TensorProto* tensor)
+{
+    if (!tensor->mutable_tensor_content()->empty())
+    {
+        delete tensor->release_tensor_content();
+    }
+}
 CV__DNN_EXPERIMENTAL_NS_END
 }}  // namespace dnn, namespace cv
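For a protobuf bytes field, the generated release_tensor_content() clears the field and returns the backing std::string with ownership transferred to the caller, so the delete above frees the weight payload immediately instead of leaving it alive inside the GraphDef for the rest of the import.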
...
@@ -23,6 +23,8 @@ void simplifySubgraphs(tensorflow::GraphDef& net);
 Mat getTensorContent(const tensorflow::TensorProto &tensor);
+void releaseTensor(tensorflow::TensorProto* tensor);
 CV__DNN_EXPERIMENTAL_NS_END
 }}  // namespace dnn, namespace cv
...
@@ -677,7 +677,9 @@ void TFImporter::populateNet(Net dstNet)
                     layers_to_ignore.insert(next_layers[0].first);
                 }
-                kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
+                const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id);
+                kernelFromTensor(kernelTensor, layerParams.blobs[0]);
+                releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
                 int* kshape = layerParams.blobs[0].size.p;
                 if (type == "DepthwiseConv2dNative")
                 {
@@ -788,7 +790,9 @@ void TFImporter::populateNet(Net dstNet)
                 }
                 int kernel_blob_index = -1;
-                blobFromTensor(getConstBlob(layer, value_id, -1, &kernel_blob_index), layerParams.blobs[0]);
+                const tensorflow::TensorProto& kernelTensor = getConstBlob(layer, value_id, -1, &kernel_blob_index);
+                blobFromTensor(kernelTensor, layerParams.blobs[0]);
+                releaseTensor(const_cast<tensorflow::TensorProto*>(&kernelTensor));
                 if (kernel_blob_index == 1) { // In this case output is computed by x*W formula - W should be transposed
                     Mat data = layerParams.blobs[0].t();
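The const_cast is legitimate here: getConstBlob() returns a const reference into the importer's own mutable GraphDef, and by this point the kernel bytes have already been deep-copied into layerParams.blobs[0], so releasing the proto's copy of them is safe.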
...