Commit 09b73b2d authored by Aleksandr Rybnikov, committed by Vadim Pisarevsky

Blob reuse improvement (#1205)

* Reuse deep learning output blobs

* Changed the order of iterating through blobs when searching for reusable memory; minor refactoring.
parent 1c8809ff
@@ -369,6 +369,21 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
     CV_WRAP void getMemoryConsumption(const int layerId,
                                       const MatShape& netInputShape,
                                       size_t& weights, size_t& blobs) const;
+    /** @brief Computes the number of bytes required to store
+     *  all weights and intermediate blobs for each layer.
+     *  @param netInputShapes vector of shapes for all net inputs.
+     *  @param layerIds output vector to save layer IDs.
+     *  @param weights output parameter to store resulting bytes for weights.
+     *  @param blobs output parameter to store resulting bytes for intermediate blobs.
+     */
+    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                      std::vector<int>& layerIds, std::vector<size_t>& weights,
+                                      std::vector<size_t>& blobs) const;
+    /** @overload */
+    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+                                      std::vector<int>& layerIds, std::vector<size_t>& weights,
+                                      std::vector<size_t>& blobs) const;
 private:
     struct Impl;
......
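As a rough usage sketch of the overloads added above (illustration only, not part of the commit; the model file names and the readNetFromCaffe() loading step are assumptions), per-layer memory estimates could be queried like this:

    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/shape_utils.hpp>
    #include <iostream>

    int main()
    {
        using namespace cv::dnn;
        // Hypothetical model files; loading a network is not part of this patch.
        Net net = readNetFromCaffe("net.prototxt", "net.caffemodel");

        std::vector<int> layerIds;
        std::vector<size_t> weights, blobs;
        // Per-layer byte estimates for a single 1x3x227x227 input,
        // mirroring the call added to the FCN test further below.
        net.getMemoryConsumption(shape(1, 3, 227, 227), layerIds, weights, blobs);

        for (size_t i = 0; i < layerIds.size(); i++)
            std::cout << "layer " << layerIds[i] << ": weights " << weights[i]
                      << " bytes, blobs " << blobs[i] << " bytes" << std::endl;
        return 0;
    }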
@@ -2,6 +2,7 @@
 typedef dnn::DictValue LayerId;
 typedef std::vector<dnn::MatShape> vector_MatShape;
 typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;
+typedef std::vector<size_t> vector_size_t;
 template<>
 bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
......
One diff in this commit is collapsed and not shown.
@@ -30,6 +30,15 @@ public:
         epsilon = params.get<float>("eps", 1E-5);
     }
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
+    {
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
+    }
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(blobs.size() >= 2);
......
@@ -61,7 +61,12 @@ public:
         return true;
     }
-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) {}
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+    {
+        for (int i = 0, n = outputs.size(); i < n; ++i)
+            if (outputs[i].data != inputs[i]->data)
+                inputs[i]->copyTo(outputs[i]);
+    }
 };
 Ptr<BlankLayer> BlankLayer::create(const LayerParams& params)
......
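The forward() change above is the heart of the blob-reuse scheme: after memory planning, an output Mat may share its buffer with the layer's input, so pass-through layers copy only when the two buffers actually differ. A stand-alone sketch of that pattern (illustration only; the helper name is hypothetical):

    #include <opencv2/core.hpp>

    // Copy src into dst only when they do not already share memory,
    // as done in the blank, permute and split layers of this commit.
    static void passThrough(const cv::Mat &src, cv::Mat &dst)
    {
        if (dst.data != src.data)   // distinct buffers: a real copy is needed
            src.copyTo(dst);        // aliased buffers: dst already holds the data
    }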
@@ -20,17 +20,17 @@ public:
     class PBody : public cv::ParallelLoopBody
     {
         Func &func;
-        Dtype *data;
+        Dtype *src, *dst;
     public:
-        PBody(Mat &mat, Func &func_) :
-            func(func_), data(mat.ptr<Dtype>())
+        PBody(Mat &src, Mat &dst, Func &func_) :
+            func(func_), src(src.ptr<Dtype>()), dst(dst.ptr<Dtype>())
         {}
         void operator()(const Range &r) const
         {
             for (int i = r.start; i < r.end; i++)
-                data[i] = func(data[i]);
+                dst[i] = func(src[i]);
         }
     };
@@ -49,13 +49,13 @@ public:
     {
         for (size_t i = 0; i < inputs.size(); i++)
         {
-            const Mat &src = *inputs[i];
+            Mat &src = *inputs[i];
             Mat &dst = outputs[i];
-            CV_Assert(src.ptr() == dst.ptr() && src.isContinuous());
+            CV_Assert(src.isContinuous() && dst.isContinuous());
             Range sizeRange = Range(0, dst.total());
             CV_Assert(src.type() == CV_32F);
-            PBody<float> body(dst, func);
+            PBody<float> body(src, dst, func);
             if( run_parallel )
                 cv::parallel_for_(sizeRange, body);
             else
......
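Previously the element-wise activation layers had to run in place (the removed assertion required src.ptr() == dst.ptr()); after the change the functor reads from src and writes to dst, which may now be a separate, reused buffer. A self-contained sketch of the same ParallelLoopBody idea, using ReLU as a stand-in functor (illustration only, not the module's code):

    #include <opencv2/core.hpp>
    #include <algorithm>

    // Reads from src and writes to dst, like the updated PBody above.
    // src and dst may alias (in-place) or be two different Mats.
    struct ReluBody : public cv::ParallelLoopBody
    {
        const float *src;
        float *dst;
        ReluBody(const cv::Mat &s, cv::Mat &d) : src(s.ptr<float>()), dst(d.ptr<float>()) {}
        void operator()(const cv::Range &r) const
        {
            for (int i = r.start; i < r.end; i++)
                dst[i] = std::max(src[i], 0.f);   // ReLU in place of func(src[i])
        }
    };

    int main()
    {
        cv::Mat src(1, 8, CV_32F), dst(1, 8, CV_32F);
        cv::randn(src, 0, 1);
        cv::parallel_for_(cv::Range(0, (int)src.total()), ReluBody(src, dst));
        return 0;
    }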
@@ -178,7 +178,7 @@ public:
         for (size_t i = 0; i < inputs.size(); i++)
         {
             Mat srcBlob = *inputs[i];
-            MatShape inputShape = shape(srcBlob);
+            MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
             if (performReordering)
             {
@@ -204,6 +204,11 @@
                 }
                 internals[i].copyTo(outputs[i]);
             }
+            else
+            {
+                if (outputs[i].data != srcBlob.data)
+                    srcBlob.reshape(1, outShape).copyTo(outputs[i]);
+            }
         }
     }
......
@@ -27,6 +27,15 @@ public:
         hasBias = params.get<bool>("bias_term", false);
     }
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
+    {
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
+    }
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         CV_Assert(blobs.size() == 1 + hasBias);
......
@@ -72,17 +72,17 @@ public:
     {
         CV_Assert(inputs.size() == 1);
-        outputs.resize(outputsCount >= 0 ? outputsCount : requiredOutputs,
-                       inputs[0]);
-        return false;
+        Layer::getMemoryShapes(inputs, outputsCount >= 0 ? outputsCount : requiredOutputs,
+                               outputs, internals);
+        return true;
     }
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
     {
         for (size_t i = 0; i < outputs.size(); i++)
         {
-            inputs[0]->copyTo(outputs[i]);
+            if (outputs[i].data != inputs[0]->data)
+                inputs[0]->copyTo(outputs[i]);
         }
     }
 };
......
@@ -121,6 +121,10 @@ TEST(Reproducibility_FCN, Accuracy)
     if (sample.size() != inputSize)
         resize(sample, sample, inputSize);
+    std::vector<int> layerIds;
+    std::vector<size_t> weights, blobs;
+    net.getMemoryConsumption(shape(1,3,227,227), layerIds, weights, blobs);
     net.setBlob(".data", blobFromImage(sample, 1.));
     net.forward();
......