Commit a5d0ef52 authored by Aleksandr Rybnikov

Added statistics functions

parent 9b73fee2
@@ -135,6 +135,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const;
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const {(void)inputs; (void)outputs; return 0;}
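The default above reports zero cost; concrete layers override it to sum their per-blob work, as the implementations later in this commit do. As a purely hypothetical sketch (not part of this commit) of such an override, for a layer assumed to perform one multiplication per input element:

class DoublingLayerImpl : public Layer
{
public:
    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        int64 flops = 0;
        for (size_t i = 0; i < inputs.size(); i++)
            flops += total(inputs[i]); // one multiplication per element
        return flops;
    }
};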
CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
@@ -323,6 +325,50 @@ namespace dnn //! This namespace is used for dnn module functionality.
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
/** @brief Computes FLOP for the whole loaded model with the specified input shapes.
* @param netInputShapes vector of shapes for all net inputs.
* @returns computed FLOP.
*/
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const MatShape& netInputShape) const;
/** @brief Returns list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;
/** @brief Returns count of layers of the specified type.
* @param layerType type of layers to count.
* @returns count of layers of the given type.
*/
CV_WRAP int getLayersCount(const String& layerType) const;
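A brief usage sketch of the two calls above, assuming `net` is an already-populated Net:

std::vector<String> types;
net.getLayerTypes(types);                          // e.g. {"Convolution", "ReLU", "Pooling", ...}
int convCount = net.getLayersCount("Convolution"); // how many convolution layers the model has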
/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for the model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
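Putting the statistics API together — a minimal usage sketch (file names are placeholders), assuming a model loaded through this module's Caffe importer and the shape() helper from shape_utils.hpp:

Ptr<Importer> importer = createCaffeImporter("net.prototxt", "net.caffemodel");
Net net;
importer->populateNet(net);

std::vector<MatShape> inputShapes(1, shape(1, 3, 224, 224));
int64 flops = net.getFLOPS(inputShapes);        // whole-model FLOP estimate

size_t weights = 0, blobs = 0;
net.getMemoryConsumption(inputShapes, weights, blobs);
std::cout << flops << " FLOP, " << weights << " B weights, "
          << blobs << " B blobs" << std::endl;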
private:
struct Impl;
...
@@ -876,6 +876,144 @@ void Net::getLayerShapes(const Net::Impl::ShapesVec& netInputShapes,
*outLayerShapes = shapes.out;
}
int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const
{
int64 flops = 0;
std::vector<int> ids;
std::vector<std::vector<MatShape> > inShapes, outShapes;
getLayersShapes(netInputShapes, &ids, &inShapes, &outShapes);
CV_Assert(inShapes.size() == outShapes.size());
CV_Assert(inShapes.size() == ids.size());
for(int i = 0; i < ids.size(); i++)
{
flops += impl->layers[ids[i]].getLayerInstance()->getFLOPS(inShapes[i],
outShapes[i]);
}
return flops;
}
int64 Net::getFLOPS(const MatShape& netInputShape) const
{
return getFLOPS(std::vector<MatShape>(1, netInputShape));
}
int64 Net::getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
CV_Assert(layer != impl->layers.end());
Impl::LayerShapes shapes;
impl->getLayerShapes(netInputShapes, layerId, shapes);
return layer->second.getLayerInstance()->getFLOPS(shapes.in, shapes.out);
}
int64 Net::getFLOPS(const int layerId,
const MatShape& netInputShape) const
{
return getFLOPS(layerId, std::vector<MatShape>(1, netInputShape));
}
void Net::getLayerTypes(std::vector<String>& layersTypes) const
{
layersTypes.clear();
std::map<String, int> layers;
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
it != impl->layers.end(); it++)
{
layers[it->second.type]++; // operator[] value-initializes missing entries to 0
}
for (std::map<String, int>::iterator it = layers.begin();
it != layers.end(); it++)
{
layersTypes.push_back(it->first);
}
}
int Net::getLayersCount(const String& layerType) const
{
int count = 0;
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
it != impl->layers.end(); it++)
{
if (it->second.type == layerType)
count++;
}
return count;
}
void Net::getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
CV_Assert(layer != impl->layers.end());
weights = blobs = 0;
for(int i = 0; i < layer->second.params.blobs.size(); i++)
{
const Mat& weightsBlob = layer->second.params.blobs[i];
weights += weightsBlob.total()*weightsBlob.elemSize();
}
std::vector<MatShape> outLayerShapes;
getLayerShapes(netInputShapes, layerId, 0, &outLayerShapes);
for(int i = 0; i < outLayerShapes.size(); i++)
{
blobs += total(outLayerShapes[i]) * sizeof(float);
}
}
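For scale, reading the two accumulators above with illustrative numbers: a convolution layer with 128 filters of size 3x3 over 64 input channels stores 128*64*3*3 = 73,728 float weights, i.e. 73,728*4 = 294,912 bytes counted into `weights`; a 1x128x56x56 output blob adds 128*56*56*4 = 1,605,632 bytes to `blobs`.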
void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const
{
std::vector<int> layerIds;
std::vector<std::vector<MatShape> > outLayerShapes;
getLayersShapes(netInputShapes, &layerIds, 0, &outLayerShapes);
weights = blobs = 0;
for(int i = 0; i < layerIds.size(); i++)
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]);
CV_Assert(layer != impl->layers.end());
for(int j = 0; j < layer->second.params.blobs.size(); j++)
{
const Mat& weightsBlob = layer->second.params.blobs[j];
weights += weightsBlob.total()*weightsBlob.elemSize();
}
for(int j = 0; j < outLayerShapes[i].size(); j++)
{
blobs += total(outLayerShapes[i][j]) * sizeof(float);
}
}
}
void Net::getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const
{
getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape),
weights, blobs);
}
void Net::getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const
{
getMemoryConsumption(std::vector<MatShape>(1, netInputShape),
weights, blobs);
}
//////////////////////////////////////////////////////////////////////////
Importer::~Importer() {}
...
@@ -10,6 +10,7 @@ Implementation of Batch Normalization layer.
*/
#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
@@ -78,6 +79,19 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 3*total(inputs[i]);
}
return flops;
}
bool hasWeights, hasBias;
float epsilon;
};
...
@@ -224,6 +224,20 @@ public:
dilation.height, dilation.width, outH, outW, dstRow.ptr<float>());
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
CV_Assert(inputs.size() == outputs.size());
int64 flops = 0;
for (int i = 0; i < inputs.size(); i++)
{
flops += total(outputs[i])*(2*kernel.area()*inputs[i][1] + 1);
}
return flops;
}
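For illustration (hypothetical shapes): a 3x3 convolution over a 64-channel input producing a 1x128x56x56 output is charged total(output) * (2*kernel.area()*inChannels + 1) = 401,408 * (2*9*64 + 1) = 401,408 * 1,153 ≈ 4.6e8 FLOP — a multiply and an add per kernel tap per input channel, plus one operation for the bias.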
};
class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
@@ -339,6 +353,22 @@ public:
dilation.height, dilation.width, dstImg.ptr<float>(), &ofsbuf[0]);
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
CV_Assert(inputs.size() == outputs.size());
int64 flops = 0;
int outChannels = blobs[0].size[0];
for (int i = 0; i < inputs.size(); i++)
{
flops += 2*outChannels*kernel.area()*total(inputs[i]);
}
return flops;
}
std::vector<int> ofsbuf;
};
...
@@ -63,6 +63,17 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
int64 flops = 0;
for (int i = 0; i < outputs.size(); i++)
{
flops += total(outputs[i]) * func.getFLOPSPerElement();
}
return flops;
}
Func func;
bool run_parallel;
};
@@ -79,6 +90,8 @@ struct ReLUFunctor
{
return (x >= (TFloat)0) ? x : (TFloat)slope * x;
}
int64 getFLOPSPerElement() const {return 1;}
};
struct TanHFunctor
@@ -90,6 +103,8 @@ struct TanHFunctor
{
return tanh(x);
}
int64 getFLOPSPerElement() const {return 1;}
};
struct SigmoidFunctor
@@ -101,6 +116,8 @@ struct SigmoidFunctor
{
return (TFloat)1 / ((TFloat)1 + exp(-x));
}
int64 getFLOPSPerElement() const {return 3;}
};
struct AbsValFunctor
@@ -112,6 +129,8 @@ struct AbsValFunctor
{
return abs(x);
}
int64 getFLOPSPerElement() const {return 1;}
};
struct BNLLFunctor
@@ -123,6 +142,8 @@ struct BNLLFunctor
{
return log((TFloat)1 + exp(-abs(x)));
}
int64 getFLOPSPerElement() const {return 5;}
};
struct PowerFunctor
@@ -141,6 +162,8 @@ struct PowerFunctor
{
return pow((TFloat)shift + (TFloat)scale * x, (TFloat)power);
}
int64 getFLOPSPerElement() const {return 3;}
};
struct PowerFunctor1
@@ -158,6 +181,8 @@ struct PowerFunctor1
{
return (TFloat)shift + (TFloat)scale * x;
}
int64 getFLOPSPerElement() const {return 2;}
};
class ChannelsPReLULayerImpl : public ChannelsPReLULayer
@@ -210,6 +235,20 @@ public:
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
int64 flops = 0;
for (int i = 0; i < outputs.size(); i++)
{
flops += total(outputs[i]) * 3;
}
return flops;
}
};
#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
...
@@ -143,6 +143,17 @@ public:
break;
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size());
int64 flops = (int64)inputs.size() * total(inputs[0]);
return flops;
}
};
Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)
...
@@ -117,6 +117,22 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
int64 flops = 0;
int innerSize = blobs[0].size[1];
for(int i = 0; i < outputs.size(); i++)
{
flops += 3*innerSize*total(outputs[i]);
}
return flops;
}
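Reading the inner-product estimate with illustrative numbers: a fully connected layer with innerSize = 4096 producing a 1x1000 output is charged 3*4096*1000 = 12,288,000 ≈ 1.2e7 FLOP.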
bool bias;
};
...
@@ -171,6 +171,35 @@ public:
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size() > 0);
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
if (type == CHANNEL_NRM)
{
int channels = inputs[i][1];
int ksize = (size - 1) / 2;
flops += inputs[i][0]*(std::min(ksize, channels)*2*total(inputs[i], 2) + channels*4*total(inputs[i], 2));
if (ksize < channels)
{
flops += (size + 2*(channels - size))*total(inputs[i], 2);
}
}
else
{
flops += total(inputs[i])*(2*size*size + 2);
}
}
return flops;
}
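A rough worked reading of the channel-wise branch (illustrative input shape 1x64x56x56 with size = 5, so ksize = 2): total(input, 2) = 56*56 = 3,136, giving 1*(min(2,64)*2*3,136 + 64*4*3,136) = 815,360, plus the clipped-window correction (5 + 2*(64-5))*3,136 = 385,728, about 1.2e6 FLOP in total.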
};
Ptr<LRNLayer> LRNLayer::create(const LayerParams& params)
...
@@ -85,6 +85,18 @@ public:
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 6*total(inputs[i]) + 3*total(inputs[i], 0, normVariance ? 2 : 1);
}
return flops;
}
};
Ptr<MVNLayer> MVNLayer::create(const LayerParams& params)
...
@@ -241,6 +241,27 @@ public:
return false;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < outputs.size(); i++)
{
if (type == MAX)
{
if (i%2 == 0) // even-indexed outputs hold pooled values; odd-indexed ones are assumed to be index masks
flops += total(outputs[i])*kernel.area();
}
else
{
flops += total(outputs[i])*(kernel.area() + 1);
}
}
return flops;
}
};
Ptr<PoolingLayer> PoolingLayer::create(const LayerParams& params)
...
@@ -312,6 +312,20 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for (int i = 0; i < inputs.size(); i++)
{
flops += total(inputs[i], 2) * _numPriors * 4;
}
return flops;
}
float _minSize;
float _maxSize;
...
@@ -56,6 +56,18 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 2*total(inputs[i]);
}
return flops;
}
bool hasBias;
};
...
@@ -81,6 +81,20 @@ public:
}
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += total(inputs[i]);
}
return flops;
}
};
Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
...
@@ -146,6 +146,20 @@ public:
}
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
for (int i = 0; i < inputs.size(); i++)
{
flops += 4*total(inputs[i]);
}
return flops;
}
int axisRaw;
};
...
@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cvtest
{
...
@@ -43,6 +43,7 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cvtest
{
...