Commit 589286bb authored by Vitaliy Lyudvichenko

Add Python bindings for the dnn module

parent a62f7e1d
@@ -77,7 +77,7 @@ namespace dnn
{
public:
/** Creates instance of LSTM layer */
-static Ptr<LSTMLayer> create();
+static CV_WRAP Ptr<LSTMLayer> create();
/** Set trained weights for LSTM layer.
LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights.
@@ -109,27 +109,27 @@
@param Wx is matrix defining how current input is transformed to internal gates (i.e. according to the above-mentioned notation is @f$ W_x @f$)
@param b is bias vector (i.e. according to the above-mentioned notation is @f$ b @f$)
*/
-virtual void setWeights(const Blob &Wh, const Blob &Wx, const Blob &b) = 0;
+CV_WRAP virtual void setWeights(const Blob &Wh, const Blob &Wx, const Blob &b) = 0;
/** @brief Specifies shape of output blob which will be [[`T`], `N`] + @p outTailShape.
* @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used,
* where `Wh` is parameter from setWeights().
*/
-virtual void setOutShape(const BlobShape &outTailShape = BlobShape::empty()) = 0;
+CV_WRAP virtual void setOutShape(const BlobShape &outTailShape = BlobShape::empty()) = 0;
/** @brief Set @f$ h_{t-1} @f$ value that will be used in next forward() calls.
* @details By default @f$ h_{t-1} @f$ is initialized with zeros and updated after each forward() call.
*/
-virtual void setH(const Blob &H) = 0;
+CV_WRAP virtual void setH(const Blob &H) = 0;
/** @brief Returns current @f$ h_{t-1} @f$ value (deep copy). */
-virtual Blob getH() const = 0;
+CV_WRAP virtual Blob getH() const = 0;
/** @brief Set @f$ c_{t-1} @f$ value that will be used in next forward() calls.
* @details By default @f$ c_{t-1} @f$ is initialized with zeros and updated after each forward() call.
*/
-virtual void setC(const Blob &C) = 0;
+CV_WRAP virtual void setC(const Blob &C) = 0;
/** @brief Returns current @f$ c_{t-1} @f$ value (deep copy). */
-virtual Blob getC() const = 0;
+CV_WRAP virtual Blob getC() const = 0;
/** @brief Specifies whether the first dimension of the input blob is interpreted as the timestamp dimension or as the sample dimension.
*
@@ -139,12 +139,12 @@
* If flag is set to false then shape of input blob will be interpreted as [`N`, `[data dims]`].
* In this case each forward() call will make one iteration and produce one timestamp with shape [`N`, `[out dims]`].
*/
-virtual void setUseTimstampsDim(bool use = true) = 0;
+CV_WRAP virtual void setUseTimstampsDim(bool use = true) = 0;
/** @brief If this flag is set to true then layer will produce @f$ c_t @f$ as second output.
* @details Shape of the second output is the same as first output.
*/
-virtual void setProduceCellOutput(bool produce = false) = 0;
+CV_WRAP virtual void setProduceCellOutput(bool produce = false) = 0;
/** In the common case it uses a single input with @f$x_t@f$ values to compute output(s) @f$h_t@f$ (and @f$c_t@f$).
* @param input should contain packed values @f$x_t@f$
@@ -168,7 +168,7 @@
{
public:
/** Creates instance of RNNLayer */
-static Ptr<RNNLayer> create();
+static CV_WRAP Ptr<RNNLayer> create();
/** Sets up learned weights.
@@ -184,12 +184,12 @@
@param Who is @f$ W_{xo} @f$ matrix
@param bo is @f$ b_{o} @f$ vector
*/
-virtual void setWeights(const Blob &Wxh, const Blob &bh, const Blob &Whh, const Blob &Who, const Blob &bo) = 0;
+CV_WRAP virtual void setWeights(const Blob &Wxh, const Blob &bh, const Blob &Whh, const Blob &Who, const Blob &bo) = 0;
/** @brief If this flag is set to true then layer will produce @f$ h_t @f$ as second output.
* @details Shape of the second output is the same as first output.
*/
-virtual void setProduceHiddenOutput(bool produce = false) = 0;
+CV_WRAP virtual void setProduceHiddenOutput(bool produce = false) = 0;
/** Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and computes two outputs @f$o_t@f$ and @f$h_t@f$.
@@ -209,21 +209,21 @@
{
public:
-Size kernel, stride, pad;
+CV_PROP_RW Size kernel, stride, pad;
};
class CV_EXPORTS_W ConvolutionLayer : public BaseConvolutionLayer
{
public:
-static Ptr<BaseConvolutionLayer> create(Size kernel = Size(3, 3), Size stride = Size(1, 1), Size pad = Size(0, 0));
+static CV_WRAP Ptr<BaseConvolutionLayer> create(Size kernel = Size(3, 3), Size stride = Size(1, 1), Size pad = Size(0, 0));
};
class CV_EXPORTS_W DeconvolutionLayer : public BaseConvolutionLayer
{
public:
-static Ptr<BaseConvolutionLayer> create(Size kernel = Size(3, 3), Size stride = Size(1, 1), Size pad = Size(0, 0));
+static CV_WRAP Ptr<BaseConvolutionLayer> create(Size kernel = Size(3, 3), Size stride = Size(1, 1), Size pad = Size(0, 0));
};
class CV_EXPORTS_W LRNLayer : public Layer
@@ -235,12 +235,12 @@
CHANNEL_NRM,
SPATIAL_NRM
};
-int type;
-int size;
-double alpha, beta;
-static Ptr<LRNLayer> create(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
+CV_PROP_RW int type;
+CV_PROP_RW int size;
+CV_PROP_RW double alpha, beta;
+static CV_WRAP Ptr<LRNLayer> create(int type = LRNLayer::CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
};
class CV_EXPORTS_W PoolingLayer : public Layer
@@ -254,34 +254,34 @@
STOCHASTIC
};
-int type;
-Size kernel, stride, pad;
-static Ptr<PoolingLayer> create(int type = MAX, Size kernel = Size(2, 2), Size stride = Size(1, 1), Size pad = Size(0, 0));
+CV_PROP_RW int type;
+CV_PROP_RW Size kernel, stride, pad;
+static CV_WRAP Ptr<PoolingLayer> create(int type = PoolingLayer::MAX, Size kernel = Size(2, 2), Size stride = Size(1, 1), Size pad = Size(0, 0));
};
class CV_EXPORTS_W SoftmaxLayer : public Layer
{
public:
-static Ptr<SoftmaxLayer> create(int axis = 1);
+static CV_WRAP Ptr<SoftmaxLayer> create(int axis = 1);
};
class CV_EXPORTS_W InnerProductLayer : public Layer
{
public:
-int axis;
-static Ptr<InnerProductLayer> create(int axis = 1);
+CV_PROP_RW int axis;
+static CV_WRAP Ptr<InnerProductLayer> create(int axis = 1);
};
class CV_EXPORTS_W MVNLayer : public Layer
{
public:
-double eps;
-bool normVariance, acrossChannels;
-static Ptr<MVNLayer> create(bool normVariance = true, bool acrossChannels = false, double eps = 1e-9);
+CV_PROP_RW double eps;
+CV_PROP_RW bool normVariance, acrossChannels;
+static CV_WRAP Ptr<MVNLayer> create(bool normVariance = true, bool acrossChannels = false, double eps = 1e-9);
};
/* Reshaping */
@@ -289,10 +289,10 @@
class CV_EXPORTS_W ReshapeLayer : public Layer
{
public:
-BlobShape newShapeDesc;
-Range newShapeRange;
-static Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all());
+CV_PROP_RW BlobShape newShapeDesc;
+CV_PROP_RW Range newShapeRange;
+static CV_WRAP Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all());
};
class CV_EXPORTS_W ConcatLayer : public Layer
@@ -300,7 +300,7 @@
public:
int axis;
-static Ptr<ConcatLayer> create(int axis = 1);
+static CV_WRAP Ptr<ConcatLayer> create(int axis = 1);
};
class CV_EXPORTS_W SplitLayer : public Layer
@@ -308,17 +308,17 @@
public:
int outputsCount; //!< Number of copies that will be produced (is ignored when negative).
-static Ptr<SplitLayer> create(int outputsCount = -1);
+static CV_WRAP Ptr<SplitLayer> create(int outputsCount = -1);
};
class CV_EXPORTS_W SliceLayer : public Layer
{
public:
-int axis;
-std::vector<int> sliceIndices;
-static Ptr<SliceLayer> create(int axis);
-static Ptr<SliceLayer> create(int axis, const std::vector<int> &sliceIndices);
+CV_PROP_RW int axis;
+CV_PROP std::vector<int> sliceIndices;
+static CV_WRAP Ptr<SliceLayer> create(int axis);
+static CV_WRAP Ptr<SliceLayer> create(int axis, const std::vector<int> &sliceIndices);
};
/* Activations */
@@ -326,41 +326,41 @@
class CV_EXPORTS_W ReLULayer : public Layer
{
public:
-double negativeSlope;
-static Ptr<ReLULayer> create(double negativeSlope = 0);
+CV_PROP_RW double negativeSlope;
+static CV_WRAP Ptr<ReLULayer> create(double negativeSlope = 0);
};
class CV_EXPORTS_W TanHLayer : public Layer
{
public:
-static Ptr<TanHLayer> create();
+static CV_WRAP Ptr<TanHLayer> create();
};
class CV_EXPORTS_W SigmoidLayer : public Layer
{
public:
-static Ptr<SigmoidLayer> create();
+static CV_WRAP Ptr<SigmoidLayer> create();
};
class CV_EXPORTS_W BNLLLayer : public Layer
{
public:
-static Ptr<BNLLLayer> create();
+static CV_WRAP Ptr<BNLLLayer> create();
};
class CV_EXPORTS_W AbsLayer : public Layer
{
public:
-static Ptr<AbsLayer> create();
+static CV_WRAP Ptr<AbsLayer> create();
};
class CV_EXPORTS_W PowerLayer : public Layer
{
public:
-double power, scale, shift;
-static Ptr<PowerLayer> create(double power = 1, double scale = 1, double shift = 0);
+CV_PROP_RW double power, scale, shift;
+static CV_WRAP Ptr<PowerLayer> create(double power = 1, double scale = 1, double shift = 0);
};
//! @}
......
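The CV_WRAP and CV_PROP_RW markers above are what the binding generator picks up. A minimal sketch of the resulting usage from Python, assuming the static factories are exported under the usual ClassName_method convention (dnn.ReLULayer_create is an assumption here, not taken verbatim from this commit):

from cv2 import dnn

relu = dnn.ReLULayer_create(0.1)   # assumed generated name for the static create() factory
print(relu.negativeSlope)          # CV_PROP_RW field exposed as a readable attribute
relu.negativeSlope = 0.2           # ...and writable from Python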
@@ -54,7 +54,7 @@ namespace dnn
//! @{
/** @brief Lightweight class for storing and processing a shape of blob (or anything else). */
-struct BlobShape
+struct CV_EXPORTS_W BlobShape
{
BlobShape(); //!< Creates [1, 1, 1, 1] shape. @todo Make the behavior clearer.
explicit BlobShape(int s0); //!< Creates 1-dim shape [@p s0]
@@ -154,7 +154,7 @@
/** @brief Constructs Blob from existing Mat or UMat. */
Blob(InputArray data);
-/** @brief Constucts 4-dimensional blob (so-called batch) from image or array of images.
+/** @brief Constructs 4-dimensional blob (so-called batch) from image or array of images.
* @param image 2-dimensional multi-channel or 3-dimensional single-channel image (or array of such images)
* @param dstCn specifies size of second axis of output blob
*/
@@ -312,17 +312,17 @@
public:
enum DataState
{
-UNINITIALIZED,
-HEAD_AT_MAT,
-HEAD_AT_UMAT,
-SYNCED
+UNINITIALIZED = 0,
+HEAD_AT_MAT = 1 << 0,
+HEAD_AT_UMAT = 1 << 1,
+SYNCED = HEAD_AT_MAT | HEAD_AT_UMAT
};
enum AllocFlag
{
-ALLOC_MAT = 1,
-ALLOC_UMAT = 2,
-ALLOC_BOTH = 3
+ALLOC_MAT = HEAD_AT_MAT,
+ALLOC_UMAT = HEAD_AT_UMAT,
+ALLOC_BOTH = SYNCED
};
};
......
@@ -62,7 +62,8 @@ struct DictValue
DictValue(int p = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
-DictValue(const String &p) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = p; } //!< Constructs string scalar
+DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
+DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
template<typename TypeIter>
static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array
......
@@ -86,7 +86,7 @@ namespace dnn //! This namespace is used for dnn module functionality.
public:
//! List of learned parameters must be stored here to allow reading them via Net::getParam().
-std::vector<Blob> blobs;
+CV_PROP_RW std::vector<Blob> blobs;
/** @brief Allocates internal buffers and output blobs with respect to the shape of inputs.
* @param[in] input vector of already allocated input blobs
@@ -104,6 +104,18 @@ namespace dnn //! This namespace is used for dnn module functionality.
*/
virtual void forward(std::vector<Blob*> &input, std::vector<Blob> &output) = 0;
+/** @brief @overload */
+CV_WRAP void allocate(const std::vector<Blob> &inputs, CV_OUT std::vector<Blob> &outputs);
+/** @brief @overload */
+CV_WRAP std::vector<Blob> allocate(const std::vector<Blob> &inputs);
+/** @brief @overload */
+CV_WRAP void forward(const std::vector<Blob> &inputs, CV_IN_OUT std::vector<Blob> &outputs);
+/** @brief Allocates layer and computes output. */
+CV_WRAP void run(const std::vector<Blob> &inputs, CV_OUT std::vector<Blob> &outputs);
/** @brief Returns index of input blob into the input array.
* @param inputName label of input blob
*
@@ -116,8 +128,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
*/
virtual int outputNameToIndex(String outputName);
-String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
-String type; //!< Type name which was used for creating layer by layer factory.
+CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
+CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
Layer();
explicit Layer(const LayerParams &params); //!< Initializes only #name, #type and #blobs fields.
@@ -135,12 +147,15 @@ namespace dnn //! This namespace is used for dnn module functionality.
*
* This class supports reference counting of its instances, i.e. copies point to the same instance.
*/
-class CV_EXPORTS_W Net
+class CV_EXPORTS_W_SIMPLE Net
{
public:
-Net(); //!< Default constructor.
-~Net(); //!< Destructor frees the net only if there aren't references to the net anymore.
+CV_WRAP Net(); //!< Default constructor.
+CV_WRAP ~Net(); //!< Destructor frees the net only if there aren't references to the net anymore.
+/** Returns true if there are no layers in the network. */
+CV_WRAP bool empty() const;
/** @brief Adds new layer to the net.
* @param name unique name of the adding layer.
@@ -157,13 +172,18 @@
/** @brief Converts string name of the layer to the integer identifier.
* @returns id of the layer, or -1 if the layer wasn't found.
*/
-int getLayerId(const String &layer);
+CV_WRAP int getLayerId(const String &layer);
+CV_WRAP std::vector<String> getLayerNames() const;
/** @brief Container for strings and integers. */
typedef DictValue LayerId;
+/** @brief Returns pointer to layer with specified id or name which the network uses. */
+CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
/** @brief Deletes layer from the network (not implemented yet) */
-void deleteLayer(LayerId layer);
+CV_WRAP void deleteLayer(LayerId layer);
/** @brief Connects output of the first layer to input of the second layer.
* @param outPin descriptor of the first layer output.
@@ -178,7 +198,7 @@
*
* @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
*/
-void connect(String outPin, String inpPin);
+CV_WRAP void connect(String outPin, String inpPin);
/** @brief Connects #@p outNum output of the first layer to #@p inNum input of the second layer.
* @param outLayerId identifier of the first layer
@@ -188,19 +208,22 @@
*/
void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
-/** @brief Sets ouputs names of the network input pseudo layer.
+/** @brief Sets outputs names of the network input pseudo layer.
*
* Each net always has its own special network input pseudo-layer with id=0.
* This layer stores the user blobs only and doesn't make any computations.
* In fact, this layer provides the only way to pass user data into the network.
* Like any other layer, this layer can label its outputs and this function provides an easy way to do this.
*/
-void setNetInputs(const std::vector<String> &inputBlobNames);
+CV_WRAP void setNetInputs(const std::vector<String> &inputBlobNames);
+/** @brief Initializes and allocates all layers. */
+CV_WRAP void allocate();
-/** @brief Runs forward pass for the whole network */
-void forward();
-/** @brief Runs forward pass to compute output of layer @p toLayer */
-void forward(LayerId toLayer);
+/** @brief Runs forward pass to compute output of layer @p toLayer.
+* @details By default runs forward pass for the whole network.
+*/
+CV_WRAP void forward(LayerId toLayer = String());
/** @brief Runs forward pass to compute output of layer @p toLayer, but computations start from @p startLayer */
void forward(LayerId startLayer, LayerId toLayer);
/** @overload */
@@ -222,12 +245,13 @@
* @note If updating blob is not empty then @p blob must have the same shape,
* because network reshaping is not implemented yet.
*/
-void setBlob(String outputName, const Blob &blob);
+CV_WRAP void setBlob(String outputName, const Blob &blob);
/** @brief Returns the layer output blob.
* @param outputName the descriptor of the returning layer output blob.
* @see connect(String, String)
*/
-Blob getBlob(String outputName);
+CV_WRAP Blob getBlob(String outputName);
/** @brief Sets the new value for the learned param of the layer.
* @param layer name or id of the layer.
@@ -237,13 +261,14 @@
* @note If shape of the new blob differs from the previous shape,
* then the following forward pass may fail.
*/
-void setParam(LayerId layer, int numParam, const Blob &blob);
+CV_WRAP void setParam(LayerId layer, int numParam, const Blob &blob);
/** @brief Returns parameter blob of the layer.
* @param layer name or id of the layer.
* @param numParam index of the layer parameter in the Layer::blobs array.
* @see Layer::blobs
*/
-Blob getParam(LayerId layer, int numParam = 0);
+CV_WRAP Blob getParam(LayerId layer, int numParam = 0);
private:
@@ -252,12 +277,12 @@
};
/** @brief Small interface class for loading trained serialized models of different dnn-frameworks. */
-class Importer
+class CV_EXPORTS_W Importer
{
public:
-/** @brief Adds loaded layers into the @p net and sets connetions between them. */
-virtual void populateNet(Net net) = 0;
+/** @brief Adds loaded layers into the @p net and sets connections between them. */
+CV_WRAP virtual void populateNet(Net net) = 0;
virtual ~Importer();
};
@@ -267,7 +292,12 @@
* @param caffeModel path to the .caffemodel file with learned network.
* @returns Pointer to the created importer, NULL in failure cases.
*/
-CV_EXPORTS Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel = String());
+CV_EXPORTS_W Ptr<Importer> createCaffeImporter(const String &prototxt, const String &caffeModel = String());
+/** @brief Reads a network model stored in Caffe model files.
+* @details This is a shortcut consisting of createCaffeImporter() and Importer::populateNet() calls.
+*/
+CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String());
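readNetFromCaffe() declared above is only a convenience wrapper around the importer. A rough Python equivalent, under the assumption that the wrapped factory is exported as dnn.createCaffeImporter (file names are placeholders taken from the sample below):

importer = dnn.createCaffeImporter('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')
net = dnn.Net()
importer.populateNet(net)
# the one-line shortcut does the same internally:
net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')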
/** @brief Creates the importer of <a href="http://torch.ch">Torch7</a> framework network.
* @param filename path to the file, dumped from Torch by using torch.save() function.
@@ -294,12 +324,12 @@
*
* Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
*/
-CV_EXPORTS Ptr<Importer> createTorchImporter(const String &filename, bool isBinary = true);
+CV_EXPORTS_W Ptr<Importer> createTorchImporter(const String &filename, bool isBinary = true);
/** @brief Loads blob which was serialized as torch.Tensor object of Torch7 framework.
* @warning This function has the same limitations as createTorchImporter().
*/
-CV_EXPORTS Blob readTorchBlob(const String &filename, bool isBinary = true);
+CV_EXPORTS_W Blob readTorchBlob(const String &filename, bool isBinary = true);
//! @}
}
......
from __future__ import print_function
import numpy as np
import cv2
from cv2 import dnn
import timeit
def prepare_image(img):
    img = cv2.resize(img, (224, 224))
    #convert interleaved image (RGBRGB) to planar(RRGGBB)
    blob = np.moveaxis(img, 2, 0)
    blob = np.reshape(blob.astype(np.float32), (-1, 3, 224, 224))
    return blob

def timeit_forward(net):
    print("OpenCL:", cv2.ocl.useOpenCL())
    print("Runtime:", timeit.timeit(lambda: net.forward(), number=10))

def get_class_list():
    with open('synset_words.txt', 'rt') as f:
        return [x[x.find(" ") + 1:] for x in f]

blob = prepare_image(cv2.imread('space_shuttle.jpg'))
print("Input:", blob.shape, blob.dtype)

cv2.ocl.setUseOpenCL(True)  #Disable OCL if you want
net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')
net.setBlob(".data", blob)
net.forward()
#timeit_forward(net)        #Uncomment to check performance
prob = net.getBlob("prob")
print("Output:", prob.shape, prob.dtype)
classes = get_class_list()
print("Best match", classes[prob.argmax()])
\ No newline at end of file
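An optional extension of the sample above (plain numpy, nothing dnn-specific): report the five best classes instead of only the top one:

scores = prob.reshape(-1)
top5 = scores.argsort()[::-1][:5]
for i in top5:
    print("%.4f  %s" % (scores[i], classes[i].strip()))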
#ifdef HAVE_OPENCV_DNN
typedef dnn::DictValue LayerId;
typedef std::vector<cv::dnn::Blob> vector_Blob;

template<>
bool pyopencv_to(PyObject *o, dnn::Blob &blob, const char *name);

template<> struct pyopencvVecConverter<dnn::Blob>
{
    static bool to(PyObject* obj, std::vector<dnn::Blob>& value, const ArgInfo info)
    {
        if (PyArray_Check(obj))
        {
            value.resize(1);
            return pyopencv_to(obj, value[0], info.name);
        }
        return pyopencv_to_generic_vec(obj, value, info);
    }

    static PyObject* from(const std::vector<dnn::Blob>& value)
    {
        return pyopencv_from_generic_vec(value);
    }
};

template<>
bool pyopencv_to(PyObject *o, std::vector<dnn::Blob> &blobs, const char *name) //required for Layer::blobs RW
{
    return pyopencvVecConverter<dnn::Blob>::to(o, blobs, ArgInfo(name, false));
}

template<>
bool pyopencv_to(PyObject *o, dnn::Blob &blob, const char *name)
{
    Mat &dst = blob.matRef();
    if (!pyopencv_to(o, dst, name))
        return false;

    if (PyArray_Check(o)) //try fix channels
    {
        PyArrayObject* oarr = (PyArrayObject*) o;
        if (PyArray_NDIM(oarr) == dst.dims)
            return true;

        int ndims = PyArray_NDIM(oarr);
        std::vector<int> shape(ndims);
        const npy_intp* _sizes = PyArray_DIMS(oarr);
        for (int i = 0; i < ndims; i++)
            shape[i] = (int)_sizes[i];
        dst = dst.reshape(1, ndims, &shape[0]);
    }
    return true;
}

template<>
PyObject *pyopencv_from(const dnn::Blob &blob)
{
    return pyopencv_from(blob.matRefConst());
}

template<>
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
{
    (void)name;
    if (!o || o == Py_None)
        return true; //Current state will be used
    else if (PyLong_Check(o))
    {
        dv = dnn::DictValue(PyLong_AsLong(o));
        return true;
    }
    else if (PyFloat_Check(o))
    {
        dv = dnn::DictValue(PyFloat_AS_DOUBLE(o));
        return true;
    }
    else if (PyString_Check(o))
    {
        dv = dnn::DictValue(String(PyString_AsString(o)));
        return true;
    }
    else
        return false;
}
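The DictValue converter above is what lets LayerId arguments be given as several Python types. A hedged illustration, assuming a net populated as in the sample script ("conv1" is a placeholder layer name):

layer = net.getLayer("conv1")   # str -> DictValue holding a layer name
layer = net.getLayer(1)         # int -> DictValue holding a numeric layer id
w = net.getParam("conv1")       # the same conversion applies to getParam()/setParam()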
template<>
bool pyopencv_to(PyObject *o, dnn::BlobShape &shape, const char *name)
{
    std::vector<int> data;
    if (!pyopencv_to_generic_vec(o, data, ArgInfo(name, false)))
        return false;

    shape = data.size() ? dnn::BlobShape((int)data.size(), &data[0]) : dnn::BlobShape::empty();
    return true;
}

template<>
PyObject *pyopencv_from(const dnn::BlobShape &shape)
{
    std::vector<int> data(shape.ptr(), shape.ptr() + shape.dims());
    return pyopencv_from_generic_vec(data);
}
#endif
\ No newline at end of file
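One consequence of the pyopencvVecConverter<dnn::Blob> specialization above: wherever a std::vector<Blob> is expected, a bare numpy array should also be accepted and promoted to a one-element vector. A sketch under that assumption (layer and blob as in the other examples; allocate() returning the output list follows from the wrapped overload that returns std::vector<Blob>):

outs = layer.allocate([blob])   # an explicit Python list of blobs
outs = layer.allocate(blob)     # a single numpy array, wrapped into a 1-element vector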
@@ -63,16 +63,15 @@ Blob::Blob(InputArray data)
#ifndef CV_DNN_UMAT
m = data.getMat();
#else
-CV_Assert(data.isMat() || data.isUMat());
-if (data.isMat())
+if (data.isUMat())
{
-m = data.getMat();
-state = HEAD_AT_MAT;
+um = data.getUMat();
+state = HEAD_AT_UMAT;
}
else
{
-um = data.getUMat();
-state = HEAD_AT_UMAT;
+m = data.getMat();
+state = HEAD_AT_MAT;
}
#endif
}
......
@@ -353,3 +353,12 @@ Ptr<Importer> cv::dnn::createCaffeImporter(const String&, const String&)
}
#endif //HAVE_PROTOBUF

Net cv::dnn::readNetFromCaffe(const String &prototxt, const String &caffeModel /*= String()*/)
{
    Ptr<Importer> caffeImporter = createCaffeImporter(prototxt, caffeModel);
    Net net;
    if (caffeImporter)
        caffeImporter->populateNet(net);
    return net;
}
@@ -44,6 +44,7 @@
#include <algorithm>
#include <iostream>
#include <sstream>
+#include <iterator>
using namespace cv;
using namespace cv::dnn;
@@ -127,7 +128,7 @@ struct LayerData
};
//fake layer containing network input blobs
-struct NetInputLayer : public Layer
+struct DataLayer : public Layer
{
void allocate(const std::vector<Blob*>&, std::vector<Blob>&) {}
void forward(std::vector<Blob*>&, std::vector<Blob>&) {}
@@ -152,7 +153,7 @@ struct Net::Impl
Impl()
{
//allocate fake net input layer
-netInputLayer = Ptr<NetInputLayer>(new NetInputLayer());
+netInputLayer = Ptr<DataLayer>(new DataLayer());
LayerData &inpl = layers.insert( make_pair(0, LayerData()) ).first->second;
inpl.id = 0;
inpl.name = "_input";
@@ -163,7 +164,7 @@
netWasAllocated = false;
}
-Ptr<NetInputLayer> netInputLayer;
+Ptr<DataLayer> netInputLayer;
std::vector<int> netOutputs;
typedef std::map<int, LayerData> MapIdToLayerData;
@@ -328,11 +329,16 @@
netOutputs.push_back(lid);
}
+#ifndef NDEBUG
std::cout << "\nNet Outputs(" << netOutputs.size() << "):\n";
for (size_t i = 0; i < netOutputs.size(); i++)
-std::cout << layers[netOutputs[i]].name << std::endl;
+std::cout << layers[netOutputs[i]].name << "\n";
+#endif
}
+#define CV_RETHROW_ERROR(err, newmsg)\
+cv::error(err.code, newmsg, err.func.c_str(), err.file.c_str(), err.line)
void allocateLayer(int lid)
{
LayerData &ld = layers[lid];
@@ -361,7 +367,15 @@
//allocate layer
ld.outputBlobs.resize(std::max((size_t)1, ld.requiredOutputs.size())); //layer produces at least one output blob
-ld.getLayerInstance()->allocate(ld.inputBlobs, ld.outputBlobs);
+Ptr<Layer> layerPtr = ld.getLayerInstance();
+try
+{
+layerPtr->allocate(ld.inputBlobs, ld.outputBlobs);
+}
+catch (const cv::Exception &err)
+{
+CV_RETHROW_ERROR(err, format("The following error occurred while making allocate() for layer \"%s\": %s", ld.name.c_str(), err.err.c_str()));
+}
ld.flag = 1;
}
@@ -399,7 +413,14 @@
}
//forward itself
-ld.layerInstance->forward(ld.inputBlobs, ld.outputBlobs);
+try
+{
+ld.layerInstance->forward(ld.inputBlobs, ld.outputBlobs);
+}
+catch (const cv::Exception &err)
+{
+CV_RETHROW_ERROR(err, format("The following error occurred while making forward() for layer \"%s\": %s", ld.name.c_str(), err.err.c_str()));
+}
ld.flag = 1;
}
@@ -417,12 +438,10 @@
Net::Net() : impl(new Net::Impl)
{
}
Net::~Net()
{
}
int Net::addLayer(const String &name, const String &type, LayerParams &params)
@@ -469,16 +488,19 @@ void Net::connect(String _outPin, String _inPin)
impl->connect(outPin.lid, outPin.oid, inpPin.lid, inpPin.oid);
}
-void Net::forward()
+void Net::allocate()
{
impl->setUpNet();
-impl->forwardAll();
}
void Net::forward(LayerId toLayer)
{
impl->setUpNet();
-impl->forwardLayer(impl->getLayerData(toLayer));
+if (toLayer.isString() && toLayer.get<String>().empty())
+impl->forwardAll();
+else
+impl->forwardLayer(impl->getLayerData(toLayer));
}
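With the empty-string default argument, the single wrapped forward() now covers both cases from Python; a hedged sketch ("loss3/classifier" is a placeholder layer name):

net.forward()                    # empty LayerId -> impl->forwardAll(), i.e. the whole network
net.forward("loss3/classifier")  # named layer  -> forward only up to that layer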
void Net::setNetInputs(const std::vector<String> &inputBlobNames)
@@ -521,6 +543,16 @@ Blob Net::getParam(LayerId layer, int numParam)
return layerBlobs[numParam];
}

void Net::setParam(LayerId layer, int numParam, const Blob &blob)
{
    LayerData &ld = impl->getLayerData(layer);

    std::vector<Blob> &layerBlobs = ld.layerInstance->blobs;
    CV_Assert(numParam < (int)layerBlobs.size());
    //we don't make strong checks, use this function carefully
    layerBlobs[numParam] = blob;
}

int Net::getLayerId(const String &layer)
{
return impl->getLayerId(layer);
@@ -531,6 +563,34 @@ void Net::deleteLayer(LayerId)
CV_Error(Error::StsNotImplemented, "");
}
Ptr<Layer> Net::getLayer(LayerId layerId)
{
    LayerData &ld = impl->getLayerData(layerId);
    if (!ld.layerInstance)
        CV_Error(Error::StsNullPtr, format("Requested layer \"%s\" was not initialized", ld.name.c_str()));
    return ld.layerInstance;
}

std::vector<String> Net::getLayerNames() const
{
    std::vector<String> res;
    res.reserve(impl->layers.size());

    Impl::MapIdToLayerData::iterator it;
    for (it = impl->layers.begin(); it != impl->layers.end(); it++)
    {
        if (it->second.id) //skip Data layer
            res.push_back(it->second.name);
    }
    return res;
}

bool Net::empty() const
{
    return impl->layers.size() <= 1; //first layer is default Data layer
}
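From Python, empty() and getLayerNames() give a cheap way to check what the importer actually produced; a minimal sketch using the files from the sample script:

net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')
if net.empty():
    raise RuntimeError('network could not be loaded')
print(len(net.getLayerNames()), "layers")   # the _input pseudo-layer is not counted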
//////////////////////////////////////////////////////////////////////////

Importer::~Importer() {}
@@ -560,6 +620,43 @@ int Layer::outputNameToIndex(String)
return -1;
}
template <typename T>
static void vecToPVec(const std::vector<T> &v, std::vector<T*> &pv)
{
    pv.resize(v.size());
    for (size_t i = 0; i < v.size(); i++)
        pv[i] = const_cast<T*>(&v[i]);
}

void Layer::allocate(const std::vector<Blob> &inputs, std::vector<Blob> &outputs)
{
    std::vector<Blob*> inputsp;
    vecToPVec(inputs, inputsp);
    this->allocate(inputsp, outputs);
}

std::vector<Blob> Layer::allocate(const std::vector<Blob> &inputs)
{
    std::vector<Blob> outputs;
    this->allocate(inputs, outputs);
    return outputs;
}

void Layer::forward(const std::vector<Blob> &inputs, std::vector<Blob> &outputs)
{
    std::vector<Blob*> inputsp;
    vecToPVec(inputs, inputsp);
    this->forward(inputsp, outputs);
}

void Layer::run(const std::vector<Blob> &inputs, std::vector<Blob> &outputs)
{
    std::vector<Blob*> inputsp;
    vecToPVec(inputs, inputsp);
    this->allocate(inputsp, outputs);
    this->forward(inputsp, outputs);
}
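These overloads are what the CV_WRAP declarations in dnn.hpp bind to, so a single layer can be driven directly from Python. A hedged sketch (the layer name is a placeholder; run() returning the output list follows from the CV_OUT parameter):

layer = net.getLayer("conv1/7x7_s2")   # placeholder GoogLeNet layer name
print(len(layer.blobs))                # learned parameters exposed via CV_PROP_RW
outs = layer.run([blob])               # allocate() + forward() in one call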
Layer::~Layer() {}

//////////////////////////////////////////////////////////////////////////
......
@@ -692,13 +692,13 @@ struct TorchImporter : public ::cv::dnn::Importer
}
};
-CV_EXPORTS Ptr<Importer> createTorchImporter(const String &filename, bool isBinary)
+Ptr<Importer> createTorchImporter(const String &filename, bool isBinary)
{
return Ptr<Importer>(new TorchImporter(filename, isBinary));
}
-CV_EXPORTS Blob readTorchBlob(const String &filename, bool isBinary)
+Blob readTorchBlob(const String &filename, bool isBinary)
{
Ptr<TorchImporter> importer(new TorchImporter(filename, isBinary));
importer->readObject();
@@ -709,13 +709,13 @@ CV_EXPORTS Blob readTorchBlob(const String &filename, bool isBinary)
#else //ENABLE_TORCH_IMPORTER
-CV_EXPORTS Ptr<Importer> createTorchImporter(const String&, bool)
+Ptr<Importer> createTorchImporter(const String&, bool)
{
CV_Error(Error::StsNotImplemented, "Module was built without Torch importer");
return Ptr<Importer>();
}
-CV_EXPORTS Blob readTorchMat(const String&, bool)
+Blob readTorchBlob(const String&, bool)
{
CV_Error(Error::StsNotImplemented, "Module was built without Torch importer");
return Blob();
......