Commit b51ffe3e authored by Vitaliy Lyudvichenko

Adding public interfaces and refactoring the Reshape and MVN layers.

parent 4f578068
@@ -275,8 +275,26 @@ namespace dnn
     static Ptr<InnerProductLayer> create(int axis = 1);
 };

+class CV_EXPORTS_W MVNLayer : public Layer
+{
+public:
+    double eps;
+    bool normVariance, acrossChannels;
+
+    static Ptr<MVNLayer> create(bool normVariance = true, bool acrossChannels = false, double eps = 1e-9);
+};
+
 /* Reshaping */
+
+class CV_EXPORTS_W ReshapeLayer : public Layer
+{
+public:
+    BlobShape newShapeDesc;
+    Range newShapeRange;
+
+    static Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all());
+};
+
 class CV_EXPORTS_W ConcatLayer : public Layer
 {
 public:
...
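Reviewer note: the two declarations above expose MVN and Reshape through the same factory pattern as the other public layers: plain fields hold the parameters and a static create() returns a configured instance. A minimal usage sketch (assuming the usual <opencv2/dnn.hpp> umbrella header and a two-int BlobShape constructor):

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    // MVN with the declared defaults written out explicitly.
    Ptr<MVNLayer> mvn = MVNLayer::create(/*normVariance=*/true,
                                         /*acrossChannels=*/false,
                                         /*eps=*/1e-9);

    // Reshape mask semantics: 0 copies the source dim, -1 is inferred,
    // so (0, -1) flattens everything after the first axis.
    Ptr<ReshapeLayer> flatten = ReshapeLayer::create(BlobShape(0, -1));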
@@ -115,6 +115,8 @@ namespace dnn
     /** @brief Returns pointer to the first element of continuous size array. */
     const int *ptr() const;
+    /** @overload */
+    int *ptr();

     bool equal(const BlobShape &other) const;   //!< Checks equality of two shapes.
     bool operator== (const BlobShape &r) const; //!< @sa equal()
...
@@ -208,6 +208,11 @@ inline const int *BlobShape::ptr() const
     return sz;
 }

+inline int *BlobShape::ptr()
+{
+    return sz;
+}
+
 inline bool BlobShape::equal(const BlobShape &other) const
 {
     if (this->dims() != other.dims())
...
@@ -57,6 +57,7 @@ inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
 }

 //Reshaping
+//TODO: add -1 specifier for automatic size inferring

 template<typename Mat>
 void reshape(Mat &m, const BlobShape &shape)
@@ -129,31 +130,7 @@ Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2, co
     return m(&ranges[0]);
 }

-//Traits for switching in polymorphic implementations
-template<typename XMat>
-struct MatTraits
-{
-};
-
-template<>
-struct MatTraits<cv::Mat>
-{
-    enum
-    {
-        IS_MAT  = 1,
-        IS_UMAT = 0,
-    };
-};
-
-template<>
-struct MatTraits<cv::UMat>
-{
-    enum
-    {
-        IS_MAT  = 0,
-        IS_UMAT = 1,
-    };
-};
+BlobShape computeShapeByReshapeMask(const BlobShape &srcShape, const BlobShape &maskShape, Range srcRange = Range::all());

 }
 }
...
@@ -40,25 +40,26 @@
 //M*/

 #include "precomp.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {
 namespace dnn
 {

 Blob::Blob()
 {
     CV_DNN_UMAT_ONLY(state = UNINITIALIZED);
 }

 Blob::Blob(const BlobShape &shape, int type, int allocFlags)
 {
     CV_DNN_UMAT_ONLY(state = UNINITIALIZED);
     this->create(shape, type, allocFlags);
 }

 Blob::Blob(InputArray data)
 {
 #ifndef CV_DNN_UMAT
     m = data.getMat();
 #else
@@ -74,10 +75,10 @@ namespace dnn
         state = HEAD_AT_UMAT;
     }
 #endif
 }

 void Blob::create(const BlobShape &shape, int type, int allocFlags)
 {
 #ifndef CV_DNN_UMAT
     CV_Assert(allocFlags & ALLOC_MAT);
     m.create(shape.dims(), shape.ptr(), type);
@@ -99,10 +100,10 @@ namespace dnn
         state = HEAD_AT_UMAT;
     }
 #endif
 }

 void Blob::fill(InputArray in)
 {
 #ifdef CV_DNN_UMAT
     CV_Assert(in.isMat() || in.isUMat());
     if (in.isMat())
@@ -119,15 +120,15 @@ namespace dnn
     CV_Assert(in.isMat());
     m = in.getMat();
 #endif
 }

 static inline int getMatChannels(const Mat &mat)
 {
     return (mat.dims <= 2) ? mat.channels() : mat.size[0];
 }

 static BlobShape getBlobShape(std::vector<Mat> &vmat, int requestedCn = -1)
 {
     BlobShape shape(BlobShape::all(4));
     int cnSum = 0, matCn;
@@ -163,10 +164,10 @@ namespace dnn
     shape[0] = cnSum / shape[-3];
     return shape;
 }

 static std::vector<Mat> extractMatVector(InputArray in)
 {
     if (in.isMat() || in.isUMat())
     {
         return std::vector<Mat>(1, in.getMat());
@@ -186,10 +187,10 @@ namespace dnn
         CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
         return std::vector<Mat>();
     }
 }

 void Blob::batchFromImages(InputArray image, int dstCn)
 {
     CV_Assert(dstCn == -1 || dstCn > 0);
     std::vector<Mat> inMats = extractMatVector(image);
     BlobShape dstShape = getBlobShape(inMats, dstCn);
@@ -223,17 +224,17 @@ namespace dnn
             dstPtr += elemSize * inMat.total();
         }
     }
 }

 Blob Blob::fromImages(InputArray image, int dstCn)
 {
     Blob res;
     res.batchFromImages(image, dstCn);
     return res;
 }

 void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
 {
     if (deepCopy)
     {
         create(shape, type);
@@ -244,10 +245,10 @@ namespace dnn
         m = Mat(shape.dims(), shape.ptr(), type, data);
     }
     CV_DNN_UMAT_ONLY(state = HEAD_AT_MAT);
 }

 void Blob::setTo(InputArray value, int allocFlags)
 {
 #ifdef CV_DNN_UMAT
     if (allocFlags == -1)
     {
@@ -285,10 +286,10 @@ namespace dnn
 #else
     m.setTo(value);
 #endif
 }

 void Blob::updateMat(bool syncData) const
 {
 #ifdef CV_DNN_UMAT
     if (state == UNINITIALIZED || state == SYNCED || state == HEAD_AT_MAT)
     {
@@ -309,10 +310,10 @@ namespace dnn
 #else
     (void)syncData;
 #endif
 }

 void Blob::updateUMat(bool syncData) const
 {
 #ifdef CV_DNN_UMAT
     if (state == UNINITIALIZED || state == SYNCED || state == HEAD_AT_UMAT)
     {
@@ -332,21 +333,23 @@ namespace dnn
 #else
     (void)syncData;
 #endif
 }

 void Blob::sync() const
 {
     updateMat();
     updateUMat();
 }

 Vec4i Blob::shape4() const
 {
     return Vec4i(num(), channels(), rows(), cols());
 }

+//BlobShape
+
 std::ostream &operator<< (std::ostream &stream, const BlobShape &shape)
 {
     stream << "[";
     for (int i = 0; i < shape.dims() - 1; i++)
@@ -355,6 +358,59 @@ namespace dnn
     stream << shape[-1];
     return stream << "]";
 }

+BlobShape computeShapeByReshapeMask(const BlobShape &srcShape, const BlobShape &maskShape, Range srcRange /*= Range::all()*/)
+{
+    if (srcRange == Range::all())
+        srcRange = Range(0, srcShape.dims());
+
+    CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShape.dims());
+    Shape dstShape(srcShape.dims() - srcRange.size() + maskShape.dims(), nullptr);
+    std::copy(srcShape.ptr(), srcShape.ptr() + srcRange.start, dstShape.ptr());
+    std::copy(srcShape.ptr() + srcRange.end, srcShape.ptr() + srcShape.dims(), dstShape.ptr() + srcRange.start + maskShape.dims());
+
+    int inferDim = -1;
+    for (int i = 0; i < maskShape.dims(); i++)
+    {
+        if (maskShape[i] > 0)
+        {
+            dstShape[srcRange.start + i] = maskShape[i];
+        }
+        else if (maskShape[i] == 0)
+        {
+            if (srcRange.start + i >= srcShape.dims())
+                CV_Error(Error::StsBadArg, format("Copy dim[%d] (which has zero size) is out of the source shape bounds", srcRange.start + i));
+            dstShape[srcRange.start + i] = srcShape[srcRange.start + i];
+        }
+        else if (maskShape[i] == -1)
+        {
+            if (inferDim != -1)
+                CV_Error(Error::StsAssert, "Duplicate of inferred dim (which is denoted by -1)");
+            inferDim = srcRange.start + i;
+            dstShape[inferDim] = 1;
+        }
+        else
+            CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
+    }
+
+    if (inferDim != -1)
+    {
+        ptrdiff_t srcTotal = srcShape.total();
+        ptrdiff_t dstTotal = dstShape.total();
+        if (srcTotal % dstTotal != 0)
+            CV_Error(Error::StsBackTrace, "Can't infer a dim denoted by -1");
+
+        dstShape[inferDim] = (int)(srcTotal / dstTotal);
+    }
+    else
+    {
+        CV_Assert(srcShape.total() == dstShape.total());
+    }

+    return dstShape;
+}

 }
 }
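Reviewer note: in computeShapeByReshapeMask a positive mask entry is taken literally, 0 copies the matching source dimension, and a single -1 is inferred so the total element count stays constant; srcRange picks which source axes the mask replaces (writing the result relies on the new non-const BlobShape::ptr() overload). Two worked examples with hypothetical shapes:

    // src [2, 3, 4, 5], mask [0, -1], srcRange = all:
    //   dim 0 is copied (2); -1 is inferred as 120 / 2 = 60; result [2, 60].
    BlobShape flat = computeShapeByReshapeMask(BlobShape(2, 3, 4, 5), BlobShape(0, -1));

    // src [2, 3, 4, 5], mask [12], srcRange = Range(1, 3):
    //   axes 1..2 (sizes 3 and 4) are replaced by 12; result [2, 12, 5].
    BlobShape merged = computeShapeByReshapeMask(BlobShape(2, 3, 4, 5), BlobShape(12), Range(1, 3));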
@@ -162,8 +162,45 @@ Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams& params)
     return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
 }

+template<>
+Ptr<Layer> createLayerFromCaffe<MVNLayer>(LayerParams &params)
+{
+    return Ptr<Layer>(MVNLayer::create(
+        params.get<bool>("normalize_variance", true),
+        params.get<bool>("across_channels", false),
+        params.get<double>("eps", 1e-9)
+    ));
+}
+
 /* Reshape layers */
+
+template<>
+Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
+{
+    int axis = params.get<int>("axis", 0);
+    int numAxes = params.get<int>("num_axes", -1);
+    CV_Assert(numAxes >= -1);
+    Range applyingRange = (numAxes == -1) ? Range::all() : Range(axis, axis + numAxes);
+
+    Shape newShape;
+    if (params.has("dim"))
+    {
+        const DictValue &paramShape = params.get("dim");
+        newShape = Shape(paramShape.size(), nullptr);
+        for (int i = 0; i < paramShape.size(); i++)
+            newShape[i] = paramShape.get<int>(i);
+    }
+    else
+        newShape = Shape::all(0);
+
+    return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange));
+}
+
+Ptr<Layer> createFlattenLayerFromCaffe(LayerParams&)
+{
+    return Ptr<Layer>(ReshapeLayer::create(Shape(0, -1)));
+}
+
 template<>
 Ptr<Layer> createLayerFromCaffe<ConcatLayer>(LayerParams& params)
 {
@@ -239,6 +276,11 @@ template Ptr<Layer> createLayerFromCaffe<DeconvolutionLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<SoftmaxLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<InnerProductLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams&);
+template Ptr<Layer> createLayerFromCaffe<MVNLayer>(LayerParams&);
+
+template Ptr<Layer> createLayerFromCaffe<ConcatLayer>(LayerParams&);
+template Ptr<Layer> createLayerFromCaffe<SliceLayer>(LayerParams&);
+template Ptr<Layer> createLayerFromCaffe<SplitLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<ReLULayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<SigmoidLayer>(LayerParams&);
...
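Reviewer note: the Reshape loader maps Caffe's reshape_param fields one-to-one onto the new interface: num_axes == -1 becomes Range::all(), any other value becomes Range(axis, axis + num_axes), and the dim list becomes the shape mask. A sketch with made-up prototxt values:

    // reshape_param { axis: 1  num_axes: 2  shape { dim: 0 dim: -1 } }
    // is loaded as:
    Ptr<Layer> reshape = ReshapeLayer::create(Shape(0, -1), Range(1, 3));

    // Caffe's Flatten is the fixed special case "keep dim 0, infer the rest":
    Ptr<Layer> flatten = ReshapeLayer::create(Shape(0, -1));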
@@ -53,6 +53,8 @@ namespace dnn
 template <typename PublicLayer>
 Ptr<Layer> createLayerFromCaffe(LayerParams&);

+Ptr<Layer> createFlattenLayerFromCaffe(LayerParams&);
+
 }
 }
 #endif
\ No newline at end of file
@@ -71,10 +71,8 @@ void initModule()
     REG_RUNTIME_LAYER_FUNC(Slice, createLayerFromCaffe<SliceLayer>);
     REG_RUNTIME_LAYER_FUNC(Split, createLayerFromCaffe<SplitLayer>);
     REG_RUNTIME_LAYER_FUNC(Concat, createLayerFromCaffe<ConcatLayer>);
-    REG_RUNTIME_LAYER_CLASS(Reshape, ReshapeLayer)
-    REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayer);
-    REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer)
-    REG_RUNTIME_LAYER_CLASS(MVN, MVNLayer)
+    REG_RUNTIME_LAYER_FUNC(Reshape, createLayerFromCaffe<ReshapeLayer>);
+    REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayerFromCaffe);

     REG_RUNTIME_LAYER_FUNC(Convolution, createLayerFromCaffe<ConvolutionLayer>);
     REG_RUNTIME_LAYER_FUNC(Deconvolution, createLayerFromCaffe<DeconvolutionLayer>);
@@ -82,6 +80,7 @@ void initModule()
     REG_RUNTIME_LAYER_FUNC(LRN, createLayerFromCaffe<LRNLayer>);
     REG_RUNTIME_LAYER_FUNC(InnerProduct, createLayerFromCaffe<InnerProductLayer>);
     REG_RUNTIME_LAYER_FUNC(Softmax, createLayerFromCaffe<SoftmaxLayer>);
+    REG_RUNTIME_LAYER_FUNC(MVN, createLayerFromCaffe<MVNLayer>);

     REG_RUNTIME_LAYER_FUNC(ReLU, createLayerFromCaffe<ReLULayer>);
     REG_RUNTIME_LAYER_FUNC(Sigmoid, createLayerFromCaffe<SigmoidLayer>);
@@ -89,6 +88,7 @@ void initModule()
     REG_RUNTIME_LAYER_FUNC(BNLL, createLayerFromCaffe<BNLLLayer>);
     REG_RUNTIME_LAYER_FUNC(AbsVal, createLayerFromCaffe<AbsLayer>);
     REG_RUNTIME_LAYER_FUNC(Power, createLayerFromCaffe<PowerLayer>);
+    REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer)

     init.status = true;
 }
...
@@ -42,11 +42,12 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include "lrn_layer.hpp"
-#include "opencl_kernels_dnn.hpp"
+#include "modules/dnn/opencl_kernels_dnn.hpp"
 #include <opencv2/imgproc.hpp>
 #include <opencv2/core/ocl.hpp>
 #include <opencv2/dnn/shape_utils.hpp>
 #include <algorithm>
+#include <type_traits>

 namespace cv
 {
@@ -220,7 +221,7 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
         XMat src = getPlane(srcMat, n, cn);
         XMat dst = getPlane(dstMat, n, cn);

-        if (MatTraits<XMat>::IS_UMAT)
+        if (std::is_same<XMat, UMat>::value)
         {
             cv::sqrBoxFilter(src, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT | BORDER_ISOLATED);
         }
...
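Reviewer note: the hand-rolled MatTraits switch is replaced by std::is_same from <type_traits>, which expresses the same compile-time Mat/UMat discrimination with a standard utility. The idiom in isolation (a standalone sketch, not the layer code itself):

    #include <type_traits>
    #include <opencv2/core.hpp>

    template <typename XMat>
    void processPlane(XMat &plane)
    {
        if (std::is_same<XMat, cv::UMat>::value)
        {
            // OpenCL-friendly branch, chosen when the template is
            // instantiated with cv::UMat (the condition is a constant).
        }
        else
        {
            // Plain cv::Mat branch.
        }
    }

Since this is an ordinary runtime if on a compile-time constant, both branches must still compile for both instantiations, exactly as with the old enum-based traits.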
@@ -42,20 +42,21 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include "mvn_layer.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {
 namespace dnn
 {

-MVNLayer::MVNLayer(LayerParams &params) : Layer(params)
+MVNLayerImpl::MVNLayerImpl(bool normVariance_, bool acrossChannels_, double eps_)
 {
-    eps = params.get<double>("eps", 1e-9);
-    acrossChannels = params.get<bool>("across_channels", false);
-    normalizeVariance = params.get<bool>("normalize_variance", true);
+    normVariance = normVariance_;
+    acrossChannels = acrossChannels_;
+    eps = eps_;
 }

-void MVNLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
+void MVNLayerImpl::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     outputs.resize(inputs.size());
     for (size_t i = 0; i < inputs.size(); i++)
@@ -65,20 +66,17 @@ void MVNLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &ou
     }
 }

-void MVNLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
+void MVNLayerImpl::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
     {
         Blob &inpBlob = *inputs[inpIdx];
         Blob &outBlob = outputs[inpIdx];

-        int workSize[2];
         int splitDim = (acrossChannels) ? 1 : 2;
-        workSize[0] = (int)inpBlob.total(0, splitDim);
-        workSize[1] = (int)inpBlob.total(splitDim);
-
-        Mat inpMat = inpBlob.matRef().reshape(1, 2, workSize);
-        Mat outMat = outBlob.matRef().reshape(1, 2, workSize);
+        Shape workSize((int)inpBlob.total(0, splitDim), (int)inpBlob.total(splitDim));
+        Mat inpMat = reshaped(inpBlob.matRefConst(), workSize);
+        Mat outMat = reshaped(outBlob.matRef(), workSize);

         Scalar mean, dev;
         for (int i = 0; i < workSize[0]; i++)
@@ -86,12 +84,18 @@ void MVNLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
             Mat inpRow = inpMat.row(i);
             Mat outRow = outMat.row(i);

-            cv::meanStdDev(inpRow, mean, (normalizeVariance) ? dev : noArray());
-            double alpha = (normalizeVariance) ? 1/(eps + dev[0]) : 1;
+            cv::meanStdDev(inpRow, mean, (normVariance) ? dev : noArray());
+            double alpha = (normVariance) ? 1/(eps + dev[0]) : 1;
             inpRow.convertTo(outRow, outRow.type(), alpha, -mean[0] * alpha);
         }
     }
 }

+Ptr<MVNLayer> MVNLayer::create(bool normVariance, bool acrossChannels, double eps)
+{
+    return Ptr<MVNLayer>(new MVNLayerImpl(normVariance, acrossChannels, eps));
+}
+
 }
 }
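Reviewer note: per row of the reshaped matrix the forward pass computes y = alpha * (x - mean) with alpha = 1 / (eps + stddev) when normVariance is set and alpha = 1 otherwise, i.e. mean subtraction with optional variance normalization. The same arithmetic as a standalone sketch built on the calls used above (outRow is assumed to be preallocated, as it is in the layer):

    #include <opencv2/core.hpp>

    // Normalize one row the way MVNLayerImpl::forward does.
    void mvnRow(const cv::Mat &inpRow, cv::Mat &outRow, bool normVariance, double eps)
    {
        cv::Scalar mean, dev;
        cv::meanStdDev(inpRow, mean, normVariance ? dev : cv::noArray());
        double alpha = normVariance ? 1.0 / (eps + dev[0]) : 1.0;
        // convertTo applies y = alpha * x + beta elementwise;
        // beta = -mean * alpha completes y = alpha * (x - mean).
        inpRow.convertTo(outRow, outRow.type(), alpha, -mean[0] * alpha);
    }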
@@ -42,20 +42,18 @@
 #ifndef __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__
 #define __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__
 #include "../precomp.hpp"
+#include <opencv2/dnn/all_layers.hpp>

 namespace cv
 {
 namespace dnn
 {

-class MVNLayer : public Layer
+class MVNLayerImpl : public MVNLayer
 {
-    double eps;
-    bool acrossChannels, normalizeVariance;
-
 public:
-    MVNLayer(LayerParams &params);
+    MVNLayerImpl(bool normVariance_ = true, bool acrossChannels_ = false, double eps_ = 1e-9);

     void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
 };
...
@@ -42,73 +42,33 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include "reshape_layer.hpp"
+#include <opencv2/dnn/shape_utils.hpp>

 namespace cv
 {
 namespace dnn
 {

-ReshapeLayer::ReshapeLayer(LayerParams &params) : Layer(params)
+ReshapeLayerImpl::ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_)
 {
-    inAxis = params.get<int>("axis", 0);
-    inNumAxes = params.get<int>("num_axes", -1);
-    CV_Assert(inNumAxes >= -1);
-
-    autoAxisIdx = -1;
-
-    if (!params.has("dim"))
-    {
-        shapeDesc = BlobShape::all(0);
-        return;
-    }
-
-    DictValue paramShape = params.get("dim");
-    shapeDesc = BlobShape::all(paramShape.size());
-
-    for (int i = 0; i < paramShape.size(); i++)
-    {
-        int dim = paramShape.get<int>(i);
-        CV_Assert(dim >= -1);
-
-        if (dim == -1)
-        {
-            if (autoAxisIdx != -1)
-                CV_Error(Error::StsBadArg, "New shape contains multiple -1 dims");
-            autoAxisIdx = i;
-        }
-
-        shapeDesc[i] = dim;
-    }
+    newShapeDesc = newShape_;
+    newShapeRange = applyingRange_;
 }

-void ReshapeLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
+void ReshapeLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     outputs.resize(inputs.size());
     outShapes.resize(inputs.size());

     for (size_t i = 0; i < inputs.size(); i++)
     {
-        Blob &inpBlob = *inputs[i];
-        Blob &outBlob = outputs[i];
-        BlobShape inpShape = inpBlob.shape();
-
-        int startAxis = (inAxis >= 0) ? inAxis : inpShape.dims() + 1 + inAxis;
-        int endAxis = (inNumAxes == -1) ? inpShape.dims() : startAxis + inNumAxes;
-        CV_Assert(0 <= startAxis && startAxis <= inpShape.dims());
-        CV_Assert(0 <= endAxis && endAxis <= inpShape.dims());
-
-        int newDims = inpShape.dims() - (endAxis - startAxis) + shapeDesc.dims();
-        BlobShape outShape = BlobShape::all(newDims);
-
-        computeOutputShape(startAxis, endAxis, inpShape, outShape);
-
-        outShapes[i] = outShape;
-        outBlob.shareFrom(inpBlob);
-        outBlob.reshape(outShape);
+        outShapes[i] = computeShapeByReshapeMask(inputs[i]->shape(), newShapeDesc, newShapeRange);
+        outputs[i].shareFrom(*inputs[i]);
+        outputs[i].reshape(outShapes[i]);
     }
 }

-void ReshapeLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
+void ReshapeLayerImpl::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     for (size_t i = 0; i < outputs.size(); i++)
     {
@@ -117,61 +77,11 @@ void ReshapeLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &output
     }
 }

-void ReshapeLayer::computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape)
+Ptr<ReshapeLayer> ReshapeLayer::create(const BlobShape &newShape, Range applyingRange /*= Range::all()*/)
 {
-    int idx = 0;
-    for (int i = 0; i < startAxis; i++)
-        outShape[idx++] = inpShape[i];
-
-    for (int i = 0; i < shapeDesc.dims(); i++)
-    {
-        if (shapeDesc[i] == 0)
-        {
-            int inpAxisIdx = startAxis + i;
-            if (inpAxisIdx < 0 || inpShape.dims() <= inpAxisIdx)
-                CV_Error(Error::StsOutOfRange, "copy dimension (which has zero size) is not presented into reshaped blob");
-            outShape[idx++] = inpShape[startAxis + i];
-        }
-        else
-        {
-            outShape[idx++] = (shapeDesc[i] > 0) ? shapeDesc[i] : 1;
-        }
-    }
-
-    for (int i = endAxis; i < inpShape.dims(); i++)
-        outShape[idx++] = inpShape[i];
-
-    if (autoAxisIdx >= 0)
-    {
-        size_t total = inpShape.total();
-        size_t curTotal = 1;
-        for (int i = 0; i < outShape.dims(); i++)
-        {
-            if (i != startAxis + autoAxisIdx)
-                curTotal *= outShape[i];
-        }
-
-        CV_DbgAssert(curTotal <= total && total % curTotal == 0);
-
-        outShape[startAxis + autoAxisIdx] = (int)(total / curTotal);
-    }
-
-    if (inpShape.total() != outShape.total())
-    {
-        CV_Error(Error::StsUnmatchedSizes, "Mismatch between input and output blob elements count");
-    }
+    return Ptr<ReshapeLayer>(new ReshapeLayerImpl(newShape, applyingRange));
 }

-Ptr<Layer> createFlattenLayer(LayerParams&)
-{
-    LayerParams params;
-    int shapeDesc[] = {0, -1};
-    params.set("dim", DictValue::arrayInt(shapeDesc, 2));
-    return Ptr<Layer>(new ReshapeLayer(params));
-}
-
 }
 }
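Reviewer note: allocate() now routes every output through Blob::shareFrom before calling reshape, so Reshape stays zero-copy: each output is a view over the input storage with new shape metadata. A hedged behavioral sketch (the blob shape and contents are hypothetical):

    Blob input(BlobShape(2, 3, 4, 5), CV_32F);
    Ptr<ReshapeLayer> layer = ReshapeLayer::create(BlobShape(0, -1));

    std::vector<Blob*> inputs(1, &input);
    std::vector<Blob> outputs;
    layer->allocate(inputs, outputs);   // outputs[0] takes shape [2, 60]
    layer->forward(inputs, outputs);

    // outputs[0] shares the input buffer: writes through either blob
    // are visible in the other.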
@@ -42,27 +42,23 @@
 #ifndef __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__
 #define __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__
 #include "../precomp.hpp"
+#include <opencv2/dnn/all_layers.hpp>

 namespace cv
 {
 namespace dnn
 {

-class ReshapeLayer : public Layer
+class ReshapeLayerImpl : public ReshapeLayer
 {
+    std::vector<BlobShape> outShapes;
+
 public:
-    ReshapeLayer(LayerParams &params);
+    ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_);

     void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
-
-protected:
-    BlobShape shapeDesc;
-    std::vector<BlobShape> outShapes;
-    int inAxis, inNumAxes, autoAxisIdx;
-
-    void computeOutputShape(int startAxis, int endAxis, BlobShape &inpShape, BlobShape &outShape);
 };

-Ptr<Layer> createFlattenLayer(LayerParams&);
...