Commit b51ffe3e authored by Vitaliy Lyudvichenko's avatar Vitaliy Lyudvichenko

Add public interfaces and refactor the Reshape and MVN layers.

parent 4f578068
......@@ -275,8 +275,26 @@ namespace dnn
    static Ptr<InnerProductLayer> create(int axis = 1);
};

class CV_EXPORTS_W MVNLayer : public Layer
{
public:
    double eps;
    bool normVariance, acrossChannels;

    static Ptr<MVNLayer> create(bool normVariance = true, bool acrossChannels = false, double eps = 1e-9);
};

/* Reshaping */

class CV_EXPORTS_W ReshapeLayer : public Layer
{
public:
    BlobShape newShapeDesc;
    Range newShapeRange;

    static Ptr<ReshapeLayer> create(const BlobShape &newShape, Range applyingRange = Range::all());
};

class CV_EXPORTS_W ConcatLayer : public Layer
{
public:
......
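The create() factories above are the new public entry points for these layers. A minimal usage sketch, assuming BlobShape's two-int constructor from the existing API (parameter values are illustrative, not taken from the diff):

    #include <opencv2/dnn/all_layers.hpp>
    using namespace cv::dnn;

    // Defaults mirror the declaration: normVariance=true, acrossChannels=false, eps=1e-9.
    Ptr<MVNLayer> mvn = MVNLayer::create(true, false, 1e-9);

    // Flatten everything after dim 0: mask entry 0 copies the batch dim,
    // -1 infers the remaining size (see computeShapeByReshapeMask below).
    Ptr<ReshapeLayer> flatten = ReshapeLayer::create(BlobShape(0, -1));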
......@@ -115,6 +115,8 @@ namespace dnn
    /** @brief Returns pointer to the first element of continuous size array. */
    const int *ptr() const;
    /** @overload */
    int *ptr();

    bool equal(const BlobShape &other) const;   //!< Checks equality of two shapes.
    bool operator== (const BlobShape &r) const; //!< @sa equal()
......
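The new equality helpers read naturally in assertions; a quick sketch (shape values invented, four-int BlobShape constructor assumed from the existing API):

    BlobShape a(1, 3, 224, 224), b(1, 3, 224, 224);
    CV_Assert(a.equal(b)); // element-wise comparison of dims
    CV_Assert(a == b);     // operator== delegates to equal()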
......@@ -208,6 +208,11 @@ inline const int *BlobShape::ptr() const
    return sz;
}

inline int *BlobShape::ptr()
{
    return sz;
}

inline bool BlobShape::equal(const BlobShape &other) const
{
    if (this->dims() != other.dims())
......
......@@ -57,6 +57,7 @@ inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
}
//Reshaping
//TODO: add -1 specifier for automatic size inferring
template<typename Mat>
void reshape(Mat &m, const BlobShape &shape)
......@@ -129,31 +130,7 @@ Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2, co
    return m(&ranges[0]);
}

BlobShape computeShapeByReshapeMask(const BlobShape &srcShape, const BlobShape &maskShape, Range srcRange = Range::all());

}
}
......
......@@ -40,321 +40,377 @@
//M*/
#include "precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
Blob::Blob()
{
    CV_DNN_UMAT_ONLY(state = UNINITIALIZED);
}

Blob::Blob(const BlobShape &shape, int type, int allocFlags)
{
    CV_DNN_UMAT_ONLY(state = UNINITIALIZED);
    this->create(shape, type, allocFlags);
}

Blob::Blob(InputArray data)
{
#ifndef CV_DNN_UMAT
    m = data.getMat();
#else
    CV_Assert(data.isMat() || data.isUMat());
    if (data.isMat())
    {
        m = data.getMat();
        state = HEAD_AT_MAT;
    }
    else
    {
        um = data.getUMat();
        state = HEAD_AT_UMAT;
    }
#endif
}

void Blob::create(const BlobShape &shape, int type, int allocFlags)
{
#ifndef CV_DNN_UMAT
    CV_Assert(allocFlags & ALLOC_MAT);
    m.create(shape.dims(), shape.ptr(), type);
#else
    CV_Assert(allocFlags & ALLOC_MAT || allocFlags & ALLOC_UMAT);

    if (allocFlags & ALLOC_MAT)
        m.create(shape.dims(), shape.ptr(), type);
    if (allocFlags & ALLOC_UMAT)
        um.create(shape.dims(), shape.ptr(), type);

    if (state == UNINITIALIZED)
    {
        if (allocFlags & ALLOC_MAT && allocFlags & ALLOC_UMAT)
            state = SYNCED;
        else if (allocFlags & ALLOC_MAT)
            state = HEAD_AT_MAT;
        else
            state = HEAD_AT_UMAT;
    }
#endif
}

void Blob::fill(InputArray in)
{
#ifdef CV_DNN_UMAT
    CV_Assert(in.isMat() || in.isUMat());
    if (in.isMat())
    {
        m = in.getMat();
        state = HEAD_AT_MAT;
    }
    else
    {
        um = in.getUMat();
        state = HEAD_AT_UMAT;
    }
#else
    CV_Assert(in.isMat());
    m = in.getMat();
#endif
}

static inline int getMatChannels(const Mat &mat)
{
    return (mat.dims <= 2) ? mat.channels() : mat.size[0];
}

static BlobShape getBlobShape(std::vector<Mat> &vmat, int requestedCn = -1)
{
    BlobShape shape(BlobShape::all(4));
    int cnSum = 0, matCn;

    CV_Assert(vmat.size() > 0);

    for (size_t i = 0; i < vmat.size(); i++)
    {
        Mat &mat = vmat[i];
        CV_Assert(!mat.empty());
        CV_Assert((mat.dims == 3 && mat.channels() == 1) || mat.dims <= 2);

        matCn = getMatChannels(mat);
        cnSum += getMatChannels(mat);

        if (i == 0)
        {
            shape[-1] = mat.cols;
            shape[-2] = mat.rows;
            shape[-3] = (requestedCn <= 0) ? matCn : requestedCn;
        }
        else
        {
            if (mat.cols != shape[-1] || mat.rows != shape[-2])
                CV_Error(Error::StsError, "Each Mat.size() must be equal");

            if (requestedCn <= 0 && matCn != shape[-3])
                CV_Error(Error::StsError, "Each Mat.channels() (or number of planes) must be equal");
        }
    }

    if (cnSum % shape[-3] != 0)
        CV_Error(Error::StsError, "Total number of channels in vector is not a multiple of requested channel number");

    shape[0] = cnSum / shape[-3];
    return shape;
}

static std::vector<Mat> extractMatVector(InputArray in)
{
    if (in.isMat() || in.isUMat())
    {
        return std::vector<Mat>(1, in.getMat());
    }
    else if (in.isMatVector())
    {
        return *static_cast<const std::vector<Mat>*>(in.getObj());
    }
    else if (in.isUMatVector())
    {
        std::vector<Mat> vmat;
        in.getMatVector(vmat);
        return vmat;
    }
    else
    {
        CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
        return std::vector<Mat>();
    }
}

void Blob::batchFromImages(InputArray image, int dstCn)
{
    CV_Assert(dstCn == -1 || dstCn > 0);
    std::vector<Mat> inMats = extractMatVector(image);
    BlobShape dstShape = getBlobShape(inMats, dstCn);

    int dtype = CV_32F;
    this->create(dstShape, dtype, ALLOC_MAT);
    uchar *dstPtr = this->matRef().ptr();
    int elemSize = CV_ELEM_SIZE(dtype);

    std::vector<Mat> wrapBuf(dstShape[-3]);
    for (size_t i = 0; i < inMats.size(); i++)
    {
        Mat inMat = inMats[i];

        if (inMat.dims <= 2)
        {
            inMat.convertTo(inMat, dtype);

            wrapBuf.resize(0);
            for (int cn = 0; cn < inMat.channels(); cn++)
            {
                wrapBuf.push_back(Mat(inMat.rows, inMat.cols, dtype, dstPtr));
                dstPtr += elemSize * inMat.total();
            }

            cv::split(inMat, wrapBuf);
        }
        else
        {
            inMat.convertTo(Mat(inMat.dims, inMat.size, dtype, dstPtr), dtype);
            dstPtr += elemSize * inMat.total();
        }
    }
}

Blob Blob::fromImages(InputArray image, int dstCn)
{
    Blob res;
    res.batchFromImages(image, dstCn);
    return res;
}

void Blob::fill(const BlobShape &shape, int type, void *data, bool deepCopy)
{
    if (deepCopy)
    {
        create(shape, type);
        memcpy(ptr(), data, this->total() * CV_ELEM_SIZE(type));
    }
    else
    {
        m = Mat(shape.dims(), shape.ptr(), type, data);
    }
    CV_DNN_UMAT_ONLY(state = HEAD_AT_MAT);
}

void Blob::setTo(InputArray value, int allocFlags)
{
#ifdef CV_DNN_UMAT
    if (allocFlags == -1)
    {
        if (state == HEAD_AT_UMAT)
            um.setTo(value);
        else if (state == HEAD_AT_MAT)
            m.setTo(value);
        else //SYNCED or UNINITIALIZED
        {
            um.setTo(value);
            m.setTo(value);

            if (state == UNINITIALIZED)
                state = SYNCED;
        }
    }
    else if (allocFlags == ALLOC_BOTH)
    {
        m.setTo(value);
        um.setTo(value);
        state = SYNCED;
    }
    else if (allocFlags == ALLOC_MAT)
    {
        matRef().setTo(value);
    }
    else if (allocFlags == ALLOC_UMAT)
    {
        umatRef().setTo(value);
    }
    else
    {
        CV_Error(Error::StsBadArg, "allocFlags should be -1 or one of Blob::AllocFlag values");
    }
#else
    m.setTo(value);
#endif
}

void Blob::updateMat(bool syncData) const
{
#ifdef CV_DNN_UMAT
    if (state == UNINITIALIZED || state == SYNCED || state == HEAD_AT_MAT)
    {
        return;
    }
    else if (state == HEAD_AT_UMAT)
    {
        if (syncData)
            um.copyTo(m);
        else
            m.create(dims(), sizes(), type());
        state = SYNCED;
    }
    else
    {
        CV_Error(Error::StsInternal, "");
    }
#else
    (void)syncData;
#endif
}

void Blob::updateUMat(bool syncData) const
{
#ifdef CV_DNN_UMAT
    if (state == UNINITIALIZED || state == SYNCED || state == HEAD_AT_UMAT)
    {
        return;
    }
    else if (state == HEAD_AT_MAT)
    {
        if (syncData)
            m.copyTo(um);
        else
            um.create(dims(), sizes(), type());
    }
    else
    {
        CV_Error(Error::StsInternal, "");
    }
#else
    (void)syncData;
#endif
}

void Blob::sync() const
{
    updateMat();
    updateUMat();
}

Vec4i Blob::shape4() const
{
    return Vec4i(num(), channels(), rows(), cols());
}

//BlobShape

std::ostream &operator<< (std::ostream &stream, const BlobShape &shape)
{
    stream << "[";
    for (int i = 0; i < shape.dims() - 1; i++)
        stream << shape[i] << ", ";
    if (shape.dims() > 0)
        stream << shape[-1];
    return stream << "]";
}

BlobShape computeShapeByReshapeMask(const BlobShape &srcShape, const BlobShape &maskShape, Range srcRange /*= Range::all()*/)
{
    if (srcRange == Range::all())
        srcRange = Range(0, srcShape.dims());

    CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShape.dims());
    Shape dstShape(srcShape.dims() - srcRange.size() + maskShape.dims(), nullptr);

    std::copy(srcShape.ptr(), srcShape.ptr() + srcRange.start, dstShape.ptr());
    std::copy(srcShape.ptr() + srcRange.end, srcShape.ptr() + srcShape.dims(), dstShape.ptr() + srcRange.start + maskShape.dims());

    int inferDim = -1;
    for (int i = 0; i < maskShape.dims(); i++)
    {
        if (maskShape[i] > 0)
        {
            dstShape[srcRange.start + i] = maskShape[i];
        }
        else if (maskShape[i] == 0)
        {
            if (srcRange.start + i >= srcShape.dims())
                CV_Error(Error::StsBadArg, format("Copy dim[%d] (which has zero size) is out of the source shape bounds", srcRange.start + i));
            dstShape[srcRange.start + i] = srcShape[srcRange.start + i];
        }
        else if (maskShape[i] == -1)
        {
            if (inferDim != -1)
                CV_Error(Error::StsAssert, "Duplicate of inferred dim (which is denoted by -1)");
            inferDim = srcRange.start + i;
            dstShape[inferDim] = 1;
        }
        else
            CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
    }

    if (inferDim != -1)
    {
        ptrdiff_t srcTotal = srcShape.total();
        ptrdiff_t dstTotal = dstShape.total();
        if (srcTotal % dstTotal != 0)
            CV_Error(Error::StsBackTrace, "Can't infer a dim denoted by -1");

        dstShape[inferDim] = (int)(srcTotal / dstTotal);
    }
    else
    {
        CV_Assert(srcShape.total() == dstShape.total());
    }

    return dstShape;
}

}
}
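The mask semantics of computeShapeByReshapeMask on a worked example (shapes invented for illustration): a positive mask entry is taken literally, 0 copies the matching source dim, and -1 is inferred so that the total element count is preserved.

    // srcShape = [2, 3, 4, 5], mask = [0, -1], srcRange = Range::all()
    // dim 0 is copied from the source (2); dim 1 is inferred:
    // total = 2*3*4*5 = 120, so the inferred dim is 120 / 2 = 60.
    Shape dst = computeShapeByReshapeMask(Shape(2, 3, 4, 5), Shape(0, -1));
    CV_Assert(dst.equal(Shape(2, 60)));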
......@@ -162,8 +162,45 @@ Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams& params)
    return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
}

template<>
Ptr<Layer> createLayerFromCaffe<MVNLayer>(LayerParams &params)
{
    return Ptr<Layer>(MVNLayer::create(
        params.get<bool>("normalize_variance", true),
        params.get<bool>("across_channels", false),
        params.get<double>("eps", 1e-9)
    ));
}

/* Reshape layers */

template<>
Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
{
    int axis = params.get<int>("axis", 0);
    int numAxes = params.get<int>("num_axes", -1);
    CV_Assert(numAxes >= -1);
    Range applyingRange = (numAxes == -1) ? Range::all() : Range(axis, axis + numAxes);

    Shape newShape;
    if (params.has("dim"))
    {
        const DictValue &paramShape = params.get("dim");
        newShape = Shape(paramShape.size(), nullptr);
        for (int i = 0; i < paramShape.size(); i++)
            newShape[i] = paramShape.get<int>(i);
    }
    else
        newShape = Shape::all(0);

    return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange));
}

Ptr<Layer> createFlattenLayerFromCaffe(LayerParams&)
{
    return Ptr<Layer>(ReshapeLayer::create(Shape(0, -1)));
}

template<>
Ptr<Layer> createLayerFromCaffe<ConcatLayer>(LayerParams& params)
{
......@@ -239,6 +276,11 @@ template Ptr<Layer> createLayerFromCaffe<DeconvolutionLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<SoftmaxLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<InnerProductLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<MVNLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<ConcatLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<SliceLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<SplitLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<ReLULayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<SigmoidLayer>(LayerParams&);
......
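For reference, a sketch of how a Caffe reshape_param maps through createLayerFromCaffe<ReshapeLayer> (the prototxt values are invented for the example):

    // Caffe: reshape_param { axis: 1  num_axes: 2  shape { dim: 0 dim: -1 } }
    // maps to: applyingRange = Range(1, 3), newShape mask = [0, -1],
    // i.e. dims 1..2 of the input are replaced by [copy of dim 1, inferred].
    Ptr<Layer> reshape = ReshapeLayer::create(Shape(0, -1), Range(1, 3));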
......@@ -53,6 +53,8 @@ namespace dnn
template <typename PublicLayer>
Ptr<Layer> createLayerFromCaffe(LayerParams&);
Ptr<Layer> createFlattenLayerFromCaffe(LayerParams&);
}
}
#endif
\ No newline at end of file
......@@ -71,10 +71,8 @@ void initModule()
    REG_RUNTIME_LAYER_FUNC(Slice, createLayerFromCaffe<SliceLayer>);
    REG_RUNTIME_LAYER_FUNC(Split, createLayerFromCaffe<SplitLayer>);
    REG_RUNTIME_LAYER_FUNC(Concat, createLayerFromCaffe<ConcatLayer>);
    REG_RUNTIME_LAYER_FUNC(Reshape, createLayerFromCaffe<ReshapeLayer>);
    REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayerFromCaffe);

    REG_RUNTIME_LAYER_FUNC(Convolution, createLayerFromCaffe<ConvolutionLayer>);
    REG_RUNTIME_LAYER_FUNC(Deconvolution, createLayerFromCaffe<DeconvolutionLayer>);
......@@ -82,6 +80,7 @@ void initModule()
    REG_RUNTIME_LAYER_FUNC(LRN, createLayerFromCaffe<LRNLayer>);
    REG_RUNTIME_LAYER_FUNC(InnerProduct, createLayerFromCaffe<InnerProductLayer>);
    REG_RUNTIME_LAYER_FUNC(Softmax, createLayerFromCaffe<SoftmaxLayer>);
    REG_RUNTIME_LAYER_FUNC(MVN, createLayerFromCaffe<MVNLayer>);
    REG_RUNTIME_LAYER_FUNC(ReLU, createLayerFromCaffe<ReLULayer>);
    REG_RUNTIME_LAYER_FUNC(Sigmoid, createLayerFromCaffe<SigmoidLayer>);
......@@ -89,6 +88,7 @@ void initModule()
    REG_RUNTIME_LAYER_FUNC(BNLL, createLayerFromCaffe<BNLLLayer>);
    REG_RUNTIME_LAYER_FUNC(AbsVal, createLayerFromCaffe<AbsLayer>);
    REG_RUNTIME_LAYER_FUNC(Power, createLayerFromCaffe<PowerLayer>);
    REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer)

    init.status = true;
}
......
......@@ -42,11 +42,12 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "lrn_layer.hpp"
#include "opencl_kernels_dnn.hpp"
#include "modules/dnn/opencl_kernels_dnn.hpp"
#include <opencv2/imgproc.hpp>
#include <opencv2/core/ocl.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#include <algorithm>
#include <type_traits>
namespace cv
{
......@@ -220,7 +221,7 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
        XMat src = getPlane(srcMat, n, cn);
        XMat dst = getPlane(dstMat, n, cn);

        if (std::is_same<XMat, UMat>::value)
        {
            cv::sqrBoxFilter(src, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT | BORDER_ISOLATED);
        }
......
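The MatTraits<XMat> helper previously declared in shape_utils.hpp is gone; the Mat/UMat branch is now selected with std::is_same directly. A self-contained sketch of the pattern (the helper and filter calls are illustrative, not part of the module):

    #include <type_traits>
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    template<typename XMat>
    void smoothPlane(XMat &plane)
    {
        // Ordinary runtime if (pre-C++17 code), so both branches must compile
        // for both types; cv::boxFilter and cv::blur accept Mat and UMat
        // alike through InputArray/OutputArray.
        if (std::is_same<XMat, cv::UMat>::value)
            cv::boxFilter(plane, plane, -1, cv::Size(3, 3)); // OpenCL-friendly path
        else
            cv::blur(plane, plane, cv::Size(3, 3));
    }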
......@@ -42,20 +42,21 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "mvn_layer.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
MVNLayer::MVNLayer(LayerParams &params) : Layer(params)
MVNLayerImpl::MVNLayerImpl(bool normVariance_, bool acrossChannels_, double eps_)
{
eps = params.get<double>("eps", 1e-9);
acrossChannels = params.get<bool>("across_channels", false);
normalizeVariance = params.get<bool>("normalize_variance", true);
normVariance = normVariance_;
acrossChannels = acrossChannels_;
eps = eps_;
}
void MVNLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
void MVNLayerImpl::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{
outputs.resize(inputs.size());
for (size_t i = 0; i < inputs.size(); i++)
......@@ -65,20 +66,17 @@ void MVNLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &ou
    }
}

void MVNLayerImpl::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{
    for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
    {
        Blob &inpBlob = *inputs[inpIdx];
        Blob &outBlob = outputs[inpIdx];

        int splitDim = (acrossChannels) ? 1 : 2;
        Shape workSize((int)inpBlob.total(0, splitDim), (int)inpBlob.total(splitDim));
        Mat inpMat = reshaped(inpBlob.matRefConst(), workSize);
        Mat outMat = reshaped(outBlob.matRef(), workSize);

        Scalar mean, dev;
        for (int i = 0; i < workSize[0]; i++)
......@@ -86,12 +84,18 @@ void MVNLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
            Mat inpRow = inpMat.row(i);
            Mat outRow = outMat.row(i);

            cv::meanStdDev(inpRow, mean, (normVariance) ? dev : noArray());
            double alpha = (normVariance) ? 1/(eps + dev[0]) : 1;
            inpRow.convertTo(outRow, outRow.type(), alpha, -mean[0] * alpha);
        }
    }
}

Ptr<MVNLayer> MVNLayer::create(bool normVariance, bool acrossChannels, double eps)
{
    return Ptr<MVNLayer>(new MVNLayerImpl(normVariance, acrossChannels, eps));
}
}
}
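Each row of the reshaped matrix is normalized independently: out = (in - mean) / (eps + stddev) when normVariance is set, and out = in - mean otherwise. A standalone sketch of the same arithmetic (the 1x4 row is invented for illustration):

    #include <opencv2/core.hpp>
    using namespace cv;

    Mat row = (Mat_<float>(1, 4) << 1, 2, 3, 4);   // mean 2.5, stddev ~1.118
    Scalar mean, dev;
    meanStdDev(row, mean, dev);
    double eps = 1e-9;
    double alpha = 1.0 / (eps + dev[0]);
    Mat out;
    row.convertTo(out, CV_32F, alpha, -mean[0] * alpha);
    // out ~ [-1.342, -0.447, 0.447, 1.342]: zero mean, unit variance.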
......@@ -42,20 +42,18 @@
#ifndef __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_MVN_LAYER_HPP__

#include "../precomp.hpp"
#include <opencv2/dnn/all_layers.hpp>

namespace cv
{
namespace dnn
{

class MVNLayerImpl : public MVNLayer
{
public:

    MVNLayerImpl(bool normVariance_ = true, bool acrossChannels_ = false, double eps_ = 1e-9);

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
......
......@@ -42,73 +42,33 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "reshape_layer.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace cv
{
namespace dnn
{
ReshapeLayerImpl::ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_)
{
    newShapeDesc = newShape_;
    newShapeRange = applyingRange_;
}

void ReshapeLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    outputs.resize(inputs.size());
    outShapes.resize(inputs.size());

    for (size_t i = 0; i < inputs.size(); i++)
    {
        outShapes[i] = computeShapeByReshapeMask(inputs[i]->shape(), newShapeDesc, newShapeRange);
        outputs[i].shareFrom(*inputs[i]);
        outputs[i].reshape(outShapes[i]);
    }
}

void ReshapeLayerImpl::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    for (size_t i = 0; i < outputs.size(); i++)
    {
......@@ -117,61 +77,11 @@ void ReshapeLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &output
    }
}

Ptr<ReshapeLayer> ReshapeLayer::create(const BlobShape &newShape, Range applyingRange /*= Range::all()*/)
{
    return Ptr<ReshapeLayer>(new ReshapeLayerImpl(newShape, applyingRange));
}
}
}
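Note that allocate() now shares data with the input blob and only rewrites the shape header, so the reshape itself is zero-copy. A usage sketch under the assumptions noted in the comments:

    // Blob constructor and ALLOC_MAT flag assumed from the Blob API above;
    // the shape values are invented for the example.
    Blob inp(BlobShape(2, 3, 4, 4), CV_32F, Blob::ALLOC_MAT); // 96 elements
    Ptr<ReshapeLayer> rl = ReshapeLayer::create(Shape(0, -1));

    std::vector<Blob*> inputs(1, &inp);
    std::vector<Blob> outputs;
    rl->allocate(inputs, outputs);
    rl->forward(inputs, outputs);
    // outputs[0] views the same 96 floats with shape [2, 48].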
......@@ -42,27 +42,23 @@
#ifndef __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_RESHAPE_LAYER_HPP__

#include "../precomp.hpp"
#include <opencv2/dnn/all_layers.hpp>

namespace cv
{
namespace dnn
{

class ReshapeLayerImpl : public ReshapeLayer
{
    std::vector<BlobShape> outShapes;

public:
    ReshapeLayerImpl(const BlobShape &newShape_, Range applyingRange_);

    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
......