Commit 0674b6f3 authored by Vitaliy Lyudvichenko

Rewriting FlattenLayer and small fixes

parent 8e68d837
@@ -59,11 +59,12 @@ namespace dnn
 struct DictValue
 {
     DictValue(const DictValue &r);
-    DictValue(int p = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; }       //!< Constructs integer scalar
+    DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; }     //!< Constructs integer scalar
+    DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; }           //!< Constructs integer scalar
     DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; }      //!< Constructs integer scalar
     DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; }      //!< Constructs floating point scalar
     DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
-    DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; }   //!< @overlaod
+    DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; }   //!< @overload
     template<typename TypeIter>
     static DictValue arrayInt(TypeIter begin, int size);    //!< Constructs integer array
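The constructor split above is an overload-resolution fix: with only DictValue(int64) and DictValue(unsigned), a plain int argument would convert equally well to either and the call would be ambiguous, so an exact-match int overload is added alongside the new 64-bit one. A standalone sketch of the ambiguity (hypothetical DV type, not the dnn API):

    #include <cstdint>

    struct DV
    {
        long long v;
        DV(int64_t x) : v(x) {}   // 64-bit values pass through untruncated
        DV(unsigned x) : v(x) {}
        // Without the overload below, "DV d(5);" fails to compile:
        // int -> int64_t and int -> unsigned are equally ranked conversions.
        DV(int x) : v(x) {}
    };

    int main() { DV d(5); return (int)d.v - 5; } // exact match: DV(int)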
@@ -204,11 +204,6 @@ Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
     return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange));
 }
 
-Ptr<Layer> createFlattenLayerFromCaffe(LayerParams&)
-{
-    return Ptr<Layer>(ReshapeLayer::create(Shape(0, -1)));
-}
-
 template<>
 Ptr<Layer> createLayerFromCaffe<ConcatLayer>(LayerParams& params)
 {
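The deleted creator emulated Flatten with a reshape: in Caffe's reshape convention, a target dimension of 0 copies that dimension from the input and -1 absorbs everything left, so Shape(0, -1) keeps the batch dimension and collapses the rest. That only matches Flatten's default axis = 1, end_axis = -1; flattening (n, c, h, w) with axis = 2 should give (n, c, h*w), which Shape(0, -1) cannot express. A minimal sketch of the two shape computations:

    #include <cstdio>

    int main()
    {
        int n = 2, c = 3, h = 4, w = 5;
        int reshape0m1[2] = { n, c * h * w };   // Shape(0, -1): {2, 60}
        int flattenAx2[3] = { n, c, h * w };    // Flatten, axis = 2: {2, 3, 20}
        std::printf("%dx%d vs %dx%dx%d\n",
                    reshape0m1[0], reshape0m1[1],
                    flattenAx2[0], flattenAx2[1], flattenAx2[2]);
    }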
@@ -367,9 +367,9 @@ struct Net::Impl
         //allocate layer
         ld.outputBlobs.resize(std::max((size_t)1, ld.requiredOutputs.size())); //layer produce at least one output blob
+        Ptr<Layer> layerPtr = ld.getLayerInstance();
         try
         {
-            Ptr<Layer> layerPtr = ld.getLayerInstance();
             layerPtr->allocate(ld.inputBlobs, ld.outputBlobs);
         }
         catch (const cv::Exception &err)
@@ -77,7 +77,7 @@ void initModule()
     REG_RUNTIME_LAYER_FUNC(Split, createLayerFromCaffe<SplitLayer>);
     REG_RUNTIME_LAYER_FUNC(Concat, createLayerFromCaffe<ConcatLayer>);
     REG_RUNTIME_LAYER_FUNC(Reshape, createLayerFromCaffe<ReshapeLayer>);
-    REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayerFromCaffe);
+    REG_RUNTIME_LAYER_CLASS(Flatten, FlattenLayer);
     REG_RUNTIME_LAYER_FUNC(Convolution, createLayerFromCaffe<ConvolutionLayer>);
     REG_RUNTIME_LAYER_FUNC(Deconvolution, createLayerFromCaffe<DeconvolutionLayer>);
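REG_RUNTIME_LAYER_CLASS registers the new FlattenLayer by class rather than through a hand-written creator. Presumably the class form supplies a default factory that just forwards the parsed LayerParams to the constructor; roughly the creator one would otherwise write by hand (a sketch under that assumption, not the actual macro expansion; assumes the module's Layer, LayerParams and Ptr<> are in scope):

    // Hypothetical FUNC-style equivalent of REG_RUNTIME_LAYER_CLASS(Flatten, FlattenLayer);
    static Ptr<Layer> createFlattenLayerByClass(LayerParams &params)
    {
        return Ptr<Layer>(new FlattenLayer(params));
    }
    // REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayerByClass);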
@@ -99,7 +99,6 @@ void initModule()
     REG_RUNTIME_LAYER_CLASS(Eltwise, EltwiseLayer)
     REG_RUNTIME_LAYER_CLASS(Permute, PermuteLayer)
-    //REG_RUNTIME_LAYER_CLASS(Flatten, FlattenLayer)
     REG_RUNTIME_LAYER_CLASS(PriorBox, PriorBoxLayer)
     REG_RUNTIME_LAYER_CLASS(DetectionOutput, DetectionOutputLayer)
     REG_RUNTIME_LAYER_CLASS(NormalizeBBox, NormalizeBBoxLayer)
@@ -136,7 +136,7 @@ template<typename XMat>
 void ConvolutionLayerImpl::forward_(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     XMat weightsMat = reshaped(blobs[0].getRefConst<XMat>(), Shape(outCn, ksize));
-    XMat biasesMat = reshaped(blobs[1].getRefConst<XMat>(), Shape(outCn, 1));
+    XMat biasesMat = (bias) ? reshaped(blobs[1].getRefConst<XMat>(), Shape(outCn, 1)) : XMat();
 
     for (size_t ii = 0; ii < outputs.size(); ii++)
     {
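The guard matters because a Caffe convolution can be declared with bias_term: false, in which case blobs holds only the weights and blobs[1] does not exist; an empty XMat() then serves as the sentinel the bias-add path checks. The same pattern in plain OpenCV terms (a sketch, not the layer's actual code path):

    #include <opencv2/core.hpp>
    #include <vector>

    int main()
    {
        std::vector<cv::Mat> blobs(1, cv::Mat::ones(3, 9, CV_32F)); // weights only
        bool bias = (blobs.size() > 1);

        // Guarded reshape: never index blobs[1] unless the bias blob exists.
        cv::Mat biasesMat = bias ? blobs[1].reshape(1, 3) : cv::Mat();

        return biasesMat.empty() ? 0 : 1; // empty -> skip the bias add
    }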
@@ -47,74 +47,76 @@ namespace cv
 {
 namespace dnn
 {
 
 CropLayer::CropLayer(LayerParams &params) : Layer(params)
 {
-    start_axis = params.get<int>("axis");
+    start_axis = params.get<int>("axis", 2);
     if (4 <= start_axis)
         CV_Error(Error::StsBadArg, "crop axis bigger than input dim");
 
     DictValue paramOffset = params.get("offset");
 
     offset.resize(4, 0);
     if (1 < paramOffset.size())
     {
         if (4 - start_axis != paramOffset.size())
             CV_Error(Error::StsBadArg, "number of offset values specified must be equal to the number of dimensions following axis.");
         for (size_t i = start_axis; i < offset.size(); i++)
         {
             offset[i] = paramOffset.get<int>(i);
         }
     }
     else
     {
         const int offset_val = paramOffset.get<int>(0);
         for (size_t i = start_axis; i < offset.size(); i++)
         {
             offset[i] = offset_val;
         }
     }
 }
 
 void CropLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     CV_Assert(2 == inputs.size());
 
+    const Blob &inpSzBlob = *inputs[1];
     const Blob &inpBlob = *inputs[0];
     CV_Assert(inpBlob.dims() == 4 && inpBlob.type() == CV_32F);
 
-    const Blob &inpSzBlob = *inputs[1];
-
     outSizes.resize(4, 0);
     for (int i = 0; i < 4; i++)
     {
         if (i < start_axis)
             outSizes[i] = inpBlob.size(i);
         else
             outSizes[i] = inpSzBlob.size(i);
         if (offset[i] + outSizes[i] > inpBlob.size(i))
             CV_Error(Error::StsBadArg, "invalid crop parameters");
     }
 
     outputs.resize(1);
     outputs[0].create(BlobShape(outSizes));
 }
 
 void CropLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     Blob input = *inputs[0];
     Blob output = outputs[0];
 
     for (int num = 0; num < outSizes[0]; ++num)
     {
         for (int ch = 0; ch < outSizes[1]; ++ch)
         {
             for (int row = 0; row < outSizes[2]; ++row)
             {
                 float *srcData = input.ptrf(num + offset[0], ch + offset[1], row + offset[2]);
                 float *dstData = output.ptrf(num, ch, row);
                 memcpy(dstData, srcData + offset[3], sizeof(float) * outSizes[3]);
             }
         }
     }
 }
 
 }
 }
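Apart from the re-indentation that appears to make up most of this hunk, the visible changes are a default crop axis of 2 (Caffe's default for the Crop layer) and binding the size-reference blob earlier in allocate. Crop takes two bottoms: dimensions before start_axis come from the input, the rest from the reference blob, shifted by offset. A worked example of the shape logic (plain C++, mirroring the loop above):

    #include <cstdio>

    int main()
    {
        int in[4]  = { 1, 3, 224, 224 };   // bottom[0], blob to crop
        int ref[4] = { 1, 3, 200, 200 };   // bottom[1], size reference
        int start_axis = 2, off = 10;
        int out[4], offset[4] = { 0, 0, off, off };

        for (int i = 0; i < 4; i++)
        {
            out[i] = (i < start_axis) ? in[i] : ref[i];
            if (offset[i] + out[i] > in[i])
                return 1; // invalid crop parameters
        }
        // prints 1 x 3 x 200 x 200; each copied row starts at (row+10, col+10)
        std::printf("%d x %d x %d x %d\n", out[0], out[1], out[2], out[3]);
    }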
@@ -50,52 +50,10 @@ namespace cv
 namespace dnn
 {
 
-const std::string FlattenLayer::_layerName = std::string("Flatten");
-
-bool FlattenLayer::getParameterDict(const LayerParams &params,
-                                    const std::string &parameterName,
-                                    DictValue& result)
-{
-    if (!params.has(parameterName))
-    {
-        return false;
-    }
-
-    result = params.get(parameterName);
-    return true;
-}
-
-template<typename T>
-T FlattenLayer::getParameter(const LayerParams &params,
-                             const std::string &parameterName,
-                             const size_t &idx,
-                             const bool required,
-                             const T& defaultValue)
-{
-    DictValue dictValue;
-    bool success = getParameterDict(params, parameterName, dictValue);
-    if(!success)
-    {
-        if(required)
-        {
-            std::string message = _layerName;
-            message += " layer parameter does not contain ";
-            message += parameterName;
-            message += " parameter.";
-            CV_Error(Error::StsBadArg, message);
-        }
-        else
-        {
-            return defaultValue;
-        }
-    }
-    return dictValue.get<T>(idx);
-}
-
 FlattenLayer::FlattenLayer(LayerParams &params) : Layer(params)
 {
-    _startAxis = getParameter<int>(params, "axis");
-    _endAxis = getParameter<int>(params, "end_axis", 0, false, -1);
+    _startAxis = params.get<int>("axis", 1);
+    _endAxis = params.get<int>("end_axis", -1);
 }
 
 void FlattenLayer::checkInputs(const std::vector<Blob*> &inputs)
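With the rewrite, the parameters come straight from LayerParams with Caffe's defaults: axis = 1, end_axis = -1, and every dimension in [axis, end_axis] collapses into one. For an input of shape (2, 3, 4, 5), axis = 1 with end_axis = -1 gives (2, 60), while end_axis = 2 would give (2, 12, 5). A standalone sketch of the shape computation performed in allocate below:

    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> in = { 2, 3, 4, 5 };
        int axis = 1, end_axis = -1;
        if (end_axis < 0) end_axis += (int)in.size(); // canonicalize, as canonicalAxis does

        std::vector<int> out(in.begin(), in.begin() + axis);
        int flattened = 1;
        for (int i = axis; i <= end_axis; i++)
            flattened *= in[i];
        out.push_back(flattened);
        out.insert(out.end(), in.begin() + end_axis + 1, in.end());

        for (size_t i = 0; i < out.size(); i++)
            std::printf("%d ", out[i]);           // prints: 2 60
    }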
@@ -114,35 +72,36 @@ void FlattenLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     checkInputs(inputs);
 
-    _numAxes = inputs[0]->shape().dims();
-    if(_endAxis <= 0)
-    {
-        _endAxis += _numAxes;
-    }
+    _numAxes = inputs[0]->dims();
+    _endAxis = inputs[0]->canonicalAxis(_endAxis);
     CV_Assert(_startAxis >= 0);
     CV_Assert(_endAxis >= _startAxis && _endAxis < (int)_numAxes);
 
     size_t flattenedDimensionSize = 1;
     for (int i = _startAxis; i <= _endAxis; i++)
     {
-        flattenedDimensionSize *= inputs[0]->shape()[i];
+        flattenedDimensionSize *= inputs[0]->size(i);
     }
 
-    std::vector<int> outputShape;
+    std::vector<int> outputShapeVec;
     for (int i = 0; i < _startAxis; i++)
     {
-        outputShape.push_back(inputs[0]->shape()[i]);
+        outputShapeVec.push_back(inputs[0]->size(i));
     }
-    outputShape.push_back(flattenedDimensionSize);
+    outputShapeVec.push_back(flattenedDimensionSize);
     for (size_t i = _endAxis + 1; i < _numAxes; i++)
     {
-        outputShape.push_back(inputs[0]->shape()[i]);
+        outputShapeVec.push_back(inputs[0]->size(i));
     }
-    CV_Assert(outputShape.size() <= 4);
+    CV_Assert(outputShapeVec.size() <= 4);
+
+    resultShape = BlobShape(outputShapeVec);
 
     for (size_t i = 0; i < inputs.size(); i++)
     {
-        outputs[i].create(BlobShape(outputShape));
+        //in-place
+        outputs[i].shareFrom(*inputs[i]);
+        outputs[i].reshape(resultShape);
     }
 }
@@ -150,7 +109,8 @@ void FlattenLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     for (size_t j = 0; j < inputs.size(); j++)
     {
-        outputs[j].matRef() = inputs[j]->matRef();
+        outputs[j].shareFrom(*inputs[j]);
+        outputs[j].reshape(resultShape);
     }
 }
 
 }
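Both allocate and forward now make the output a view over the input: shareFrom presumably points the output blob at the input's reference-counted buffer, and reshape only rewrites the shape header, so flattening copies no data. The old matRef() assignment also shared storage, but replaced the output's Mat header wholesale, shape included, which appears to be what the rewrite fixes. The cv::Mat analogue of this zero-copy reshape:

    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat input(3, 4, CV_32F, cv::Scalar(1));

        // Zero-copy "flatten": the new header shares input's buffer.
        cv::Mat flat = input.reshape(1, 1);      // 1x12 view, no allocation
        flat.at<float>(0, 5) = 42.f;

        // Same storage: element (0,5) of the view is (1,1) of the original.
        return input.at<float>(1, 1) == 42.f ? 0 : 1;
    }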
@@ -51,9 +51,9 @@ class FlattenLayer : public Layer
 {
     int _startAxis;
     int _endAxis;
     size_t _numAxes;
 
-    static const std::string _layerName;
+    BlobShape resultShape;
 
 public:
     FlattenLayer(LayerParams &params);
@@ -61,16 +61,6 @@ public:
     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     void checkInputs(const std::vector<Blob*> &inputs);
-
-    template<typename T>
-    T getParameter(const LayerParams &params,
-                   const std::string &parameterName,
-                   const size_t &idx = 0,
-                   const bool required = true,
-                   const T& defaultValue = T());
-
-    bool getParameterDict(const LayerParams &params,
-                          const std::string &parameterName, DictValue &result);
 };
 }
 }
@@ -208,9 +208,8 @@ void LRNLayerImpl::spatialNormalization(Blob &src, Blob &dst)
 template<>
 void LRNLayerImpl::sqrBoxFilter_<Mat>(const Mat &src, Mat &dst)
 {
-    Mat bufMat = buf.getRef<Mat>();
-    src.copyTo(bufMat);
-    cv::sqrBoxFilter(bufMat, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
+    Mat srcRawWrapper(src.rows, src.cols, src.type(), src.data, src.step[0]);
+    cv::sqrBoxFilter(srcRawWrapper, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
 }
 
 template<>
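The old path copied src into the scratch blob buf before filtering; the new one wraps the existing pixels in a Mat header built from the raw pointer and row step, so cv::sqrBoxFilter reads the source directly with no copy. The same non-owning-header trick in isolation (a sketch; the wrapper must not outlive the buffer it points into):

    #include <opencv2/imgproc.hpp>

    int main()
    {
        cv::Mat src = cv::Mat::ones(8, 8, CV_32F);

        // Non-owning header over existing memory: no pixel data is copied.
        cv::Mat wrapper(src.rows, src.cols, src.type(), src.data, src.step[0]);

        cv::Mat dst;
        cv::sqrBoxFilter(wrapper, dst, CV_32F, cv::Size(3, 3),
                         cv::Point(-1, -1), false, cv::BORDER_CONSTANT);
        return dst.at<float>(4, 4) > 0 ? 0 : 1; // 3x3 sum of squares of ones = 9
    }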