Commit 551e17ea authored by Anna Petrovicheva

Fixes

parent 71248c99
......@@ -50,28 +50,49 @@ namespace cv
namespace dnn
{
void FlattenLayer::checkParameter(const LayerParams &params, const string &parameterName)
const std::string FlattenLayer::_layerName = std::string("Flatten");
DictValue FlattenLayer::getParameterDict(const LayerParams &params,
const std::string &parameterName)
{
if (!params.has(parameterName))
{
CV_Error(Error::StsBadArg, "Flatten layer parameter does not contain " + parameterName + " index.");
std::string message = _layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " index.";
CV_Error(Error::StsBadArg, message);
}
DictValue parameter = params.get(parameterName);
if(parameter.size() != 1)
{
std::string message = parameterName;
message += " field in ";
message += _layerName;
message += " layer parameter is required";
CV_Error(Error::StsBadArg, message);
}
return parameter;
}
FlattenLayer::FlattenLayer(LayerParams &params) : Layer(params)
template<typename T>
T FlattenLayer::getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx)
{
checkParameter(params, "start_axis");
checkParameter(params, "end_axis");
return getParameterDict(params, parameterName).get<T>(idx);
}
_startAxis = params.start_axis;
FlattenLayer::FlattenLayer(LayerParams &params) : Layer(params)
{
_startAxis = getParameter<size_t>(params, "start_axis");
_endAxis = getParameter<size_t>(params, "end_axis");
if(params.end_axis > 0)
if(_endAxis <= 0)
{
_endAxis = params.end_axis;
}
else
{
_endAxis = _numAxes + params.end_axis;
_endAxis += _numAxes;
}
}
......@@ -82,7 +103,7 @@ void FlattenLayer::checkInputs(const std::vector<Blob*> &inputs)
{
for (size_t j = 0; j < _numAxes; j++)
{
CV_Assert(inputs[i]->shape[j] == inputs[0]->shape[j]);
CV_Assert(inputs[i]->shape()[j] == inputs[0]->shape()[j]);
}
}
}
......@@ -125,10 +146,7 @@ void FlattenLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &output
{
for (size_t j = 0; j < inputs.size(); j++)
{
float *srcData = inputs[j]->ptrf();
float *dstData = outputs[j]->ptrf();
dstData = srcData;
outputs[j].matRef() = inputs[j]->matRef();
}
}
}
......
......@@ -53,13 +53,21 @@ class FlattenLayer : public Layer
size_t _endAxis;
static const size_t _numAxes = 4;
static const std::string _layerName;
public:
FlattenLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void checkParameter(const LayerParams &params, const string &parameterName);
void checkInputs(const std::vector<Blob*> &inputs);
template<typename T>
T getParameter(const LayerParams &params, const std::string &parameterName,
const size_t &idx = 0);
DictValue getParameterDict(const LayerParams &params,
const std::string &parameterName);
};
}
}
......
......@@ -52,23 +52,47 @@ namespace cv
namespace dnn
{
void NormalizeBBoxLayer::checkParameter(const LayerParams &params, const string &parameterName)
const std::string NormalizeBBoxLayer::_layerName = std::string("NormalizeBBox");
DictValue NormalizeBBoxLayer::getParameterDict(const LayerParams &params,
const std::string &parameterName)
{
if (!params.has(parameterName))
{
CV_Error(Error::StsBadArg, "NormalizeBBox layer parameter does not contain " + parameterName + " index.");
std::string message = _layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " index.";
CV_Error(Error::StsBadArg, message);
}
DictValue parameter = params.get(parameterName);
if(parameter.size() != 1)
{
std::string message = parameterName;
message += " field in ";
message += _layerName;
message += " layer parameter is required";
CV_Error(Error::StsBadArg, message);
}
return parameter;
}
NormalizeBBoxLayer::NormalizeBBoxLayer(LayerParams &params) : Layer(params)
template<typename T>
T NormalizeBBoxLayer::getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx)
{
checkParameter(params, "eps");
checkParameter(params, "across_spatial");
checkParameter(params, "channel_shared");
return getParameterDict(params, parameterName).get<T>(idx);
}
_eps = params.eps();
_across_spatial = params.across_spatial();
_channel_shared = params.channel_shared();
NormalizeBBoxLayer::NormalizeBBoxLayer(LayerParams &params) : Layer(params)
{
_eps = getParameter<float>(params, "eps");
_across_spatial = getParameter<bool>(params, "across_spatial");
_channel_shared = getParameter<bool>(params, "channel_shared");
}
void NormalizeBBoxLayer::checkInputs(const std::vector<Blob*> &inputs)
......@@ -78,7 +102,7 @@ void NormalizeBBoxLayer::checkInputs(const std::vector<Blob*> &inputs)
{
for (size_t j = 0; j < _numAxes; j++)
{
CV_Assert(inputs[i]->shape[j] == inputs[0]->shape[j]);
CV_Assert(inputs[i]->shape()[j] == inputs[0]->shape()[j]);
}
}
CV_Assert(inputs[0]->dims() > 2);
......@@ -89,9 +113,9 @@ void NormalizeBBoxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<
checkInputs(inputs);
_num = inputs[0]->num();
_channels = inputs[0]->shape[1];
_rows = inputs[0]->shape[2];
_cols = inputs[0]->shape[3];
_channels = inputs[0]->shape()[1];
_rows = inputs[0]->shape()[2];
_cols = inputs[0]->shape()[3];
_channelSize = _rows * _cols;
_imageSize = _channelSize * _channels;
......@@ -110,7 +134,7 @@ void NormalizeBBoxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<
}
// add eps to avoid overflow
_norm.fill(Scalar(eps));
_norm.fill(Scalar(_eps));
_sumChannelMultiplier = Blob(BlobShape(1, _channels, 1, 1));
_sumChannelMultiplier.fill(Scalar(1.0));
......@@ -135,22 +159,20 @@ void NormalizeBBoxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<
void NormalizeBBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
Mat zeroBuffer = Mat(_buffer.matRef().size, _buffer.matRef().type(), Scalar(0));
Mat zeroBuffer = Mat(_buffer.matRef().rows, _buffer.matRef().cols,
_buffer.matRef().type(), Scalar(0));
Mat sumAbs;
for (size_t j = 0; j < inputs.size(); j++)
{
for (int n = 0; n < _num; ++n)
for (size_t n = 0; n < _num; ++n)
{
Blob src(BlobShape(1, _channels, _rows, _cols));
src.ptrf() = inputs[j]->ptrf(n);
Blob dst(BlobShape(1, _channels, _rows, _cols));
dst.ptrf() = outputs[j]->ptrf(n);
Mat src = inputs[j]->getPlanes(n);
Mat dst = outputs[j].getPlanes(n);
Mat normCurrent = _norm.ptrf(n);
Mat normCurrent = _norm.getPlanes(n);
cv::sqrt(src.matRefConst(), _buffer.matRef());
cv::sqrt(src, _buffer.matRef());
if (_across_spatial)
{
......@@ -160,7 +182,7 @@ void NormalizeBBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &
pow(sumAbs, 0.5f, normCurrent);
dst.matRef() = src.matRef() / normCurrent;
dst = src / normCurrent;
}
else
{
......@@ -172,18 +194,18 @@ void NormalizeBBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &
// scale the layer
gemmCPU(_sumChannelMultiplier.matRef(), normCurrent, 1, _buffer.matRef(), 0);
dst.matRef() = src.matRef() / _buffer.matRef().at<float>(0, 0);
dst = src / _buffer.matRef().at<float>(0, 0);
}
// scale the output
if (_channel_shared)
{
dst.matRef() *= _scale.matRef();
dst *= _scale.matRef();
}
else
{
gemmCPU(_scale.matRef(), _sumSpatialMultiplier.matRef(), 1, _buffer.matRef(), 0);
dst.matRef() *= _buffer.matRef();
dst *= _buffer.matRef();
}
}
}
......
......@@ -59,8 +59,8 @@ class NormalizeBBoxLayer : public Layer
Blob _scale;
float _eps;
bool _across_spatial;
double _eps;
bool _channel_shared;
size_t _num;
......@@ -72,13 +72,21 @@ class NormalizeBBoxLayer : public Layer
size_t _imageSize;
static const size_t _numAxes = 4;
static const std::string _layerName;
public:
NormalizeBBoxLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void checkParameter(const LayerParams &params, const string &parameterName);
void checkInputs(const std::vector<Blob*> &inputs);
template<typename T>
T getParameter(const LayerParams &params, const std::string &parameterName,
const size_t &idx = 0);
DictValue getParameterDict(const LayerParams &params,
const std::string &parameterName);
};
}
}
......
......@@ -49,63 +49,71 @@ namespace cv
{
namespace dnn
{
PermuteLayer::PermuteLayer(LayerParams &params) : Layer(params)
void PermuteLayer::checkCurrentOrder(int currentOrder)
{
if (!params.has("order"))
if(currentOrder < 0 || currentOrder > 3)
{
_needsPermute = false;
CV_Error(
Error::StsBadArg,
"Orders of dimensions in Permute layer parameter"
"must be in [0...3] interval");
}
else
{
for (size_t i = 0; i < _numAxes; i++)
{
size_t current_order = params.order(i);
if(std::find(_order.begin(), _order.end(), current_order) != _order.end())
{
CV_Error(Error::StsBadArg, "Permute layer parameter contains duplicated orders.");
}
_order.push_back(current_order);
}
if(std::find(_order.begin(), _order.end(), currentOrder) != _order.end())
{
CV_Error(Error::StsBadArg,
"Permute layer parameter contains duplicated orders.");
}
}
_needsPermute = false;
for (int i = 0; i < _numAxes; ++i)
void PermuteLayer::checkNeedForPermutation()
{
_needsPermute = false;
for (size_t i = 0; i < _numAxes; ++i)
{
if (_order[i] != i)
{
if (_order[i] != i)
{
_needsPermute = true;
break;
}
_needsPermute = true;
break;
}
}
}
void PermuteLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
PermuteLayer::PermuteLayer(LayerParams &params) : Layer(params)
{
CV_Assert(inputs.size() > 0);
_oldDimensionSize = inputs[0]->shape();
for (size_t i = 0; i < _numAxes; i++)
if (!params.has("order"))
{
_newDimensionSize[i] = _oldDimensionSize[order[i]];
_needsPermute = false;
return;
}
outputs.resize(inputs.size());
DictValue paramOrder = params.get("order");
if(paramOrder.size() != 4)
{
CV_Error(
Error::StsBadArg,
"4 orders of dimensions in Permute layer parameter is required");
}
for (size_t i = 0; i < inputs.size(); i++)
for (size_t i = 0; i < _numAxes; i++)
{
CV_Assert(inputs[i]->rows() == _oldDimensionSize[2] && inputs[i]->cols() == _oldDimensionSize[3]);
outputs[i].create(BlobShape(_newDimensionSize));
int currentOrder = paramOrder.get<int>(i);
checkCurrentOrder(currentOrder);
_order.push_back(currentOrder);
}
checkNeedForPermutation();
}
void PermuteLayer::computeStrides()
{
_oldStride.resize(_numAxes);
_newStride.resize(_numAxes);
_oldStride[3] = 1;
_newStride[3] = 1;
for(size_t i = 2; i >= 0; i--)
for(int i = 2; i >= 0; i--)
{
_oldStride[i] = _oldStride[i - 1] * _oldDimensionSize[i - 1];
_newStride[i] = _newStride[i - 1] * _newDimensionSize[i - 1];
......@@ -114,28 +122,53 @@ void PermuteLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob>
_count = _oldStride[0] * _oldDimensionSize[0];
}
void PermuteLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
void PermuteLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
if(!_needsPermute)
{
float *srcData = inputs[j]->ptrf();
float *dstData = outputs[j]->ptrf();
return;
}
CV_Assert(inputs.size() > 0);
outputs.resize(inputs.size());
dstData = srcData;
_oldDimensionSize = inputs[0]->shape();
for (size_t i = 0; i < _numAxes; i++)
{
_newDimensionSize[i] = _oldDimensionSize[_order[i]];
}
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->rows() == _oldDimensionSize[2] && inputs[i]->cols() == _oldDimensionSize[3]);
outputs[i].create(BlobShape(_newDimensionSize));
}
computeStrides();
}
void PermuteLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
if(!_needsPermute)
{
for (size_t j = 0; j < inputs.size(); j++)
{
outputs[j].matRef() = inputs[j]->matRef();
}
return;
}
for (size_t j = 0; j < inputs.size(); j++)
{
float *srcData = inputs[j]->ptrf();
float *dstData = outputs[j]->ptrf();
float *dstData = outputs[j].ptrf();
for (int i = 0; i < _count; ++i)
for (size_t i = 0; i < _count; ++i)
{
int oldPosition = 0;
int newPosition = i;
for (int j = 0; j < _numAxes; ++j)
for (size_t j = 0; j < _numAxes; ++j)
{
oldPosition += (newPosition / _newStride[j]) * _oldStride[_order[j]];
newPosition %= _newStride[j];
......
......@@ -61,6 +61,10 @@ class PermuteLayer : public Layer
static const size_t _numAxes = 4;
void checkCurrentOrder(int currentOrder);
void checkNeedForPermutation();
void computeStrides();
public:
PermuteLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
......
......@@ -51,40 +51,58 @@ namespace cv
namespace dnn
{
void PriorBoxLayer::checkParameter(const LayerParams &params, const string &parameterName)
const std::string PriorBoxLayer::_layerName = std::string("PriorBox");
DictValue PriorBoxLayer::getParameterDict(const LayerParams &params,
const std::string &parameterName)
{
if (!params.has(parameterName))
{
CV_Error(Error::StsBadArg, "PriorBox layer parameter does not contain " + parameterName + " index.");
std::string message = _layerName;
message += " layer parameter does not contain ";
message += parameterName;
message += " index.";
CV_Error(Error::StsBadArg, message);
}
}
PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
{
checkParameter(params, "min_size");
_minSize = params.min_size();
CV_Assert(_minSize > 0);
DictValue parameter = params.get(parameterName);
if(parameter.size() != 1)
{
std::string message = parameterName;
message += " field in ";
message += _layerName;
message += " layer parameter is required";
CV_Error(Error::StsBadArg, message);
}
_aspectRatios.clear();
_aspectRatios.push_back(1.);
return parameter;
}
_flip = params.flip();
template<typename T>
T PriorBoxLayer::getParameter(const LayerParams &params,
const std::string &parameterName,
const size_t &idx)
{
return getParameterDict(params, parameterName).get<T>(idx);
}
for (int i = 0; i < params.aspect_ratio_size(); ++i)
void PriorBoxLayer::getAspectRatios(const LayerParams &params)
{
DictValue aspectRatioParameter = getParameterDict(params, "aspect_ratio");
for (int i = 0; i < aspectRatioParameter.size(); ++i)
{
float aspectRatio = params.aspect_ratio(i);
bool already_exist = false;
float aspectRatio = aspectRatioParameter.get<float>(i);
bool alreadyExists = false;
for (int j = 0; j < _aspectRatios.size(); ++j)
for (size_t j = 0; j < _aspectRatios.size(); ++j)
{
if (fabs(aspectRatio - _aspectRatios[j]) < 1e-6)
{
already_exist = true;
alreadyExists = true;
break;
}
}
if (!already_exist)
if (!alreadyExists)
{
_aspectRatios.push_back(aspectRatio);
if (_flip)
......@@ -93,21 +111,12 @@ PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
}
}
}
}
_numPriors = _aspectRatios.size();
_maxSize = -1;
if (params.has(max_size))
{
_maxSize = params.max_size();
CV_Assert(_maxSize > _minSize);
_numPriors += 1;
}
_clip = params.clip();
int varianceSize = params.variance_size();
void PriorBoxLayer::getVariance(const LayerParams &params)
{
DictValue varianceParameter = getParameterDict(params, "variance");
int varianceSize = varianceParameter.size();
if (varianceSize > 1)
{
// Must and only provide 4 variance.
......@@ -115,7 +124,7 @@ PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
for (int i = 0; i < varianceSize; ++i)
{
float variance = params.variance(i);
float variance = varianceParameter.get<float>(i);
CV_Assert(variance > 0);
_variance.push_back(variance);
}
......@@ -124,7 +133,7 @@ PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
{
if (varianceSize == 1)
{
float variance = params.variance(0);
float variance = varianceParameter.get<float>(0);
CV_Assert(variance > 0);
_variance.push_back(variance);
}
......@@ -136,15 +145,41 @@ PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
}
}
PriorBoxLayer::PriorBoxLayer(LayerParams &params) : Layer(params)
{
_minSize = getParameter<size_t>(params, "min_size");
CV_Assert(_minSize > 0);
_flip = getParameter<bool>(params, "flip");
_clip = getParameter<bool>(params, "clip");
_aspectRatios.clear();
_aspectRatios.push_back(1.);
getAspectRatios(params);
getVariance(params);
_numPriors = _aspectRatios.size();
_maxSize = -1;
if (params.has("max_size"))
{
_maxSize = params.get("max_size").get<float>(0);
CV_Assert(_maxSize > _minSize);
_numPriors += 1;
}
}
void PriorBoxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
CV_Assert(inputs.size() == 2);
_layerWidth = inputs[0]->width();
_layerHeight = inputs[0]->height();
_layerWidth = inputs[0]->cols();
_layerHeight = inputs[0]->rows();
_imageWidth = inputs[1]->width();
_imageHeight = inputs[1]->height();
_imageWidth = inputs[1]->cols();
_imageHeight = inputs[1]->rows();
_stepX = static_cast<float>(_imageWidth) / _layerWidth;
_stepY = static_cast<float>(_imageHeight) / _layerHeight;
......@@ -171,9 +206,9 @@ void PriorBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outpu
_boxWidth = _boxHeight = _minSize;
int idx = 0;
for (int h = 0; h < _layerHeight; ++h)
for (size_t h = 0; h < _layerHeight; ++h)
{
for (int w = 0; w < _layerWidth; ++w)
for (size_t w = 0; w < _layerWidth; ++w)
{
float center_x = (w + 0.5) * _stepX;
float center_y = (h + 0.5) * _stepY;
......@@ -201,7 +236,7 @@ void PriorBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outpu
}
// rest of priors
for (int r = 0; r < _aspectRatios.size(); ++r)
for (size_t r = 0; r < _aspectRatios.size(); ++r)
{
float ar = _aspectRatios[r];
if (fabs(ar - 1.) < 1e-6)
......@@ -224,7 +259,7 @@ void PriorBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outpu
// clip the prior's coordidate such that it is within [0, 1]
if (_clip)
{
for (int d = 0; d < _outChannelSize; ++d)
for (size_t d = 0; d < _outChannelSize; ++d)
{
outputPtr[d] = std::min<float>(std::max<float>(outputPtr[d], 0.), 1.);
}
......@@ -233,17 +268,17 @@ void PriorBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outpu
outputPtr = outputs[0].ptrf(0, 1);
if(_variance.size() == 1)
{
Mat secondChannel(outputs[0].height(), outputs[0].width(), CV_32F, outputPtr);
Mat secondChannel(outputs[0].rows(), outputs[0].cols(), CV_32F, outputPtr);
secondChannel.setTo(Scalar(_variance[0]));
}
else
{
int count = 0;
for (int h = 0; h < _layerHeight; ++h)
for (size_t h = 0; h < _layerHeight; ++h)
{
for (int w = 0; w < _layerWidth; ++w)
for (size_t w = 0; w < _layerWidth; ++w)
{
for (int i = 0; i < _numPriors; ++i)
for (size_t i = 0; i < _numPriors; ++i)
{
for (int j = 0; j < 4; ++j)
{
......
......@@ -75,12 +75,22 @@ class PriorBoxLayer : public Layer
size_t _numPriors;
static const size_t _numAxes = 4;
static const std::string _layerName;
public:
PriorBoxLayer(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void checkParameter(const LayerParams &params, const std::string &parameterName);
template<typename T>
T getParameter(const LayerParams &params, const std::string &parameterName,
const size_t &idx = 0);
DictValue getParameterDict(const LayerParams &params,
const std::string &parameterName);
void getAspectRatios(const LayerParams &params);
void getVariance(const LayerParams &params);
};
}
}
......
Markdown is supported
Attach a file by drag & drop or click to upload
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment