Commit ffcb070d authored by Anna Petrovicheva

Rewrote Concat layer

parent 22a14ea7
@@ -47,54 +47,72 @@ namespace cv
 {
 namespace dnn
 {
 ConcatLayer::ConcatLayer(LayerParams &params) : Layer(params)
 {
     axis = params.get<int>("axis", 1);
     CV_Assert(axis >= 0);
 }
 
 void ConcatLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
     CV_Assert(inputs.size() > 0);
 
     int refType = inputs[0]->type();
     BlobShape refShape = inputs[0]->shape();
     CV_Assert(axis < refShape.dims());
 
     int axisSum = 0;
     for (size_t i = 0; i < inputs.size(); i++)
     {
         BlobShape curShape = inputs[i]->shape();
 
         CV_Assert(curShape.dims() == refShape.dims() && inputs[i]->type() == refType);
         for (int axisId = 0; axisId < refShape.dims(); axisId++)
         {
             if (axisId != axis && refShape[axisId] != curShape[axisId])
                 CV_Error(Error::StsBadSize, "Inconsitent shape for ConcatLayer");
         }
 
         axisSum += curShape[axis];
     }
 
     refShape[axis] = axisSum;
     outputs.resize(1);
     outputs[0].create(refShape);
 }
 
 void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
-    // In case when Blob shape used in allocation and inner matrix shape do not match, this layer did not work in previous implementation. This implementation is just a fix and needs to be rewritten.
-    size_t usedSize = 0;
-    for (size_t i = 0; i < inputs.size(); i++)
-    {
-        Mat inMat(1, inputs[i]->total(), CV_32F, inputs[i]->ptrf());
-        Mat outMat(1, inputs[i]->total(), CV_32F, outputs[0].ptrf() + usedSize);
-
-        inMat.copyTo(outMat);
-        usedSize += inputs[i]->total();
-    }
+    // In case when Blob shape used in allocation and inner matrix shape do not match, this layer did not work in previous implementation. This implementation is just a fix and needs to be rewritten more optimally.
+    if (inputs.size() == 1)
+    {
+        return;
+    }
+
+    float* outputData = outputs[0].ptrf();
+
+    size_t numConcats = inputs[0]->total(0, axis);
+    size_t outputStride = outputs[0].total(axis);
+    size_t offset = 0;
+
+    for (int i = 0; i < inputs.size(); ++i)
+    {
+        size_t inputSliceSize = inputs[i]->total(axis);
+        const float* inputData = inputs[i]->ptrf();
+
+        for (size_t n = 0; n < numConcats; ++n)
+        {
+            const float* src = inputData + n * inputSliceSize;
+            float* dst = outputData + n * outputStride + offset;
+            // memcpy(dst, src, inputSliceSize);
+            for(size_t k = 0; k < inputSliceSize; k++)
+            {
+                dst[k] = src[k];
+            }
+        }
+
+        offset += inputSliceSize;
+    }
 }
 }
 }
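
For reference, the indexing scheme of the new forward() can be exercised outside the dnn module. The sketch below is a minimal, self-contained reimplementation under stated assumptions: it works on raw float arrays and plain shape vectors instead of cv::dnn::Blob, and the helper names (totalFrom, concat) are made up for illustration. numConcats is the product of the dimensions before the concatenation axis; each input contributes a contiguous slice of inputSliceSize elements per outer index, placed at a running offset inside each output slice of outputStride elements. The old implementation copied every input as one contiguous block, which is only correct when numConcats == 1.

```cpp
#include <cstdio>
#include <cstring>
#include <vector>

// Product of shape[from .. end), i.e. the number of elements in one slice.
static size_t totalFrom(const std::vector<int>& shape, int from)
{
    size_t t = 1;
    for (size_t i = from; i < shape.size(); ++i)
        t *= shape[i];
    return t;
}

// Concatenate tensors (equal shapes except along `axis`) into `out`.
static void concat(const std::vector<const float*>& inputs,
                   const std::vector<std::vector<int> >& shapes,
                   int axis, float* out, const std::vector<int>& outShape)
{
    size_t numConcats = 1;                            // dims before the concat axis
    for (int i = 0; i < axis; ++i)
        numConcats *= shapes[0][i];
    size_t outputStride = totalFrom(outShape, axis);  // slice size of the output
    size_t offset = 0;                                // where this input lands inside a slice

    for (size_t i = 0; i < inputs.size(); ++i)
    {
        size_t inputSliceSize = totalFrom(shapes[i], axis);
        for (size_t n = 0; n < numConcats; ++n)
            std::memcpy(out + n * outputStride + offset,
                        inputs[i] + n * inputSliceSize,
                        inputSliceSize * sizeof(float));
        offset += inputSliceSize;
    }
}

int main()
{
    // Two 2x2 matrices concatenated along axis 1 -> one 2x4 matrix.
    float a[] = { 1, 2,
                  3, 4 };
    float b[] = { 5, 6,
                  7, 8 };
    float out[8];

    std::vector<const float*> inputs;
    inputs.push_back(a);
    inputs.push_back(b);
    std::vector<std::vector<int> > shapes(2, std::vector<int>(2, 2));
    std::vector<int> outShape;
    outShape.push_back(2);
    outShape.push_back(4);

    concat(inputs, shapes, 1, out, outShape);

    for (int r = 0; r < 2; ++r)       // expected: 1 2 5 6 / 3 4 7 8
    {
        for (int c = 0; c < 4; ++c)
            std::printf("%g ", out[r * 4 + c]);
        std::printf("\n");
    }
    return 0;
}
```

Built with a plain C++ compiler this prints the two inputs joined column-wise (1 2 5 6 / 3 4 7 8), which is what the rewritten loop computes for axis = 1 on two 2x2 inputs.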