Commit 1ca1a125 authored by Vadim Pisarevsky

Merge pull request #1183 from dkurt:fix_allocation_issues

parents a4cc8014 d5ac902f
@@ -49,18 +49,16 @@ class BlankLayerImpl : public BlankLayer
 public:
     BlankLayerImpl(const LayerParams&) {}

-    void allocate(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
-        outputs.resize(inputs.size());
-        for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i];
+        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
+        return true;
     }

-    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
-    {
-        for (size_t i = 0; i < inputs.size(); i++)
-            outputs[i] = *inputs[i];
-    }
+    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) {}
 };

 Ptr<BlankLayer> BlankLayer::create(const LayerParams& params)
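Context for this hunk: in the new dnn layer interface, shape inference is separated from execution. A layer no longer allocates output Mats itself; it reports output shapes through getMemoryShapes() and the framework allocates the buffers. Because a blank layer's output shapes equal its input shapes, the blob manager can presumably reuse the input buffers directly, which is why forward() becomes a no-op here. A minimal standalone sketch of that contract (simplified, not the patch itself; MatShape is cv::dnn's std::vector<int> alias):

    #include <vector>

    typedef std::vector<int> MatShape; // same alias cv::dnn uses

    // Sketch: a pass-through layer only declares that each output has the
    // same shape as the corresponding input; it allocates nothing itself.
    struct PassThroughShapes
    {
        bool getMemoryShapes(const std::vector<MatShape> &inputs,
                             std::vector<MatShape> &outputs) const
        {
            outputs = inputs; // one output per input, identical shape
            return true;      // shapes are fully determined by the inputs
        }
    };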
@@ -37,9 +37,9 @@ public:
     ElementWiseLayer(bool run_parallel_=false, const Func &f=Func()) : func(f), run_parallel(run_parallel_) {}

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                          const int requiredOutputs,
-                          std::vector<MatShape> &outputs,
-                          std::vector<MatShape> &internals) const
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const
     {
         Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
         return true;
@@ -203,7 +203,13 @@ public:
         CV_Assert(inputs.size() != 0);
         Size in(inputs[0][3], inputs[0][2]), out;

-        if (padMode.empty()) {
+        if (globalPooling)
+        {
+            out.height = 1;
+            out.width = 1;
+        }
+        else if (padMode.empty())
+        {
             //Yeah, something strange Caffe scheme-)
             out.height = static_cast<int>(ceil(static_cast<float>(in.height + 2 * pad.height -
                                                kernel.height) / stride.height)) + 1;
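The else-if branch keeps Caffe's ceil-based output size, out = ceil((in + 2*pad - kernel) / stride) + 1, while globalPooling now bypasses the formula and pins the output at 1x1. A small self-contained check of the arithmetic (the sample numbers are illustrative, not from the patch):

    #include <cmath>
    #include <cstdio>

    // Caffe-style pooling output size, as computed in the hunk above.
    static int caffePoolDim(int in, int pad, int kernel, int stride)
    {
        return static_cast<int>(std::ceil(
            static_cast<float>(in + 2 * pad - kernel) / stride)) + 1;
    }

    int main()
    {
        // e.g. a 7-wide input, 3x3 kernel, stride 2, no padding -> prints 3
        std::printf("%d\n", caffePoolDim(7, 0, 3, 2));
        // with globalPooling the formula is skipped entirely: the output
        // is 1x1 regardless of input size and kernel/stride parameters
        return 0;
    }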
@@ -149,6 +149,7 @@
             outputs.push_back(MatShape());
             computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
         }
+        internals = outputs;

         return true;
     }
@@ -160,9 +161,16 @@ public:
         Mat srcBlob = *inputs[0];
         int dims = srcBlob.dims;

         MatShape inputShape = shape(srcBlob), outShape = shape(outputs[0]);
-        bool channelsReduced = dims > (int)outShape.size() ||
-                               (dims == 4 && inputShape[1] > outShape[1]);
-        performReordering = enableReordering && dims == 4 && channelsReduced;
+
+        // input.total() == output.total(), so if reordering is required,
+        // the shapes must disagree on one of the common axes.
+        // Example where reordering IS required: from 1x128x4x4 to 1x2048.
+        // Example where reordering is NOT required: from 1x1024x1x1 to 1x1024.
+        bool reorderingRequired = false;
+        const int minDims = min(dims, (int)outShape.size());
+        for (int i = 0; !reorderingRequired && i < minDims; ++i)
+            reorderingRequired = inputShape[i] != outShape[i];
+        performReordering = enableReordering && reorderingRequired;
     }
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
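The replaced heuristic (channelsReduced) could misclassify reshapes whose totals match but whose axes differ in other ways; the new loop simply compares the axes the two shapes have in common. A standalone restatement of the check, using the PR's own examples (the helper name is ours, not the patch's):

    #include <algorithm>
    #include <vector>

    typedef std::vector<int> MatShape;

    // Reordering is needed exactly when input and output shapes disagree
    // on some axis they both have (the total element counts already match).
    static bool reorderingRequired(const MatShape &in, const MatShape &out)
    {
        const int minDims = std::min((int)in.size(), (int)out.size());
        for (int i = 0; i < minDims; ++i)
            if (in[i] != out[i])
                return true;
        return false;
    }

    // reorderingRequired({1,128,4,4}, {1,2048})  -> true  (128 != 2048)
    // reorderingRequired({1,1024,1,1}, {1,1024}) -> false (common axes match)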
@@ -170,13 +178,11 @@ public:
         for (size_t i = 0; i < inputs.size(); i++)
         {
             Mat srcBlob = *inputs[i];
-            MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
+            MatShape inputShape = shape(srcBlob);

             if (performReordering)
             {
-                Mat reordered_blob(inputShape, srcBlob.type());
-
-                float *dstData = reordered_blob.ptr<float>();
+                float *dstData = internals[i].ptr<float>();
                 const float *srcData = srcBlob.ptr<float>();

                 int num = inputShape[0], channels = inputShape[1], height = inputShape[2], width = inputShape[3];
@@ -196,8 +202,7 @@ public:
                         }
                     }
                 }
-
-                outputs[i] = reordered_blob.reshape(1, outShape);
+                internals[i].copyTo(outputs[i]);
             }
         }
     }
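Taken together, the reshape changes move the temporary buffer out of the hot path: getMemoryShapes() now declares one internal blob per output (internals = outputs), the framework allocates those blobs once, and forward() writes the reordered data into the preallocated buffer and copies it to the output instead of constructing a fresh Mat on every call. A sketch of the before/after pattern (illustrative function names, not the patch verbatim):

    #include <cstring>
    #include <opencv2/core.hpp>

    // Before: a temporary Mat is heap-allocated on every forward() call.
    void forwardEachCall(const cv::Mat &src, cv::Mat &dst)
    {
        cv::Mat tmp(src.dims, src.size.p, src.type()); // fresh allocation
        src.copyTo(tmp);  // stand-in for the NCHW reordering work
        tmp.copyTo(dst);
    }

    // After: the buffer already exists because getMemoryShapes() declared
    // it (internals = outputs); forward() only fills and copies it.
    void forwardPreallocated(const cv::Mat &src, cv::Mat &internal, cv::Mat &dst)
    {
        CV_Assert(src.type() == CV_32F && internal.total() == src.total());
        std::memcpy(internal.ptr<float>(), src.ptr<float>(),
                    src.total() * sizeof(float)); // stand-in for the reorder loop
        internal.copyTo(dst); // dst was sized by the framework as well
    }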