Commit dcf42525 authored by Vitaliy Lyudvichenko's avatar Vitaliy Lyudvichenko

Extended and generalized CropLayer implementation

Also, a small fix inside Python types conversion was added
parent 8b4b7f6c
...@@ -20,7 +20,7 @@ if(NOT EXISTS ${${model}_dst}) ...@@ -20,7 +20,7 @@ if(NOT EXISTS ${${model}_dst})
else() else()
file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS STATUS status_vec) file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS STATUS status_vec)
endif() endif()
list(GET status_vec 0 status) list(GET status_vec 0 status)
list(GET status_vec 1 status_msg) list(GET status_vec 1 status_msg)
if(status EQUAL 0) if(status EQUAL 0)
......
...@@ -370,6 +370,9 @@ namespace dnn ...@@ -370,6 +370,9 @@ namespace dnn
class CV_EXPORTS_W CropLayer : public Layer class CV_EXPORTS_W CropLayer : public Layer
{ {
public: public:
CV_PROP int startAxis;
CV_PROP std::vector<int> offset;
static Ptr<CropLayer> create(int start_axis, const std::vector<int> &offset); static Ptr<CropLayer> create(int start_axis, const std::vector<int> &offset);
}; };
......
...@@ -70,7 +70,7 @@ bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name) ...@@ -70,7 +70,7 @@ bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
return true; //Current state will be used return true; //Current state will be used
else if (PyLong_Check(o)) else if (PyLong_Check(o))
{ {
dv = dnn::DictValue(PyLong_AsLong(o)); dv = dnn::DictValue((int64)PyLong_AsLongLong(o));
return true; return true;
} }
else if (PyFloat_Check(o)) else if (PyFloat_Check(o))
......
...@@ -10,7 +10,7 @@ def prepare_image(img): ...@@ -10,7 +10,7 @@ def prepare_image(img):
blob = np.moveaxis(img, 2, 0) blob = np.moveaxis(img, 2, 0)
blob = np.reshape(blob.astype(np.float32), (-1, 3, 224, 224)) blob = np.reshape(blob.astype(np.float32), (-1, 3, 224, 224))
return blob return blob
def timeit_forward(net): def timeit_forward(net):
print("OpenCL:", cv2.ocl.useOpenCL()) print("OpenCL:", cv2.ocl.useOpenCL())
print("Runtime:", timeit.timeit(lambda: net.forward(), number=10)) print("Runtime:", timeit.timeit(lambda: net.forward(), number=10))
...@@ -26,7 +26,7 @@ cv2.ocl.setUseOpenCL(True) #Disable OCL if you want ...@@ -26,7 +26,7 @@ cv2.ocl.setUseOpenCL(True) #Disable OCL if you want
net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel') net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')
net.setBlob(".data", blob) net.setBlob(".data", blob)
net.forward() net.forward()
timeit_forward(net) #Uncomment to check performance #timeit_forward(net) #Uncomment to check performance
prob = net.getBlob("prob") prob = net.getBlob("prob")
print("Output:", prob.shape, prob.dtype) print("Output:", prob.shape, prob.dtype)
......
...@@ -232,30 +232,16 @@ Ptr<Layer> createLayerFromCaffe<PowerLayer>(LayerParams& params) ...@@ -232,30 +232,16 @@ Ptr<Layer> createLayerFromCaffe<PowerLayer>(LayerParams& params)
template<> //CropLayer specialization template<> //CropLayer specialization
Ptr<Layer> createLayerFromCaffe<CropLayer>(LayerParams& params) Ptr<Layer> createLayerFromCaffe<CropLayer>(LayerParams& params)
{ {
int start_axis = params.get<int>("axis"); int start_axis = params.get<int>("axis", 2);
if (4 <= start_axis) DictValue *paramOffset = params.ptr("offset");
CV_Error(Error::StsBadArg, "crop axis bigger than input dim");
DictValue paramOffset = params.get("offset"); std::vector<int> offset;
if (paramOffset)
std::vector<int> offset(4, 0);
if (1 < paramOffset.size())
{ {
if (4 - start_axis != paramOffset.size()) for (int i = 0; i < paramOffset->size(); i++)
CV_Error(Error::StsBadArg, "number of offset values specified must be equal to the number of dimensions following axis."); offset.push_back(paramOffset->get<int>(i));
for (size_t i = start_axis; i < offset.size(); i++)
{
offset[i] = paramOffset.get<int>(i);
}
}
else
{
const int offset_val = paramOffset.get<int>(0);
for (size_t i = start_axis; i < offset.size(); i++)
{
offset[i] = offset_val;
}
} }
return Ptr<Layer>(CropLayer::create(start_axis, offset)); return Ptr<Layer>(CropLayer::create(start_axis, offset));
} }
......
...@@ -47,57 +47,82 @@ namespace cv ...@@ -47,57 +47,82 @@ namespace cv
{ {
namespace dnn namespace dnn
{ {
CropLayerImpl::CropLayerImpl(int start_axis_, const std::vector<int> &offset_)
CropLayerImpl::CropLayerImpl(int start_axis_, const std::vector<int> &offset_)
{
startAxis = start_axis_;
offset = offset_;
}
void CropLayerImpl::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{
CV_Assert(2 == inputs.size());
const Blob &inpBlob = *inputs[0];
const Blob &inpSzBlob = *inputs[1];
int start_axis = inpBlob.canonicalAxis(startAxis);
int dims = inpBlob.dims();
std::vector<int> offset_final(dims, 0);
if (offset.size() == 1)
{ {
start_axis = start_axis_; for (int i = start_axis; i < dims; i++)
offset = offset_; offset_final[i] = offset[0];
} }
else if (offset.size() > 1)
void CropLayerImpl::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{ {
CV_Assert(2 == inputs.size()); if ((int)offset.size() != dims - start_axis)
CV_Error(Error::StsBadArg, "number of offset values specified must be equal to the number of dimensions following axis.");
const Blob &inpBlob = *inputs[0]; for (int i = start_axis; i < dims; i++)
CV_Assert(inpBlob.dims() == 4 && inpBlob.type() == CV_32F); offset_final[i] = offset[i - start_axis];
}
const Blob &inpSzBlob = *inputs[1]; BlobShape dstShape = inpBlob.shape();
crop_ranges.resize(dims, Range::all());
for (int i = start_axis; i < dims; i++)
{
dstShape[i] = inpSzBlob.size(i);
outSizes.resize(4, 0); if (!offset.empty()) //normal case
for (int i = 0; i < 4; i++)
{ {
if (i < start_axis) if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size(i) > inpBlob.size(i))
outSizes[i] = inpBlob.size(i);
else
outSizes[i] = inpSzBlob.size(i);
if (offset[i] + outSizes[i] > inpBlob.size(i))
CV_Error(Error::StsBadArg, "invalid crop parameters"); CV_Error(Error::StsBadArg, "invalid crop parameters");
}
outputs.resize(1);
outputs[0].create(BlobShape(outSizes));
}
void CropLayerImpl::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs) crop_ranges[i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size(i));
{ }
Blob input = *inputs[0]; else //detect offset automatically so that cropped image is center of original one
Blob output = outputs[0];
for (int num = 0; num < outSizes[0]; ++num)
{ {
for (int ch = 0; ch < outSizes[1]; ++ch) if (inpSzBlob.size(i) > inpBlob.size(i))
{ CV_Error(Error::StsBadArg, "invalid output blob size");
for (int row = 0; row < outSizes[2]; ++row)
{ int cur_crop = (inpBlob.size(i) - inpSzBlob.size(i)) / 2;
float *srcData = input.ptrf(num + offset[0], ch + offset[1], row + offset[2]); crop_ranges[i] = Range(cur_crop, cur_crop + inpSzBlob.size(i));
float *dstData = output.ptrf(num, ch, row);
memcpy(dstData, srcData + offset[3], sizeof(float) * outSizes[3]);
}
}
} }
} }
Ptr<CropLayer> CropLayer::create(int start_axis, const std::vector<int> &offset) outputs.resize(1);
{ outputs[0].create(dstShape);
return Ptr<CropLayer>(new CropLayerImpl(start_axis, offset)); }
}
void CropLayerImpl::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
{
Blob &input = *inputs[0];
Blob &output = outputs[0];
#ifdef HAVE_OPENCL
if (input.getState() == Blob::HEAD_AT_UMAT)
input.umatRefConst()(&crop_ranges[0]).copyTo(output.umatRef());
else
#endif
input.matRefConst()(&crop_ranges[0]).copyTo(output.matRef());
}
Ptr<CropLayer> CropLayer::create(int start_axis, const std::vector<int> &offset)
{
return Ptr<CropLayer>(new CropLayerImpl(start_axis, offset));
}
} }
} }
...@@ -50,9 +50,7 @@ namespace dnn ...@@ -50,9 +50,7 @@ namespace dnn
{ {
class CropLayerImpl : public CropLayer class CropLayerImpl : public CropLayer
{ {
int start_axis; std::vector<Range> crop_ranges;
std::vector<int> offset;
std::vector<int> outSizes;
public: public:
CropLayerImpl(int start_axis, const std::vector<int> &offset); CropLayerImpl(int start_axis, const std::vector<int> &offset);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment