Commit e76b375a authored by Alexander Alekhin's avatar Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents 9f5f64e1 90b8a03a
...@@ -224,8 +224,10 @@ void calib::calibDataController::filterFrames() ...@@ -224,8 +224,10 @@ void calib::calibDataController::filterFrames()
cv::Mat newErrorsVec = cv::Mat((int)numberOfFrames - 1, 1, CV_64F); cv::Mat newErrorsVec = cv::Mat((int)numberOfFrames - 1, 1, CV_64F);
std::copy(mCalibData->perViewErrors.ptr<double>(0), std::copy(mCalibData->perViewErrors.ptr<double>(0),
mCalibData->perViewErrors.ptr<double>((int)worstElemIndex), newErrorsVec.ptr<double>(0)); mCalibData->perViewErrors.ptr<double>((int)worstElemIndex), newErrorsVec.ptr<double>(0));
std::copy(mCalibData->perViewErrors.ptr<double>((int)worstElemIndex + 1), mCalibData->perViewErrors.ptr<double>((int)numberOfFrames), if((int)worstElemIndex < (int)numberOfFrames-1) {
std::copy(mCalibData->perViewErrors.ptr<double>((int)worstElemIndex + 1), mCalibData->perViewErrors.ptr<double>((int)numberOfFrames),
newErrorsVec.ptr<double>((int)worstElemIndex)); newErrorsVec.ptr<double>((int)worstElemIndex));
}
mCalibData->perViewErrors = newErrorsVec; mCalibData->perViewErrors = newErrorsVec;
} }
} }
......
...@@ -269,6 +269,11 @@ static inline std::ostream& operator << (std::ostream& out, const MatSize& msize ...@@ -269,6 +269,11 @@ static inline std::ostream& operator << (std::ostream& out, const MatSize& msize
return out; return out;
} }
static inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
{
return s << "[" << r.start << " : " << r.end << ")";
}
} // cv } // cv
#ifdef _MSC_VER #ifdef _MSC_VER
......
...@@ -42,8 +42,8 @@ ...@@ -42,8 +42,8 @@
#ifndef OPENCV_DNN_DNN_SHAPE_UTILS_HPP #ifndef OPENCV_DNN_DNN_SHAPE_UTILS_HPP
#define OPENCV_DNN_DNN_SHAPE_UTILS_HPP #define OPENCV_DNN_DNN_SHAPE_UTILS_HPP
#include <opencv2/core.hpp> #include <opencv2/dnn/dnn.hpp>
#include <opencv2/core/types_c.h> #include <opencv2/core/types_c.h> // CV_MAX_DIM
#include <iostream> #include <iostream>
#include <ostream> #include <ostream>
#include <sstream> #include <sstream>
...@@ -52,12 +52,6 @@ namespace cv { ...@@ -52,12 +52,6 @@ namespace cv {
namespace dnn { namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN CV__DNN_EXPERIMENTAL_NS_BEGIN
//Useful shortcut
inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
{
return s << "[" << r.start << ", " << r.end << ")";
}
//Slicing //Slicing
struct _Range : public cv::Range struct _Range : public cv::Range
......
...@@ -186,15 +186,20 @@ public: ...@@ -186,15 +186,20 @@ public:
std::vector<MatShape> &outputs, std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE std::vector<MatShape> &internals) const CV_OVERRIDE
{ {
outputs.clear(); if (inputs.size() == 1 || inputs.size() == requiredOutputs)
for (size_t i = 0; i < inputs.size(); i++)
{ {
outputs.push_back(MatShape()); outputs.clear();
computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back()); for (size_t i = 0; i < inputs.size(); i++)
{
outputs.push_back(MatShape());
computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back());
}
}
else
{
CV_Assert(inputs.size() == 2, total(inputs[0]) == total(inputs[1]));
outputs.assign(1, inputs[1]);
} }
internals = outputs;
return true; return true;
} }
...@@ -206,7 +211,7 @@ public: ...@@ -206,7 +211,7 @@ public:
inps.getUMatVector(inputs); inps.getUMatVector(inputs);
outs.getUMatVector(outputs); outs.getUMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++) for (size_t i = 0; i < outputs.size(); i++)
{ {
UMat srcBlob = inputs[i]; UMat srcBlob = inputs[i];
void *src_handle = inputs[i].handle(ACCESS_READ); void *src_handle = inputs[i].handle(ACCESS_READ);
...@@ -240,7 +245,7 @@ public: ...@@ -240,7 +245,7 @@ public:
CV_TRACE_FUNCTION(); CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str()); CV_TRACE_ARG_VALUE(name, "name", name.c_str());
for (size_t i = 0; i < inputs.size(); i++) for (size_t i = 0; i < outputs.size(); i++)
{ {
Mat srcBlob = *inputs[i]; Mat srcBlob = *inputs[i];
if (outputs[i].data != srcBlob.data) if (outputs[i].data != srcBlob.data)
...@@ -248,7 +253,7 @@ public: ...@@ -248,7 +253,7 @@ public:
} }
} }
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{ {
#ifdef HAVE_INF_ENGINE #ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp; InferenceEngine::LayerParams lp;
...@@ -256,7 +261,15 @@ public: ...@@ -256,7 +261,15 @@ public:
lp.type = "Reshape"; lp.type = "Reshape";
lp.precision = InferenceEngine::Precision::FP32; lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ReshapeLayer> ieLayer(new InferenceEngine::ReshapeLayer(lp)); std::shared_ptr<InferenceEngine::ReshapeLayer> ieLayer(new InferenceEngine::ReshapeLayer(lp));
ieLayer->shape = newShapeDesc; if (!newShapeDesc.empty())
ieLayer->shape = newShapeDesc;
else
{
CV_Assert(inputs.size() == 2);
InferenceEngine::DataPtr shapeSrc = infEngineDataNode(inputs[1]);
// NOTE: shapeSrc->dims are reversed
ieLayer->shape = std::vector<int>(shapeSrc->dims.rbegin(), shapeSrc->dims.rend());
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer)); return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE #endif // HAVE_INF_ENGINE
return Ptr<BackendNode>(); return Ptr<BackendNode>();
......
...@@ -524,8 +524,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) ...@@ -524,8 +524,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{ {
// NOTE: Inference Engine sizes are reversed. // NOTE: Inference Engine sizes are reversed.
std::vector<size_t> dims = blob->dims(); std::vector<size_t> dims = blob->dims();
std::vector<int> size(dims.begin(), dims.end()); std::vector<int> size(dims.rbegin(), dims.rend());
std::reverse(size.begin(), size.end());
return Mat(size, CV_32F, (void*)blob->buffer()); return Mat(size, CV_32F, (void*)blob->buffer());
} }
...@@ -540,8 +539,7 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs, ...@@ -540,8 +539,7 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
std::vector<MatShape> &internals) const std::vector<MatShape> &internals) const
{ {
std::vector<size_t> dims = output->dims; std::vector<size_t> dims = output->dims;
std::vector<int> shape(dims.begin(), dims.end()); std::vector<int> shape(dims.rbegin(), dims.rend());
std::reverse(shape.begin(), shape.end());
outputs.assign(1, shape); outputs.assign(1, shape);
return false; return false;
} }
......
...@@ -615,6 +615,19 @@ public: ...@@ -615,6 +615,19 @@ public:
} }
}; };
// Subgraph matcher for the TensorFlow pattern
//     Reshape(input, Shape(shapeSrc))
// i.e. an input tensor reshaped to the runtime shape of another tensor.
// On a match it is fused into a single Reshape node that receives
// `shapeSrc` directly as its second input, dropping the intermediate
// Shape op. NOTE(review): exact matching/fusion semantics come from the
// Subgraph base class defined elsewhere — node order in setFusedNode is
// presumably (data, shape source); confirm against Subgraph's contract.
class ReshapeAsShapeSubgraph : public Subgraph
{
public:
ReshapeAsShapeSubgraph()
{
// "" matches any node: the tensor being reshaped and the tensor
// whose shape is the reshape target.
int input = addNodeToMatch("");
int shapeSrc = addNodeToMatch("");
// Shape op reading the shape-source tensor.
int shape = addNodeToMatch("Shape", shapeSrc);
// The Reshape consuming the data tensor and the computed shape.
addNodeToMatch("Reshape", input, shape);
// Replacement: Reshape fed by the data tensor and the shape-source
// tensor itself (the Shape node is eliminated).
setFusedNode("Reshape", input, shapeSrc);
}
};
void simplifySubgraphs(tensorflow::GraphDef& net) void simplifySubgraphs(tensorflow::GraphDef& net)
{ {
std::vector<Ptr<Subgraph> > subgraphs; std::vector<Ptr<Subgraph> > subgraphs;
...@@ -630,6 +643,7 @@ void simplifySubgraphs(tensorflow::GraphDef& net) ...@@ -630,6 +643,7 @@ void simplifySubgraphs(tensorflow::GraphDef& net)
subgraphs.push_back(Ptr<Subgraph>(new DeconvolutionSameKerasSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new DeconvolutionSameKerasSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new ResizeBilinearSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph())); subgraphs.push_back(Ptr<Subgraph>(new UpsamplingKerasSubgraph()));
subgraphs.push_back(Ptr<Subgraph>(new ReshapeAsShapeSubgraph()));
int numNodes = net.node_size(); int numNodes = net.node_size();
std::vector<int> matchedNodesIds; std::vector<int> matchedNodesIds;
......
...@@ -1038,37 +1038,50 @@ void TFImporter::populateNet(Net dstNet) ...@@ -1038,37 +1038,50 @@ void TFImporter::populateNet(Net dstNet)
else if (type == "Reshape") else if (type == "Reshape")
{ {
Pin inpId = parsePin(layer.input(0)); Pin inpId = parsePin(layer.input(0));
Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
int inpLayout = getDataLayout(layer.input(0), data_layouts); int inpLayout = getDataLayout(layer.input(0), data_layouts);
if (newShape.total() != 4 && inpLayout == DATA_LAYOUT_NHWC) // There are two possible implementations: reshape an input using
// predefined sizes or use a second input blob as a source of new shape.
if (value_id.find(layer.input(1)) != value_id.end())
{ {
LayerParams permLP; Mat newShape = getTensorContent(getConstBlob(layer, value_id, 1));
int order[] = {0, 2, 3, 1}; // From OpenCV's NCHW to NHWC.
permLP.set("order", DictValue::arrayInt<int*>(order, 4));
std::string permName = name + "/nchw"; if (newShape.total() != 4 && inpLayout == DATA_LAYOUT_NHWC)
CV_Assert(layer_id.find(permName) == layer_id.end()); {
int permId = dstNet.addLayer(permName, "Permute", permLP); LayerParams permLP;
layer_id[permName] = permId; int order[] = {0, 2, 3, 1}; // From OpenCV's NCHW to NHWC.
connect(layer_id, dstNet, inpId, permId, 0); permLP.set("order", DictValue::arrayInt<int*>(order, 4));
inpId = Pin(permName);
inpLayout = DATA_LAYOUT_NCHW; std::string permName = name + "/nchw";
CV_Assert(layer_id.find(permName) == layer_id.end());
int permId = dstNet.addLayer(permName, "Permute", permLP);
layer_id[permName] = permId;
connect(layer_id, dstNet, inpId, permId, 0);
inpId = Pin(permName);
inpLayout = DATA_LAYOUT_NCHW;
}
else if (newShape.total() == 4 && inpLayout == DATA_LAYOUT_NHWC)
{
// NHWC->NCHW
std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3));
std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2));
}
layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShape.total()));
int id = dstNet.addLayer(name, "Reshape", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, inpId, id, 0);
data_layouts[name] = newShape.total() == 2 ? DATA_LAYOUT_PLANAR : inpLayout;
} }
else if (newShape.total() == 4 && inpLayout == DATA_LAYOUT_NHWC) else
{ {
// NHWC->NCHW int id = dstNet.addLayer(name, "Reshape", layerParams);
std::swap(*newShape.ptr<int32_t>(0, 2), *newShape.ptr<int32_t>(0, 3)); layer_id[name] = id;
std::swap(*newShape.ptr<int32_t>(0, 1), *newShape.ptr<int32_t>(0, 2)); connect(layer_id, dstNet, inpId, id, 0);
connect(layer_id, dstNet, parsePin(layer.input(1)), id, 1);
data_layouts[name] = inpLayout;
} }
layerParams.set("dim", DictValue::arrayInt<int*>(newShape.ptr<int>(), newShape.total()));
int id = dstNet.addLayer(name, "Reshape", layerParams);
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, inpId, id, 0);
data_layouts[name] = newShape.total() == 2 ? DATA_LAYOUT_PLANAR : inpLayout;
} }
else if (type == "Flatten" || type == "Squeeze") else if (type == "Flatten" || type == "Squeeze")
{ {
......
...@@ -212,6 +212,7 @@ TEST_P(Test_TensorFlow_layers, reshape) ...@@ -212,6 +212,7 @@ TEST_P(Test_TensorFlow_layers, reshape)
runTensorFlowNet("shift_reshape_no_reorder"); runTensorFlowNet("shift_reshape_no_reorder");
runTensorFlowNet("reshape_no_reorder"); runTensorFlowNet("reshape_no_reorder");
runTensorFlowNet("reshape_reduce"); runTensorFlowNet("reshape_reduce");
runTensorFlowNet("reshape_as_shape");
} }
TEST_P(Test_TensorFlow_layers, flatten) TEST_P(Test_TensorFlow_layers, flatten)
......
...@@ -178,6 +178,8 @@ void cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) ...@@ -178,6 +178,8 @@ void cvtColor( InputArray _src, OutputArray _dst, int code, int dcn )
{ {
CV_INSTRUMENT_REGION() CV_INSTRUMENT_REGION()
CV_Assert(!_src.empty());
if(dcn <= 0) if(dcn <= 0)
dcn = dstChannels(code); dcn = dstChannels(code);
......
...@@ -247,10 +247,14 @@ struct CvtHelper ...@@ -247,10 +247,14 @@ struct CvtHelper
{ {
CvtHelper(InputArray _src, OutputArray _dst, int dcn) CvtHelper(InputArray _src, OutputArray _dst, int dcn)
{ {
CV_Assert(!_src.empty());
int stype = _src.type(); int stype = _src.type();
scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype); scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype);
CV_Assert( VScn::contains(scn) && VDcn::contains(dcn) && VDepth::contains(depth) ); CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image");
CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image");
CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image");
if (_src.getObj() == _dst.getObj()) // inplace processing (#6653) if (_src.getObj() == _dst.getObj()) // inplace processing (#6653)
_src.copyTo(src); _src.copyTo(src);
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
#include "precomp.hpp" #include "precomp.hpp"
#include "color.hpp" #include "color.hpp"
#define IPP_DISABLE_CVTCOLOR_GRAY2BGR_8UC3 1
namespace cv namespace cv
{ {
...@@ -1228,10 +1230,12 @@ static ippiGeneralFunc ippiRGB2GrayC4Tab[] = ...@@ -1228,10 +1230,12 @@ static ippiGeneralFunc ippiRGB2GrayC4Tab[] =
}; };
#if !IPP_DISABLE_CVTCOLOR_GRAY2BGR_8UC3
static IppStatus ippiGrayToRGB_C1C3R(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep, IppiSize roiSize) static IppStatus ippiGrayToRGB_C1C3R(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep, IppiSize roiSize)
{ {
return CV_INSTRUMENT_FUN_IPP(ippiGrayToRGB_8u_C1C3R, pSrc, srcStep, pDst, dstStep, roiSize); return CV_INSTRUMENT_FUN_IPP(ippiGrayToRGB_8u_C1C3R, pSrc, srcStep, pDst, dstStep, roiSize);
} }
#endif
static IppStatus ippiGrayToRGB_C1C3R(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep, IppiSize roiSize) static IppStatus ippiGrayToRGB_C1C3R(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep, IppiSize roiSize)
{ {
return CV_INSTRUMENT_FUN_IPP(ippiGrayToRGB_16u_C1C3R, pSrc, srcStep, pDst, dstStep, roiSize); return CV_INSTRUMENT_FUN_IPP(ippiGrayToRGB_16u_C1C3R, pSrc, srcStep, pDst, dstStep, roiSize);
...@@ -1516,7 +1520,11 @@ void cvtGraytoBGR(const uchar * src_data, size_t src_step, ...@@ -1516,7 +1520,11 @@ void cvtGraytoBGR(const uchar * src_data, size_t src_step,
if(dcn == 3) if(dcn == 3)
{ {
if( depth == CV_8U ) if( depth == CV_8U )
{
#if !IPP_DISABLE_CVTCOLOR_GRAY2BGR_8UC3
ippres = CvtColorIPPLoop(src_data, src_step, dst_data, dst_step, width, height, IPPGray2BGRFunctor<Ipp8u>()); ippres = CvtColorIPPLoop(src_data, src_step, dst_data, dst_step, width, height, IPPGray2BGRFunctor<Ipp8u>());
#endif
}
else if( depth == CV_16U ) else if( depth == CV_16U )
ippres = CvtColorIPPLoop(src_data, src_step, dst_data, dst_step, width, height, IPPGray2BGRFunctor<Ipp16u>()); ippres = CvtColorIPPLoop(src_data, src_step, dst_data, dst_step, width, height, IPPGray2BGRFunctor<Ipp16u>());
else else
......
...@@ -70,7 +70,7 @@ def affine_detect(detector, img, mask=None, pool=None): ...@@ -70,7 +70,7 @@ def affine_detect(detector, img, mask=None, pool=None):
''' '''
affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs
Apply a set of affine transormations to the image, detect keypoints and Apply a set of affine transformations to the image, detect keypoints and
reproject them into initial image coordinates. reproject them into initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details. See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment