Commit 2ad0487c authored by Alexander Alekhin, committed by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents 72ccb5fe 7c96857c
@@ -123,6 +123,9 @@ if(CV_GCC OR CV_CLANG)
     add_extra_compiler_option(-Wsign-promo)
     add_extra_compiler_option(-Wuninitialized)
     add_extra_compiler_option(-Winit-self)
+    if(CV_GCC AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 6.0) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0))
+      add_extra_compiler_option(-Wno-psabi)
+    endif()
     if(HAVE_CXX11)
       if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT ENABLE_PRECOMPILED_HEADERS)
         add_extra_compiler_option(-Wsuggest-override)
......
@@ -845,36 +845,24 @@ inline v_uint64x2 v_popcount(const v_int64x2& a)
 /** Mask **/
 inline int v_signmask(const v_uint8x16& a)
 {
-    vec_uchar16 sv = vec_sr(a.val, vec_uchar16_sp(7));
-    static const vec_uchar16 slm = {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7};
-    sv = vec_sl(sv, slm);
-    vec_uint4 sv4 = vec_sum4s(sv, vec_uint4_z);
-    static const vec_uint4 slm4 = {0, 0, 8, 8};
-    sv4 = vec_sl(sv4, slm4);
-    return vec_extract(vec_sums((vec_int4) sv4, vec_int4_z), 3);
+    static const vec_uchar16 qperm = {120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0};
+    return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2);
 }
 inline int v_signmask(const v_int8x16& a)
 { return v_signmask(v_reinterpret_as_u8(a)); }
 inline int v_signmask(const v_int16x8& a)
 {
-    static const vec_ushort8 slm = {0, 1, 2, 3, 4, 5, 6, 7};
-    vec_short8 sv = vec_sr(a.val, vec_ushort8_sp(15));
-    sv = vec_sl(sv, slm);
-    vec_int4 svi = vec_int4_z;
-    svi = vec_sums(vec_sum4s(sv, svi), svi);
-    return vec_extract(svi, 3);
+    static const vec_uchar16 qperm = {112, 96, 80, 64, 48, 32, 16, 0, 128, 128, 128, 128, 128, 128, 128, 128};
+    return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2);
 }
 inline int v_signmask(const v_uint16x8& a)
 { return v_signmask(v_reinterpret_as_s16(a)); }
 inline int v_signmask(const v_int32x4& a)
 {
-    static const vec_uint4 slm = {0, 1, 2, 3};
-    vec_int4 sv = vec_sr(a.val, vec_uint4_sp(31));
-    sv = vec_sl(sv, slm);
-    sv = vec_sums(sv, vec_int4_z);
-    return vec_extract(sv, 3);
+    static const vec_uchar16 qperm = {96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128};
+    return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2);
 }
 inline int v_signmask(const v_uint32x4& a)
 { return v_signmask(v_reinterpret_as_s32(a)); }
......
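The rewritten v_signmask gathers the sign bits of all lanes with a single vec_vbpermq bit-permute instead of the old shift/sum cascade. For cross-checking the new path, here is a scalar reference of the expected semantics (an illustrative sketch, not the library implementation): the sign bit of lane i ends up as bit i of the result.

    // Scalar reference for v_signmask over 16 uint8 lanes (a sketch for testing).
    static int signmask_u8_ref(const unsigned char a[16])
    {
        int mask = 0;
        for (int i = 0; i < 16; ++i)
            mask |= ((a[i] >> 7) & 1) << i;  // sign bit of lane i -> bit i of the mask
        return mask;
    }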
@@ -554,7 +554,9 @@ struct HWFeatures
         have[CV_CPU_FP16] = true;
     #endif
     #endif
+    #if defined _ARM_ && (defined(_WIN32_WCE) && _WIN32_WCE >= 0x800)
+        have[CV_CPU_NEON] = true;
+    #endif
         // there's no need to check VSX availability in runtime since it's always available on ppc64le CPUs
         have[CV_CPU_VSX] = (CV_VSX);
         // TODO: Check VSX3 availability in runtime for other platforms
......
@@ -160,14 +160,7 @@ TEST(Core_Ptr, assignment)
 {
     Ptr<Reporter> p1(new Reporter(&deleted1));
-#if defined(__clang__) && (__clang_major__ >= 9) && !defined(__APPLE__)
-    CV_DO_PRAGMA(GCC diagnostic push)
-    CV_DO_PRAGMA(GCC diagnostic ignored "-Wself-assign-overloaded")
-#endif
-    p1 = p1;
-#if defined(__clang__) && (__clang_major__ >= 9) && !defined(__APPLE__)
-    CV_DO_PRAGMA(GCC diagnostic pop)
-#endif
+    p1 = *&p1;
     EXPECT_FALSE(deleted1);
 }
......
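The test still exercises self-assignment, but writing it as `p1 = *&p1` routes the assignment through a pointer dereference, so clang's -Wself-assign-overloaded (which matches only the literal `x = x` form) no longer fires and the pragma scaffolding can go. A minimal sketch of the effect, assuming a recent clang:

    // Compile with: clang++ -Wall self_assign.cpp
    struct S { S& operator=(const S&) { return *this; } };
    int main()
    {
        S s;
        // s = s;   // clang warns: -Wself-assign-overloaded
        s = *&s;    // same behavior at runtime, but no warning
        return 0;
    }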
@@ -37,7 +37,9 @@ else()
     -Wunused-parameter -Wsign-compare
   )
 endif()
+if(HAVE_CUDA)
+  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
+endif()
 if(NOT HAVE_CXX11)
   ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-undef)  # LANG_CXX11 from protobuf files
 endif()
......
@@ -123,9 +123,12 @@ PERF_TEST_P_(DNNTestNetwork, SSD)
 PERF_TEST_P_(DNNTestNetwork, OpenFace)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
+    if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     processNet("dnn/openface_nn4.small2.v1.t7", "", "",
             Mat(cv::Size(96, 96), CV_32FC3));
 }
@@ -185,16 +188,6 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
 {
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        throw SkipTestException("Test is disabled for MyriadX");
-#endif
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is disabled for Myriad in OpenVINO 2019R2");
-#endif
     processNet("dnn/ssd_inception_v2_coco_2017_11_17.pb", "ssd_inception_v2_coco_2017_11_17.pbtxt", "",
             Mat(cv::Size(300, 300), CV_32FC3));
 }
......
@@ -719,21 +719,23 @@ struct DataLayer : public Layer
         CV_Assert(numChannels <= 4);
         // Scale
-        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                {numChannels});
+        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
+                                       InferenceEngine::Layout::C);
+        auto weights = InferenceEngine::make_shared_blob<float>(td);
         weights->allocate();
-        weights->set(std::vector<float>(numChannels, scaleFactors[0]));
+        float* weight_buf = weights->buffer().as<float*>();
+        std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
         // Mean subtraction
-        auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                               {numChannels});
+        auto biases = InferenceEngine::make_shared_blob<float>(td);
         biases->allocate();
-        std::vector<float> biasesVec(numChannels);
+        float* bias_buf = biases->buffer().as<float*>();
         for (int i = 0; i < numChannels; ++i)
         {
-            biasesVec[i] = -means[0][i] * scaleFactors[0];
+            bias_buf[i] = -means[0][i] * scaleFactors[0];
         }
-        biases->set(biasesVec);
         InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         addConstantData("weights", weights, ieLayer);
@@ -1536,7 +1538,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
+#else
+                dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+#endif
             }
         }
         else
@@ -1544,7 +1550,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = ld.name;
+#else
+                dataPtr->setName(ld.name);
+#endif
             }
         }
     }
@@ -1565,7 +1575,11 @@ struct Net::Impl
             for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
             {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = netInputLayer->outNames[i];
+#else
+                dataPtr->setName(netInputLayer->outNames[i]);
+#endif
             }
         }
         else
@@ -1573,7 +1587,11 @@ struct Net::Impl
             for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
            {
                 InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
                 dataPtr->name = ld.name;
+#else
+                dataPtr->setName(ld.name);
+#endif
             }
         }
         ieNode->net->addBlobs(ld.inputBlobsWrappers);
......
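These hunks track the Inference Engine API migration: the deprecated make_shared_blob(precision, dims) overloads and the public Blob::set() / Data::name members give way to TensorDesc-based construction, raw buffer() access, and Data::setName(). A condensed sketch of the new-style pattern, assuming a 2019 R2-era SDK (the helper name is illustrative):

    #include <algorithm>
    #include <inference_engine.hpp>  // assumes an OpenVINO 2019 R2-era SDK

    // Create a 1-D FP32 blob of `n` channels filled with `value`.
    InferenceEngine::Blob::Ptr makeConstBlob(size_t n, float value)
    {
        InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32,
                                       {n}, InferenceEngine::Layout::C);
        auto blob = InferenceEngine::make_shared_blob<float>(td);
        blob->allocate();
        float* buf = blob->buffer().as<float*>();
        std::fill(buf, buf + n, value);  // replaces the removed Blob::set()
        return blob;
    }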
@@ -111,7 +111,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(!input->dims.empty());
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(!dims.empty());
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
@@ -122,12 +123,10 @@ public:
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = input->dims[0];
+            ieLayer.getParameters()["axis"] = dims.size() - 1;
+            ieLayer.getParameters()["out_sizes"] = dims[0];
         }
-        std::vector<size_t> shape(input->dims);
-        std::reverse(shape.begin(), shape.end());
-        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
+        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
......
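The shape handling simplifies because the old input->dims field stored dimensions in reversed order (e.g. W, H, C, N), forcing a std::reverse before building ports, while getDims() already reports them in NCHW order. An assumed example of the change for a typical 4-D image input:

    // Illustration of the ordering change (values are an assumed example):
    //   old API: input->dims      == {W, H, C, N}  -> needed std::reverse
    //   new API: input->getDims() == {N, C, H, W}  -> usable directly
    std::vector<size_t> dims = {1, 3, 224, 224};  // N, C, H, W
    size_t channels = dims[1];                    // indexed from the reversed end before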
@@ -316,7 +316,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->dims.size()));
+        ieLayer.setAxis(clamp(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
......
@@ -541,15 +541,14 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
-        const int inpCn = input->dims[input->dims.size() - 2];  // NOTE: input->dims are reversed (WHIO or WHDIO)
+        std::vector<size_t> dims = input->getDims();
+        CV_Assert(dims.size() == 4 || dims.size() == 5);
+        const int inpCn = dims[1];
         const int outCn = blobs[0].size[0];
         const int inpGroupCn = blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
-        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
-                                                                     InferenceEngine::Layout::NCDHW;
+        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
+                                                              InferenceEngine::Layout::NCDHW;
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
         if (fusedWeights)
@@ -561,9 +560,10 @@ public:
         }
         else
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                                InferenceEngine::Precision::FP32, layout,
-                                ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                                InferenceEngine::Precision::FP32,
+                                ieWeights->getTensorDesc().getDims(), layout
+                            });
             ieWeights->allocate();
             Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
@@ -1953,9 +1953,10 @@ public:
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
         if (fusedWeights)
         {
-            ieWeights = InferenceEngine::make_shared_blob<float>(
-                                InferenceEngine::Precision::FP32, layout,
-                                ieWeights->dims());
+            ieWeights = InferenceEngine::make_shared_blob<float>({
+                                InferenceEngine::Precision::FP32,
+                                ieWeights->getTensorDesc().getDims(), layout
+                            });
             ieWeights->allocate();
             int inpCn = blobs[0].size[0];
......
@@ -261,7 +261,8 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        if (input->dims.size() == 4)
+        std::vector<size_t> dims = input->getDims();
+        if (dims.size() == 4)
         {
             InferenceEngine::Builder::NormalizeLayer ieLayer(name);
@@ -270,13 +271,14 @@ public:
             ieLayer.setEpsilon(epsilon);
             InferenceEngine::Builder::Layer l = ieLayer;
-            const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)
+            const int numChannels = dims[1];
             InferenceEngine::Blob::Ptr weights;
             if (blobs.empty())
             {
-                weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                   InferenceEngine::Layout::C,
-                                                                   {(size_t)numChannels});
+                weights = InferenceEngine::make_shared_blob<float>({
+                              InferenceEngine::Precision::FP32,
+                              {(size_t)numChannels}, InferenceEngine::Layout::C
+                          });
                 weights->allocate();
                 Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
......
@@ -167,9 +167,11 @@ public:
         if (kernel_size.size() == 3)
             return preferableTarget == DNN_TARGET_CPU;
         if (preferableTarget == DNN_TARGET_MYRIAD) {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
             if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2)) {
                 return !isMyriadX();
             }
+#endif
             return type == MAX || type == AVE;
         }
         else
......
@@ -207,12 +207,13 @@ public:
         }
         else
         {
-            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
-                                                                    {numChannels});
+            auto weights = InferenceEngine::make_shared_blob<float>({
+                InferenceEngine::Precision::FP32, {(size_t)numChannels},
+                InferenceEngine::Layout::C
+            });
             weights->allocate();
-            std::vector<float> ones(numChannels, 1);
-            weights->set(ones);
+            float* buf = weights->buffer().as<float*>();
+            std::fill(buf, buf + numChannels, 1);
             addConstantData("weights", weights, l);
         }
         if (hasBias)
......
@@ -301,14 +301,14 @@ public:
         {
             std::vector<size_t> outShape(numDims);
             for (int i = 0; i < numDims; ++i)
-                outShape[numDims - 1 - i] = sliceRanges[0][i].size();
+                outShape[i] = sliceRanges[0][i].size();
             ieLayer.getInputPorts()[1].setParameter("type", "weights");
-            // Fake blob which will be moved to inputs (as weights).
-            auto shapeSource = InferenceEngine::make_shared_blob<float>(
-                                  InferenceEngine::Precision::FP32,
-                                  InferenceEngine::Layout::ANY, outShape);
+            auto shapeSource = InferenceEngine::make_shared_blob<float>({
+                InferenceEngine::Precision::FP32, outShape,
+                InferenceEngine::Layout::ANY
+            });
             shapeSource->allocate();
             addConstantData("weights", shapeSource, ieLayer);
         }
......
@@ -329,7 +329,8 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
+        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
 #endif  // HAVE_INF_ENGINE
......
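Both axis fixes rely on dnn's clamp() helper, which normalizes a possibly negative axis index against the number of dimensions. A sketch of the assumed semantics (negative axes count back from the end):

    // Sketch of the axis normalization performed by dnn's clamp().
    static int clampAxis(int ax, int dims)
    {
        int res = ax < 0 ? ax + dims : ax;
        CV_Assert(0 <= res && res < dims);
        return res;
    }
    // clampAxis(-1, 4) == 3, clampAxis(1, 4) == 1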
@@ -92,18 +92,22 @@ public:
     void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                  bool isAsync);
-    void initPlugin(InferenceEngine::ICNNNetwork& net);
+    void initPlugin(InferenceEngine::CNNNetwork& net);
     void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
 private:
     InferenceEngine::Builder::Network netBuilder;
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
     InferenceEngine::ExecutableNetwork netExec;
     InferenceEngine::BlobMap allBlobs;
-    InferenceEngine::TargetDevice targetDevice;
+    std::string device_name;
+#if INF_ENGINE_VER_MAJOR_LE(2019010000)
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+#else
+    bool isInit = false;
+#endif
     struct InfEngineReqWrapper
     {
......
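The INF_ENGINE_VER_MAJOR_* guards compare INF_ENGINE_RELEASE, an integer of the form YYYYRRPPPP (e.g. 2019010000 for OpenVINO 2019 R1), against a requested release. A sketch of how such guards are typically defined; the real definitions live in OpenCV's op_inf_engine.hpp and may differ in detail:

    // Compare only the YYYYRR part, ignoring the patch digits (assumed form).
    #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) >  ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))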
@@ -136,13 +136,10 @@ static const std::vector<std::string> getOpenVINOTestModelsList()
 static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
 {
-    std::vector<int> reversedDims(dims.begin(), dims.end());
-    std::reverse(reversedDims.begin(), reversedDims.end());
-    m.create(reversedDims, CV_32F);
+    m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
     randu(m, -1, 1);
-    dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+    dataPtr = make_shared_blob<float>({Precision::FP32, dims, Layout::ANY}, (float*)m.data);
 }
 void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
@@ -154,32 +151,42 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     CNNNetwork net = reader.getNetwork();
+    std::string device_name;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+    Core ie;
+#else
     InferenceEnginePluginPtr enginePtr;
     InferencePlugin plugin;
+#endif
     ExecutableNetwork netExec;
     InferRequest infRequest;
     try
     {
-        auto dispatcher = InferenceEngine::PluginDispatcher({""});
         switch (target)
         {
             case DNN_TARGET_CPU:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eCPU);
+                device_name = "CPU";
                 break;
             case DNN_TARGET_OPENCL:
             case DNN_TARGET_OPENCL_FP16:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eGPU);
+                device_name = "GPU";
                 break;
             case DNN_TARGET_MYRIAD:
-                enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eMYRIAD);
+                device_name = "MYRIAD";
                 break;
             case DNN_TARGET_FPGA:
-                enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
+                device_name = "FPGA";
                 break;
             default:
                 CV_Error(Error::StsNotImplemented, "Unknown target");
         };
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+        auto dispatcher = InferenceEngine::PluginDispatcher({""});
+        enginePtr = dispatcher.getPluginByDevice(device_name);
+#endif
         if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
         {
             std::string suffixes[] = {"_avx2", "_sse4", ""};
@@ -202,16 +209,23 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
             try
             {
                 IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+                ie.AddExtension(extension, device_name);
+#else
                 enginePtr->AddExtension(extension, 0);
+#endif
                 break;
             }
             catch(...) {}
         }
         // Some of networks can work without a library of extra layers.
     }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+        netExec = ie.LoadNetwork(net, device_name);
+#else
         plugin = InferencePlugin(enginePtr);
         netExec = plugin.LoadNetwork(net, {});
+#endif
         infRequest = netExec.CreateInferRequest();
     }
     catch (const std::exception& ex)
@@ -224,7 +238,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap inputBlobs;
     for (auto& it : net.getInputsInfo())
     {
-        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), inputsMap[it.first], inputBlobs[it.first]);
     }
     infRequest.SetInput(inputBlobs);
@@ -233,7 +247,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     BlobMap outputBlobs;
     for (auto& it : net.getOutputsInfo())
     {
-        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+        genData(it.second->getTensorDesc().getDims(), outputsMap[it.first], outputBlobs[it.first]);
     }
     infRequest.SetOutput(outputBlobs);
......
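With releases after 2019 R1 the test loads networks through the Core API, which addresses devices by name instead of resolving plugins through a PluginDispatcher. The new path, condensed into a stand-alone sketch (file paths and device name are placeholders, error handling omitted):

    // Assumes an OpenVINO release newer than 2019 R1.
    InferenceEngine::CNNNetReader reader;
    reader.ReadNetwork("model.xml");
    reader.ReadWeights("model.bin");
    InferenceEngine::CNNNetwork net = reader.getNetwork();

    InferenceEngine::Core ie;
    auto netExec = ie.LoadNetwork(net, "CPU");  // device by name, no dispatcher
    auto infRequest = netExec.CreateInferRequest();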
@@ -469,6 +469,42 @@ INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
     Values(CV_32F, CV_8U),
     testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
 ));
+typedef testing::TestWithParam<Target> Test_Model_Optimizer;
+TEST_P(Test_Model_Optimizer, forward_two_nets)
+{
+    const int target = GetParam();
+    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
+    const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");
+
+    Net net0 = readNet(model, proto);
+    net0.setPreferableTarget(target);
+
+    Net net1 = readNet(model, proto);
+    net1.setPreferableTarget(target);
+
+    // Generate inputs.
+    int blobSize[] = {2, 6, 75, 113};
+    Mat input(4, &blobSize[0], CV_32F);
+    randu(input, 0, 255);
+
+    net0.setInput(input);
+    Mat ref0 = net0.forward().clone();
+
+    net1.setInput(input);
+    Mat ref1 = net1.forward();
+
+    net0.setInput(input);
+    Mat ref2 = net0.forward();
+
+    normAssert(ref0, ref2, 0, 0);
+}
+INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+);
 #endif  // HAVE_INF_ENGINE
 }}  // namespace
@@ -357,11 +357,9 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
     {
-#if INF_ENGINE_VER_MAJOR_EQ(2019010000)
+#if INF_ENGINE_VER_MAJOR_GE(2019020000)
         if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
-#else
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
     }
 #endif
@@ -395,16 +393,10 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
 {
     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
-#if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-    {
-#if INF_ENGINE_VER_MAJOR_LE(2019010000)
-        if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
-#else
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
-#endif
-    }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
 #endif
     checkBackend();
@@ -456,12 +448,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.35 : 0.3;
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
-    )
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+    {
         scoreDiff = 0.061;
         iouDiff = 0.12;
         detectionConfThresh = 0.36;
+    }
 #endif
     normAssertDetections(ref, out, "", detectionConfThresh, scoreDiff, iouDiff);
     expectNoFallbacksFromIE(net);
......
@@ -262,7 +262,7 @@ class Test_Torch_nets : public DNNTestLayer {};
 TEST_P(Test_Torch_nets, OpenFace_accuracy)
 {
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
@@ -287,8 +287,8 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
     // Reference output values are in range [-0.17212, 0.263492]
     // on Myriad problem layer: l4_Pooling - does not use pads_begin
-    float l1 = (target == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-5;
-    float lInf = (target == DNN_TARGET_OPENCL_FP16) ? 1.5e-3 : 1e-3;
+    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-3 : 1e-5;
+    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5e-3 : 1e-3;
     Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
     normAssert(out, outRef, "", l1, lInf);
 }
......
@@ -98,7 +98,7 @@ core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bit
                'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
                'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
                'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
-               'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'setIdentity', 'setRNGSeed', \
+               'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed', \
                'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
        'Algorithm': []}
......
@@ -941,4 +941,22 @@ QUnit.test('test_filter', function(assert) {
         inv3.delete();
         inv4.delete();
     }
+    // Rotate
+    {
+        let dst = new cv.Mat();
+        let src = cv.matFromArray(3, 2, cv.CV_8U, [1,2,3,4,5,6]);
+
+        cv.rotate(src, dst, cv.ROTATE_90_CLOCKWISE);
+
+        size = dst.size();
+        assert.equal(size.height, 2, "ROTATE_HEIGHT");
+        assert.equal(size.width, 3, "ROTATE_WIDTH");
+
+        let expected = new Uint8Array([5,3,1,6,4,2]);
+
+        assert.deepEqual(dst.data, expected);
+
+        dst.delete();
+        src.delete();
+    }
 });
 set(the_description "Images stitching")
 if(HAVE_CUDA)
-  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow)
+  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wstrict-aliasing)
 endif()
 set(STITCHING_CONTRIB_DEPS "opencv_xfeatures2d")
......
@@ -499,7 +499,7 @@ struct CvCapture_FFMPEG
     double r2d(AVRational r) const;
     int64_t dts_to_frame_number(int64_t dts);
-    double dts_to_sec(int64_t dts);
+    double dts_to_sec(int64_t dts) const;
     AVFormatContext * ic;
     AVCodec         * avcodec;
@@ -892,7 +892,14 @@ bool CvCapture_FFMPEG::open( const char* _filename )
 #else
     av_dict_set(&dict, "rtsp_transport", "tcp", 0);
 #endif
-    int err = avformat_open_input(&ic, _filename, NULL, &dict);
+    AVInputFormat* input_format = NULL;
+    AVDictionaryEntry* entry = av_dict_get(dict, "input_format", NULL, 0);
+    if (entry != 0)
+    {
+        input_format = av_find_input_format(entry->value);
+    }
+    int err = avformat_open_input(&ic, _filename, input_format, &dict);
 #else
     int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
 #endif
@@ -1168,7 +1175,11 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
     switch( property_id )
     {
     case CAP_PROP_POS_MSEC:
-        return 1000.0*(double)frame_number/get_fps();
+        if (picture_pts == AV_NOPTS_VALUE_)
+        {
+            return 0;
+        }
+        return (dts_to_sec(picture_pts) * 1000);
     case CAP_PROP_POS_FRAMES:
         return (double)frame_number;
     case CAP_PROP_POS_AVI_RATIO:
@@ -1278,7 +1289,7 @@ int64_t CvCapture_FFMPEG::dts_to_frame_number(int64_t dts)
     return (int64_t)(get_fps() * sec + 0.5);
 }
-double CvCapture_FFMPEG::dts_to_sec(int64_t dts)
+double CvCapture_FFMPEG::dts_to_sec(int64_t dts) const
 {
     return (double)(dts - ic->streams[video_stream]->start_time) *
         r2d(ic->streams[video_stream]->time_base);
......
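The open() change lets callers select an FFmpeg demuxer by passing an "input_format" entry in the options dictionary; in OpenCV these options usually arrive via the OPENCV_FFMPEG_CAPTURE_OPTIONS environment variable (e.g. "input_format;mjpeg"). The pattern in isolation, as a sketch against a contemporaneous libavformat (error handling trimmed):

    extern "C" {
    #include <libavformat/avformat.h>
    }

    // Open `url`, honoring an optional "input_format" entry in `dict`.
    AVFormatContext* openWithFormat(const char* url, AVDictionary* dict)
    {
        AVInputFormat* input_format = NULL;
        AVDictionaryEntry* e = av_dict_get(dict, "input_format", NULL, 0);
        if (e)
            input_format = av_find_input_format(e->value);  // e.g. "mjpeg", "v4l2"

        AVFormatContext* ic = NULL;
        if (avformat_open_input(&ic, url, input_format, &dict) < 0)
            return NULL;  // dict may be consumed/modified by FFmpeg
        return ic;
    }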
@@ -796,11 +796,10 @@ bool CvCaptureCAM_V4L::open(int _index)
         name = cv::format("/dev/video%d", _index);
     }
-    /* Print the CameraNumber at the end of the string with a width of one character */
     bool res = open(name.c_str());
     if (!res)
     {
-        fprintf(stderr, "VIDEOIO ERROR: V4L: can't open camera by index %d\n", _index);
+        CV_LOG_WARNING(NULL, cv::format("VIDEOIO ERROR: V4L: can't open camera by index %d", _index));
     }
     return res;
 }
......
@@ -84,7 +84,7 @@ public:
     {
         if (!videoio_registry::hasBackend(apiPref))
             throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
-        if (cvtest::skipUnstableTests && apiPref == CAP_MSMF && (ext == "h264" || ext == "h265"))
+        if (cvtest::skipUnstableTests && apiPref == CAP_MSMF && (ext == "h264" || ext == "h265" || ext == "mpg"))
             throw SkipTestException("Unstable MSMF test");
         writeVideo();
         VideoCapture cap;
@@ -172,7 +172,7 @@ public:
     {
         if (!videoio_registry::hasBackend(apiPref))
             throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
-        if (cvtest::skipUnstableTests && apiPref == CAP_MSMF && (ext == "h264" || ext == "h265"))
+        if (cvtest::skipUnstableTests && apiPref == CAP_MSMF && (ext == "h264" || ext == "h265" || ext == "mpg"))
            throw SkipTestException("Unstable MSMF test");
         VideoCapture cap;
         EXPECT_NO_THROW(cap.open(video_file, apiPref));
......