Commit 019c2f21 authored by Dmitry Kurtaev

Enable more deep learning tests

parent a0baae8a
@@ -2729,9 +2729,9 @@ void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*>
}
else if (targetId == DNN_TARGET_OPENCL)
{
- int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : outC;
if (outW == 1 && outH == 1)
{
+ int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : outC;
top.split(c, co, ci, c_split)
.fuse(x, y, tile).fuse(co, tile, tile).fuse(n, tile, tile)
.gpu_blocks(tile)
@@ -2741,6 +2741,8 @@ void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*>
{
int x_split = outW > 8 ? (outW >= 32 ? 16 : 8) : outW;
int y_split = outH > 8 ? (outH >= 32 ? 16 : 8) : outH;
+ // Supported vectorization widths: 2, 3, 4, 8, 16
+ int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : std::min(4, outC);
top.split(x, xo, xi, x_split).split(y, yo, yi, y_split)
.split(c, co, ci, c_split)
.gpu_blocks(xo, yo, co)
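The two hunks above retune the OpenCL Halide schedule: the channel split moves into the 1x1-output branch, and the spatial branch caps the split factor at a width the OpenCL vectorizer supports. For readers unfamiliar with Halide's scheduling primitives, here is a minimal standalone sketch of the same split/GPU-binding pattern; it assumes Halide is installed, and all names and factors are illustrative rather than taken from the OpenCV source:

```cpp
#include "Halide.h"
using namespace Halide;

int main()
{
    Func top("top");
    Var x("x"), y("y"), c("c"), n("n");
    top(x, y, c, n) = cast<float>(x + y + c + n);  // stand-in computation

    // split() cuts one loop into an outer/inner pair; gpu_blocks() and
    // gpu_threads() bind loops to the GPU launch grid; vectorize() emits
    // vector operations over the inner channel loop.
    Var xo("xo"), xi("xi"), yo("yo"), yi("yi"), co("co"), ci("ci");
    const int x_split = 16, y_split = 16, c_split = 4;  // sample factors
    top.split(x, xo, xi, x_split)
       .split(y, yo, yi, y_split)
       .split(c, co, ci, c_split)
       .gpu_blocks(xo, yo, co)
       .gpu_threads(xi, yi)
       .reorder(xi, yi, ci, xo, yo, co, n)
       .vectorize(ci);
    return 0;
}
```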
@@ -82,7 +82,21 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
- return preferableTarget != DNN_TARGET_MYRIAD || type != "Deconvolution" || adjustPad == Size();
+ {
+ if (type == "Convolution")
+ return preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height;
+ else
+ {
+ CV_Assert(type == "Deconvolution");
+ const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
+ const int group = numOutput / outGroupCn;
+ if (group != 1)
+ return false;
+ if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
+ return dilation.width == 1 && dilation.height == 1;
+ return true;
+ }
+ }
else
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
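The new Deconvolution branch derives the group count from the weight blob: in IOHW layout dimension 1 holds the per-group output channels, so group = numOutput / outGroupCn, and only group == 1 deconvolutions are offloaded to the Inference Engine. A standalone illustration with made-up shapes (all values hypothetical):

```cpp
#include <cstdio>

int main()
{
    // Hypothetical Deconvolution: 64 total outputs, weights of shape
    // [I=32, O=16, H=3, W=3] in IOHW layout.
    const int numOutput = 64;
    const int weightShape[4] = {32, 16, 3, 3};
    const int outGroupCn = weightShape[1];     // per-group output channels
    const int group = numOutput / outGroupCn;  // 64 / 16 = 4 groups
    std::printf("group = %d -> %s by the Inference Engine backend\n", group,
                group == 1 ? "supported" : "rejected");
    return 0;
}
```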
@@ -97,8 +97,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ backendId == DNN_BACKEND_HALIDE ||
+ backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
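Worth noting about the rewritten return: `&&` binds tighter than `||`, so the SUM/coefficients restriction applies only to the Inference Engine clause. A standalone restatement with explicit parentheses (enum values and the signature are illustrative):

```cpp
#include <cstdio>
#include <vector>

enum Backend { DNN_BACKEND_OPENCV, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE };
enum EltwiseOp { PROD, SUM, MAX };

// Fully parenthesized equivalent of the predicate above.
static bool supportsBackend(Backend backendId, EltwiseOp op,
                            const std::vector<float>& coeffs)
{
    return backendId == DNN_BACKEND_OPENCV ||
           backendId == DNN_BACKEND_HALIDE ||
           (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
            (op != SUM || coeffs.empty()));
}

int main()
{
    std::vector<float> coeffs(2, 0.5f);  // a weighted SUM stays on OpenCV/Halide
    std::printf("IE + weighted SUM: %d\n",
                (int)supportsBackend(DNN_BACKEND_INFERENCE_ENGINE, SUM, coeffs));
    return 0;
}
```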
@@ -41,9 +41,9 @@
//M*/
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/all_layers.hpp>
- #include <iostream>
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
@@ -85,6 +85,11 @@ public:
return false;
}
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
+ {
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ }
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
@@ -169,6 +174,20 @@ public:
}
}
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+ {
+ #ifdef HAVE_INF_ENGINE
+ InferenceEngine::LayerParams lp;
+ lp.name = name;
+ lp.type = "ReorgYolo";
+ lp.precision = InferenceEngine::Precision::FP32;
+ std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+ ieLayer->params["stride"] = format("%d", reorgStride);
+ return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+ #endif // HAVE_INF_ENGINE
+ return Ptr<BackendNode>();
+ }
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
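The ReorgYolo layer mapped above is YOLOv2's space-to-depth shuffle: with stride s, a C x H x W tensor becomes (C*s*s) x (H/s) x (W/s). A reference sketch of one common ordering (plain C++; the indexing here is illustrative, not the OpenCV kernel):

```cpp
#include <cstdio>
#include <vector>

// Space-to-depth on a CHW float tensor with the given stride.
static std::vector<float> reorg(const std::vector<float>& in,
                                int c, int h, int w, int s)
{
    const int outC = c * s * s, outH = h / s, outW = w / s;
    std::vector<float> out(outC * outH * outW);
    for (int k = 0; k < outC; ++k)
        for (int y = 0; y < outH; ++y)
            for (int x = 0; x < outW; ++x)
            {
                const int c2 = k % c;      // source channel
                const int cell = k / c;    // position inside the s*s cell
                const int srcY = y * s + cell / s;
                const int srcX = x * s + cell % s;
                out[(k * outH + y) * outW + x] = in[(c2 * h + srcY) * w + srcX];
            }
    return out;
}

int main()
{
    std::vector<float> in(16);                       // 1x4x4 input: 0..15
    for (size_t i = 0; i < in.size(); ++i) in[i] = (float)i;
    std::vector<float> out = reorg(in, 1, 4, 4, 2);  // -> 4x2x2
    std::printf("out[0]=%g out[4]=%g\n", out[0], out[4]);  // 0 and 1
    return 0;
}
```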
@@ -192,6 +192,11 @@ public:
return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
}
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
+ {
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ }
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
if (!outWidth && !outHeight)
@@ -204,6 +209,22 @@ public:
scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
}
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+ {
+ #ifdef HAVE_INF_ENGINE
+ InferenceEngine::LayerParams lp;
+ lp.name = name;
+ lp.type = "Interp";
+ lp.precision = InferenceEngine::Precision::FP32;
+ std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+ ieLayer->params["pad_beg"] = "0";
+ ieLayer->params["pad_end"] = "0";
+ return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+ #endif // HAVE_INF_ENGINE
+ return Ptr<BackendNode>();
+ }
};
Ptr<Layer> InterpLayer::create(const LayerParams& params)
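The finalize() shown above uses the align-corners convention: scale = (in - 1) / (out - 1), so the first and last output samples land exactly on the input corners, and a scale of 0 flags a single-row or single-column output. A tiny standalone check (sizes are illustrative):

```cpp
#include <cstdio>

int main()
{
    const int inpHeight = 10, outHeight = 4;  // illustrative sizes
    const float scaleHeight = (outHeight > 1)
        ? (float)(inpHeight - 1) / (outHeight - 1) : 0.f;
    // Each destination row samples the source at dst * scale.
    for (int y = 0; y < outHeight; ++y)
        std::printf("dst row %d -> src row %.2f\n", y, y * scaleHeight);
    // Prints 0.00, 3.00, 6.00, 9.00: corners map to corners.
    return 0;
}
```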
@@ -266,7 +266,21 @@ public:
std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
CV_Assert(sliceRanges.size() == 1);
- for (int i = sliceRanges[0].size() - 1; i >= 0; --i)
+ int from, to, step;
+ if (preferableTarget == DNN_TARGET_MYRIAD)
+ {
+ from = 1;
+ to = sliceRanges[0].size() + 1;
+ step = 1;
+ }
+ else
+ {
+ from = sliceRanges[0].size() - 1;
+ to = -1;
+ step = -1;
+ }
+ for (int i = from; i != to; i += step)
{
ieLayer->axis.push_back(i);
ieLayer->offset.push_back(sliceRanges[0][i].start);
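The rewritten loop keeps the same Crop parameters and changes only the traversal order: ascending axes starting at 1 for Myriad, descending axes ending at 0 for everything else. A minimal standalone sketch of the two orders (illustrative):

```cpp
#include <cstdio>

static void printAxisOrder(int numRanges, bool isMyriad)
{
    int from, to, step;
    if (isMyriad) { from = 1; to = numRanges + 1; step = 1; }
    else          { from = numRanges - 1; to = -1; step = -1; }
    for (int i = from; i != to; i += step)
        std::printf("%d ", i);
    std::printf("\n");
}

int main()
{
    printAxisOrder(3, true);   // Myriad: 1 2 3
    printAxisOrder(3, false);  // others: 2 1 0
    return 0;
}
```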
@@ -10,18 +10,9 @@
namespace opencv_test { namespace {
- class DNNTestNetwork : public TestWithParam <tuple<DNNBackend, DNNTarget> >
+ class DNNTestNetwork : public DNNTestLayer
{
public:
- dnn::Backend backend;
- dnn::Target target;
- DNNTestNetwork()
- {
- backend = (dnn::Backend)(int)get<0>(GetParam());
- target = (dnn::Target)(int)get<1>(GetParam());
- }
void processNet(const std::string& weights, const std::string& proto,
Size inpSize, const std::string& outputLayer = "",
const std::string& halideScheduler = "",
@@ -40,32 +31,10 @@ public:
std::string halideScheduler = "",
double l1 = 0.0, double lInf = 0.0, double detectionConfThresh = 0.2)
{
- if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
- {
- #ifdef HAVE_OPENCL
- if (!cv::ocl::useOpenCL())
- #endif
- {
- throw SkipTestException("OpenCL is not available/disabled in OpenCV");
- }
- }
- if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
- {
- if (!checkMyriadTarget())
- {
- throw SkipTestException("Myriad is not available/disabled in OpenCV");
- }
- }
- if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
- {
- l1 = l1 == 0.0 ? 4e-3 : l1;
- lInf = lInf == 0.0 ? 2e-2 : lInf;
- }
- else
- {
- l1 = l1 == 0.0 ? 1e-5 : l1;
- lInf = lInf == 0.0 ? 1e-4 : lInf;
- }
+ checkBackend();
+ l1 = l1 ? l1 : default_l1;
+ lInf = lInf ? lInf : default_lInf;
weights = findDataFile(weights, false);
if (!proto.empty())
proto = findDataFile(proto, false);
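With DNNTestNetwork now deriving from DNNTestLayer (added to the shared test header below), backend/target plumbing, availability checks, and default accuracy thresholds live in one place, and processNet() shrinks to the checkBackend() and threshold lines above. A sketch of a test body after the refactoring (paths follow the existing test data layout; treat the exact values as illustrative):

```cpp
TEST_P(DNNTestNetwork, AlexNet)
{
    processNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
               Size(227, 227), "prob");
}
```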
@@ -69,6 +69,93 @@ static testing::internal::ParamGenerator<DNNTarget> availableDnnTargets()
return testing::ValuesIn(targets);
}
+ static testing::internal::ParamGenerator<tuple<DNNBackend, DNNTarget> > dnnBackendsAndTargets()
+ {
+ static const tuple<DNNBackend, DNNTarget> testCases[] = {
+ #ifdef HAVE_INF_ENGINE
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
+ #endif
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
+ tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
+ };
+ return testing::ValuesIn(testCases);
+ }
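The generator above plugs into GTest's parameterized instantiation, so every fixture built on DNNTestLayer runs once per enabled (backend, target) pair; roughly (illustrative):

```cpp
INSTANTIATE_TEST_CASE_P(/**/, DNNTestNetwork, dnnBackendsAndTargets());
```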
+ class DNNTestLayer : public TestWithParam <tuple<DNNBackend, DNNTarget> >
+ {
+ public:
+ dnn::Backend backend;
+ dnn::Target target;
+ double default_l1, default_lInf;
+ DNNTestLayer()
+ {
+ backend = (dnn::Backend)(int)get<0>(GetParam());
+ target = (dnn::Target)(int)get<1>(GetParam());
+ getDefaultThresholds(backend, target, &default_l1, &default_lInf);
+ }
+ static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
+ {
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ *l1 = 4e-3;
+ *lInf = 2e-2;
+ }
+ else
+ {
+ *l1 = 1e-5;
+ *lInf = 1e-4;
+ }
+ }
+ static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
+ {
+ if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+ {
+ #ifdef HAVE_OPENCL
+ if (!cv::ocl::useOpenCL())
+ #endif
+ {
+ throw SkipTestException("OpenCL is not available/disabled in OpenCV");
+ }
+ }
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ {
+ if (!checkMyriadTarget())
+ {
+ throw SkipTestException("Myriad is not available/disabled in OpenCV");
+ }
+ if (inp && ref && inp->size[0] != 1)
+ {
+ // Myriad plugin supports only batch size 1. Slice a single sample.
+ if (inp->size[0] == ref->size[0])
+ {
+ std::vector<cv::Range> range(inp->dims, Range::all());
+ range[0] = Range(0, 1);
+ *inp = inp->operator()(range);
+ range = std::vector<cv::Range>(ref->dims, Range::all());
+ range[0] = Range(0, 1);
+ *ref = ref->operator()(range);
+ }
+ else
+ throw SkipTestException("Myriad plugin supports only batch size 1");
+ }
+ }
+ }
+ protected:
+ void checkBackend(Mat* inp = 0, Mat* ref = 0)
+ {
+ checkBackend(backend, target, inp, ref);
+ }
+ };
}}
#endif
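The DNNTestLayer fixture above is the intended base for per-layer accuracy tests: the constructor pulls (backend, target) from the test parameter, getDefaultThresholds() picks FP16- and Myriad-appropriate tolerances, and checkBackend() skips unavailable configurations or trims batches for Myriad. A hypothetical test using it (model and data paths are placeholders, not real files):

```cpp
typedef DNNTestLayer Test_Some_Layer;  // hypothetical fixture alias

TEST_P(Test_Some_Layer, Accuracy)
{
    Mat inp = blobFromNPY(findDataFile("dnn/some_input.npy"));      // placeholder
    Mat ref = blobFromNPY(findDataFile("dnn/some_reference.npy"));  // placeholder
    checkBackend(&inp, &ref);  // skip or trim configs this pair can't run

    Net net = readNet(findDataFile("dnn/some_model.caffemodel", false));
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.setInput(inp);
    normAssert(ref, net.forward(), "", default_l1, default_lInf);
}

INSTANTIATE_TEST_CASE_P(/**/, Test_Some_Layer, dnnBackendsAndTargets());
```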
@@ -296,7 +296,6 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
Mat inputBlob = blobFromImage(img, 1.0, Size(), Scalar(103.939, 116.779, 123.68), false);
net.setInput(inputBlob);
- net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
// Deprocessing.