Commit 93c3f20d authored by Vadim Pisarevsky

Merge pull request #9569 from dkurt:test_dnn_ssd_halide

parents 258b13f2 cad7c4d5
@@ -146,6 +146,11 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
virtual void copyToHost() = 0;
/**
* @brief Indicate that actual data is on CPU.
*/
virtual void setHostDirty() = 0;
int backendId; //!< Backend identifier.
int targetId; //!< Target identifier.
};
......
@@ -18,11 +18,30 @@ namespace dnn
{
#ifdef HAVE_HALIDE
static MatShape getBufferShape(const MatShape& shape)
{
if (shape.size() == 2 || shape.size() == 4)
{
int w, h, c, n;
getCanonicalSize(shape, &w, &h, &c, &n);
return {w, h, c, n};
}
else
{
MatShape bufferShape(shape);
std::reverse(bufferShape.begin(), bufferShape.end());
return bufferShape;
}
}
static MatShape getBufferShape(const MatSize& size)
{
return getBufferShape(MatShape(size.p, size.p + size[-1]));
}
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat)
{
int n, c, w, h;
getCanonicalSize(mat.size, &w, &h, &c, &n);
return wrapToHalideBuffer(mat, {w, h, c, n});
return wrapToHalideBuffer(mat, getBufferShape(mat.size));
}
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat,
@@ -97,11 +116,9 @@ HalideBackendWrapper::HalideBackendWrapper(const Ptr<BackendWrapper>& base,
: BackendWrapper(DNN_BACKEND_HALIDE, base->targetId)
{
managesDevMemory = false;
int w, h, c, n;
getCanonicalSize(shape, &w, &h, &c, &n);
Halide::Buffer<float> baseBuffer = halideBuffer(base);
buffer = Halide::Buffer<float>((float*)baseBuffer.raw_buffer()->host,
{w, h, c, n});
getBufferShape(shape));
if (baseBuffer.has_device_allocation())
{
buffer.raw_buffer()->device = baseBuffer.raw_buffer()->device;
@@ -127,32 +144,23 @@ HalideBackendWrapper::~HalideBackendWrapper()
void HalideBackendWrapper::copyToHost()
{
CV_Assert(targetId == DNN_TARGET_CPU || buffer.device_dirty());
if (buffer.device_dirty())
{
buffer.device_sync();
buffer.copy_to_host();
}
}
void HalideBackendWrapper::setHostDirty()
{
buffer.set_device_dirty(false);
buffer.set_host_dirty();
}
#endif // HAVE_HALIDE
void getCanonicalSize(const MatSize& size, int* width, int* height,
int* channels, int* batch)
void getCanonicalSize(const MatSize& size, int* w, int* h, int* c, int* n)
{
const int dims = size.p[-1];
CV_Assert(dims == 2 || dims == 4);
*batch = size[0];
*channels = size[1];
if (dims == 4)
{
*width = size[3];
*height = size[2];
}
else
{
*width = 1;
*height = 1;
}
getCanonicalSize(MatShape(size.p, size.p + size[-1]), w, h, c, n);
}
void getCanonicalSize(const MatShape& shape, int* width, int* height,
@@ -174,7 +182,7 @@ void getCanonicalSize(const MatShape& shape, int* width, int* height,
}
}
void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
void compileHalide(const std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
{
#ifdef HAVE_HALIDE
CV_Assert(!node.empty());
......
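For context, here is a rough standalone sketch of the shape mapping that the new getBufferShape helper performs (illustrative only, not part of the patch): 2-D and 4-D blobs keep the canonical {w, h, c, n} layout, while any other rank is simply reversed so that the innermost Mat axis becomes the fastest-varying Halide axis.

// Illustrative sketch only; MatShape is assumed to be std::vector<int>, as in OpenCV dnn.
#include <vector>
#include <algorithm>

typedef std::vector<int> MatShape;

static MatShape getBufferShapeSketch(const MatShape& shape)
{
    if (shape.size() == 2 || shape.size() == 4)
    {
        // Canonical NCHW blob: {n, c, h, w} (or {n, c}) becomes {w, h, c, n}.
        int n = shape[0], c = shape[1];
        int h = (shape.size() == 4) ? shape[2] : 1;
        int w = (shape.size() == 4) ? shape[3] : 1;
        return {w, h, c, n};
    }
    // Any other rank, e.g. a 3-D shape {2, 3, 5}, is reversed to {5, 3, 2}.
    MatShape bufferShape(shape);
    std::reverse(bufferShape.begin(), bufferShape.end());
    return bufferShape;
}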
@@ -61,6 +61,8 @@ namespace dnn
virtual void copyToHost();
virtual void setHostDirty();
Halide::Buffer<float> buffer;
private:
@@ -80,7 +82,7 @@ namespace dnn
const Ptr<BackendNode>& node);
// Compile Halide pipeline to specific target. Use outputs to set bounds of functions.
void compileHalide(std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId);
void compileHalide(const std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId);
bool haveHalide();
} // namespace dnn
......
@@ -646,6 +646,48 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
/*num convs*/ Values(1, 2, 3),
/*weighted(for sum only)*/ Bool()
));
////////////////////////////////////////////////////////////////////////////
// Mixed backends
////////////////////////////////////////////////////////////////////////////
TEST(MixedBackends_Halide_Default_Halide, Accuracy)
{
// Just a layer that supports Halide backend.
LayerParams lrn;
lrn.type = "LRN";
lrn.name = "testLRN";
// One of the layers that doesn't support the Halide backend yet.
LayerParams mvn;
mvn.type = "MVN";
mvn.name = "testMVN";
// Halide layer again.
LayerParams lrn2;
lrn2.type = "LRN";
lrn2.name = "testLRN2";
Net net;
int lrnId = net.addLayer(lrn.name, lrn.type, lrn);
net.connect(0, 0, lrnId, 0);
net.addLayerToPrev(mvn.name, mvn.type, mvn);
net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);
Mat input({4, 3, 5, 6}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
Mat outputDefault = net.forward().clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.setInput(input);
Mat outputHalide = net.forward().clone();
normAssert(outputDefault, outputHalide);
net.setPreferableTarget(DNN_TARGET_OPENCL);
net.setInput(input);
outputHalide = net.forward().clone();
normAssert(outputDefault, outputHalide);
}
#endif // HAVE_HALIDE
} // namespace cvtest
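The normAssert helper used above (and in the net tests below) is, roughly, an element-wise comparison against a reference output with L1 and Linf tolerances. A minimal sketch of such a check, assuming Google Test and the OpenCV ts module (the exact helper lives in the dnn test support code; thresholds here are illustrative):

// Sketch of an accuracy check in the spirit of normAssert; not the original helper.
#include "opencv2/ts.hpp"

static void normAssertSketch(const cv::Mat& ref, const cv::Mat& test,
                             double l1 = 1e-5, double lInf = 1e-4)
{
    // Mean absolute difference per element and worst-case absolute difference.
    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.total();
    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
    EXPECT_LE(normL1, l1);
    EXPECT_LE(normInf, lInf);
}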
@@ -62,6 +62,7 @@ static void test(const std::string& weights, const std::string& proto,
netHalide.setInput(blobFromImage(input.clone(), 1.0, Size(), Scalar(), false));
normAssert(outputDefault, outputHalide, "Second run", l1, lInf);
std::cout << "." << std::endl;
// Swap backends.
netHalide.setPreferableBackend(DNN_BACKEND_DEFAULT);
@@ -79,6 +80,20 @@ static void test(const std::string& weights, const std::string& proto,
////////////////////////////////////////////////////////////////////////////////
// CPU target
////////////////////////////////////////////////////////////////////////////////
TEST(Reproducibility_MobileNetSSD_Halide, Accuracy)
{
test(findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false),
findDataFile("dnn/MobileNetSSD_deploy.prototxt", false),
"", 300, 300, "detection_out", "caffe", DNN_TARGET_CPU);
};
TEST(Reproducibility_SSD_Halide, Accuracy)
{
test(findDataFile("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", false),
findDataFile("dnn/ssd_vgg16.prototxt", false),
"", 300, 300, "detection_out", "caffe", DNN_TARGET_CPU);
};
TEST(Reproducibility_GoogLeNet_Halide, Accuracy)
{
test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
@@ -126,6 +141,20 @@ TEST(Reproducibility_ENet_Halide, Accuracy)
////////////////////////////////////////////////////////////////////////////////
// OpenCL target
////////////////////////////////////////////////////////////////////////////////
TEST(Reproducibility_MobileNetSSD_Halide_opencl, Accuracy)
{
test(findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false),
findDataFile("dnn/MobileNetSSD_deploy.prototxt", false),
"", 300, 300, "detection_out", "caffe", DNN_TARGET_OPENCL);
};
TEST(Reproducibility_SSD_Halide_opencl, Accuracy)
{
test(findDataFile("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel", false),
findDataFile("dnn/ssd_vgg16.prototxt", false),
"", 300, 300, "detection_out", "caffe", DNN_TARGET_OPENCL);
};
TEST(Reproducibility_GoogLeNet_Halide_opencl, Accuracy)
{
test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
......
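Outside the test harness, the same backend switch can be exercised through the public dnn API. A minimal user-side sketch (model paths, image, and blob parameters are placeholders, not values taken from the tests above):

// Hedged sketch: run a Caffe SSD model with the Halide backend via the public API.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    // Paths are placeholders; point them at a real prototxt/caffemodel pair.
    Net net = readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel");
    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    net.setPreferableTarget(DNN_TARGET_CPU);  // or DNN_TARGET_OPENCL

    Mat img = imread("example.jpg");
    Mat blob = blobFromImage(img, 1.0, Size(300, 300), Scalar(), false);
    net.setInput(blob);
    Mat detections = net.forward("detection_out");
    return 0;
}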