Commit 8c03c639 authored by dkurt

Updated Halide tests according to https://github.com/opencv/opencv_contrib/pull/1229

parent aa0d8060
@@ -12,14 +12,18 @@ namespace cvtest
using namespace cv;
using namespace dnn;
static void loadNet(const std::string& weights, const std::string& proto,
const std::string& scheduler, int inWidth, int inHeight,
const std::string& outputLayer, const std::string& framework,
int targetId, Net* net, int* outputLayerId)
static void loadNet(std::string weights, std::string proto, std::string scheduler,
int inWidth, int inHeight, const std::string& outputLayer,
const std::string& framework, int targetId, Net* net)
{
Mat input(inHeight, inWidth, CV_32FC3);
randu(input, 0.0f, 1.0f);
weights = findDataFile(weights, false);
if (!proto.empty())
proto = findDataFile(proto, false);
if (!scheduler.empty())
scheduler = findDataFile(scheduler, false);
if (framework == "caffe")
{
*net = cv::dnn::readNetFromCaffe(proto, weights);
@@ -35,106 +39,116 @@ static void loadNet(const std::string& weights, const std::string& proto,
else
CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);
net->setBlob("", cv::dnn::blobFromImage(input, 1.0, false));
net->setInput(blobFromImage(input, 1.0, false));
net->setPreferableBackend(DNN_BACKEND_HALIDE);
net->compileHalide(scheduler);
*outputLayerId = net->getLayerId(outputLayer);
net->forward(*outputLayerId);
net->setHalideScheduler(scheduler);
net->forward(outputLayer);
}
PERF_TEST(GoogLeNet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/bvlc_googlenet.caffemodel"),
findDataFile("dnn/bvlc_googlenet.prototxt"),
"", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
try {
Net net;
loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
"", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward();
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
SANITY_CHECK_NOTHING();
}
PERF_TEST(AlexNet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/bvlc_alexnet.caffemodel"),
findDataFile("dnn/bvlc_alexnet.prototxt"),
findDataFile("dnn/halide_scheduler_alexnet.yml"),
227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
try {
Net net;
loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
"dnn/halide_scheduler_alexnet.yml", 227, 227, "prob", "caffe",
DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward();
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
SANITY_CHECK_NOTHING();
}
// PERF_TEST(ResNet50, HalidePerfTest)
// {
// Net net;
// int outputLayerId;
// loadNet(findDataFile("dnn/ResNet-50-model.caffemodel"),
// findDataFile("dnn/ResNet-50-deploy.prototxt"),
// findDataFile("dnn/halide_scheduler_resnet_50.yml"),
// 224, 224, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
//
// TEST_CYCLE_N(10)
// {
// net.forward(outputLayerId);
// }
// SANITY_CHECK_NOTHING();
// }
// PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
// {
// Net net;
// int outputLayerId;
// loadNet(findDataFile("dnn/squeezenet_v1_1.caffemodel"),
// findDataFile("dnn/squeezenet_v1_1.prototxt"),
// findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml"),
// 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
//
// TEST_CYCLE_N(10)
// {
// net.forward(outputLayerId);
// }
// SANITY_CHECK_NOTHING();
// }
PERF_TEST(ResNet50, HalidePerfTest)
{
try {
Net net;
loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
"dnn/halide_scheduler_resnet_50.yml", 224, 224, "prob", "caffe",
DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward();
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
}
PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
{
try {
Net net;
loadNet("dnn/squeezenet_v1_1.caffemodel", "dnn/squeezenet_v1_1.prototxt",
"dnn/halide_scheduler_squeezenet_v1_1.yml", 227, 227, "prob",
"caffe", DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward();
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
}
PERF_TEST(Inception_5h, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/tensorflow_inception_graph.pb"), "",
findDataFile("dnn/halide_scheduler_inception_5h.yml"),
224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU,
&net, &outputLayerId);
TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
try {
Net net;
loadNet("dnn/tensorflow_inception_graph.pb", "",
"dnn/halide_scheduler_inception_5h.yml",
224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward("softmax2");
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
SANITY_CHECK_NOTHING();
}
PERF_TEST(ENet, HalidePerfTest)
{
Net net;
int outputLayerId;
loadNet(findDataFile("dnn/Enet-model-best.net"), "",
findDataFile("dnn/halide_scheduler_enet.yml"),
512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU,
&net, &outputLayerId);
TEST_CYCLE_N(10)
{
net.forward(outputLayerId);
try {
Net net;
loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_enet.yml",
512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU, &net);
TEST_CYCLE_N(10)
{
net.forward("l367_Deconvolution");
}
SANITY_CHECK_NOTHING();
} catch (SkipTestException& e) {
throw PerfSkipTestException();
}
SANITY_CHECK_NOTHING();
}
#endif // HAVE_HALIDE
@@ -93,28 +93,22 @@ int main(int argc, char **argv)
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob); // Set the network input.
//! [Set input blob]
//! [Enable Halide backend]
net.setPreferableBackend(DNN_BACKEND_HALIDE); // Tell the engine to use Halide where possible.
//! [Enable Halide backend]
//! [Compile Halide pipeline]
// net.compileHalide(); // Compile Halide pipeline.
//! [Compile Halide pipeline]
//! [Make forward pass]
Mat prob = net.forward("prob"); // Compute output.
//! [Make forward pass]
//! [Gather output]
// net.getBlob(); // Gather output of "prob" layer.
//! [Determine the best class]
int classId;
double classProb;
getMaxClass(prob, &classId, &classProb); // Find the best class.
//! [Gather output]
//! [Determine the best class]
//! [Print results]
std::vector<std::string> classNames = readClassNames();
@@ -24,14 +24,11 @@ static void test(LayerParams& params, Mat& input)
int lid = net.addLayer(params.name, params.type, params);
net.connect(0, 0, lid, 0);
net.setBlob("", input);
net.allocate();
net.forward();
Mat outputDefault = net.getBlob(params.name).clone();
net.setInput(input);
Mat outputDefault = net.forward(params.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.forward();
Mat outputHalide = net.getBlob(params.name).clone();
Mat outputHalide = net.forward(params.name).clone();
normAssert(outputDefault, outputHalide);
}
@@ -346,14 +343,12 @@ TEST(MaxPoolUnpool_Halide, Accuracy)
Mat input({1, 1, 4, 4}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setBlob("", input);
net.forward();
Mat outputDefault = net.getBlob("testUnpool").clone();
net.setInput(input);
Mat outputDefault = net.forward("testUnpool").clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.setBlob("", input);
net.forward();
Mat outputHalide = net.getBlob("testUnpool").clone();
net.setInput(input);
Mat outputHalide = net.forward("testUnpool").clone();
normAssert(outputDefault, outputHalide);
}
@@ -381,14 +376,12 @@ void testInPlaceActivation(LayerParams& lp)
Mat input({1, kNumChannels, 10, 10}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setBlob("", input);
net.forward();
Mat outputDefault = net.getBlob(lp.name).clone();
net.setInput(input);
Mat outputDefault = net.forward(lp.name).clone();
net.setBlob("", input);
net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.forward();
Mat outputHalide = net.getBlob(lp.name).clone();
Mat outputHalide = net.forward(lp.name).clone();
normAssert(outputDefault, outputHalide);
}
@@ -555,13 +548,11 @@ TEST_P(Concat, Accuracy)
Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setBlob("", input);
net.forward();
Mat outputDefault = net.getBlob(concatParam.name).clone();
net.setInput(input);
Mat outputDefault = net.forward(concatParam.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.forward();
Mat outputHalide = net.getBlob(concatParam.name).clone();
Mat outputHalide = net.forward(concatParam.name).clone();
normAssert(outputDefault, outputHalide);
}
@@ -617,13 +608,11 @@ TEST_P(Eltwise, Accuracy)
Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setBlob("", input);
net.forward();
Mat outputDefault = net.getBlob(eltwiseParam.name).clone();
net.setInput(input);
Mat outputDefault = net.forward(eltwiseParam.name).clone();
net.setPreferableBackend(DNN_BACKEND_HALIDE);
net.forward();
Mat outputHalide = net.getBlob(eltwiseParam.name).clone();
Mat outputHalide = net.forward(eltwiseParam.name).clone();
normAssert(outputDefault, outputHalide);
}
@@ -43,80 +43,76 @@ static void test(const std::string& weights, const std::string& proto,
loadNet(weights, proto, framework, &netDefault);
loadNet(weights, proto, framework, &netHalide);
netDefault.setBlob("", blobFromImage(input.clone(), 1.0f, false));
netDefault.forward(netDefault.getLayerId(outputLayer));
outputDefault = netDefault.getBlob(outputLayer).clone();
netDefault.setInput(blobFromImage(input.clone(), 1.0f, false));
outputDefault = netDefault.forward(outputLayer).clone();
netHalide.setBlob("", blobFromImage(input.clone(), 1.0f, false));
netHalide.setInput(blobFromImage(input.clone(), 1.0f, false));
netHalide.setPreferableBackend(DNN_BACKEND_HALIDE);
netHalide.compileHalide(scheduler);
netHalide.forward(netHalide.getLayerId(outputLayer));
outputHalide = netHalide.getBlob(outputLayer).clone();
netHalide.setHalideScheduler(scheduler);
outputHalide = netHalide.forward(outputLayer).clone();
normAssert(outputDefault, outputHalide);
// An extra test: change input.
input *= 0.1f;
netDefault.setBlob("", blobFromImage(input.clone(), 1.0, false));
netHalide.setBlob("", blobFromImage(input.clone(), 1.0, false));
netDefault.setInput(blobFromImage(input.clone(), 1.0, false));
netHalide.setInput(blobFromImage(input.clone(), 1.0, false));
normAssert(outputDefault, outputHalide);
// Swap backends.
netHalide.setPreferableBackend(DNN_BACKEND_DEFAULT);
netHalide.forward(netHalide.getLayerId(outputLayer));
outputDefault = netHalide.forward(outputLayer).clone();
netDefault.setPreferableBackend(DNN_BACKEND_HALIDE);
netDefault.compileHalide(scheduler);
netDefault.forward(netDefault.getLayerId(outputLayer));
netDefault.setHalideScheduler(scheduler);
outputHalide = netDefault.forward(outputLayer).clone();
outputDefault = netHalide.getBlob(outputLayer).clone();
outputHalide = netDefault.getBlob(outputLayer).clone();
normAssert(outputDefault, outputHalide);
}
TEST(Reproducibility_GoogLeNet_Halide, Accuracy)
{
test(findDataFile("dnn/bvlc_googlenet.caffemodel"),
findDataFile("dnn/bvlc_googlenet.prototxt"),
test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
findDataFile("dnn/bvlc_googlenet.prototxt", false),
"", 227, 227, "prob", "caffe", DNN_TARGET_CPU);
};
TEST(Reproducibility_AlexNet_Halide, Accuracy)
{
test(getOpenCVExtraDir() + "/dnn/bvlc_alexnet.caffemodel",
getOpenCVExtraDir() + "/dnn/bvlc_alexnet.prototxt",
getOpenCVExtraDir() + "/dnn/halide_scheduler_alexnet.yml",
test(findDataFile("dnn/bvlc_alexnet.caffemodel", false),
findDataFile("dnn/bvlc_alexnet.prototxt", false),
findDataFile("dnn/halide_scheduler_alexnet.yml", false),
227, 227, "prob", "caffe", DNN_TARGET_CPU);
};
// TEST(Reproducibility_ResNet_50_Halide, Accuracy)
// {
// test(getOpenCVExtraDir() + "/dnn/ResNet-50-model.caffemodel",
// getOpenCVExtraDir() + "/dnn/ResNet-50-deploy.prototxt",
// getOpenCVExtraDir() + "/dnn/halide_scheduler_resnet_50.yml",
// 224, 224, "prob", "caffe", DNN_TARGET_CPU);
// };
// TEST(Reproducibility_SqueezeNet_v1_1_Halide, Accuracy)
// {
// test(getOpenCVExtraDir() + "/dnn/squeezenet_v1_1.caffemodel",
// getOpenCVExtraDir() + "/dnn/squeezenet_v1_1.prototxt",
// getOpenCVExtraDir() + "/dnn/halide_scheduler_squeezenet_v1_1.yml",
// 227, 227, "prob", "caffe", DNN_TARGET_CPU);
// };
TEST(Reproducibility_ResNet_50_Halide, Accuracy)
{
test(findDataFile("dnn/ResNet-50-model.caffemodel", false),
findDataFile("dnn/ResNet-50-deploy.prototxt", false),
findDataFile("dnn/halide_scheduler_resnet_50.yml", false),
224, 224, "prob", "caffe", DNN_TARGET_CPU);
};
TEST(Reproducibility_SqueezeNet_v1_1_Halide, Accuracy)
{
test(findDataFile("dnn/squeezenet_v1_1.caffemodel", false),
findDataFile("dnn/squeezenet_v1_1.prototxt", false),
findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml", false),
227, 227, "prob", "caffe", DNN_TARGET_CPU);
};
TEST(Reproducibility_Inception_5h_Halide, Accuracy)
{
test(getOpenCVExtraDir() + "/dnn/tensorflow_inception_graph.pb", "",
getOpenCVExtraDir() + "/dnn/halide_scheduler_inception_5h.yml",
test(findDataFile("dnn/tensorflow_inception_graph.pb", false), "",
findDataFile("dnn/halide_scheduler_inception_5h.yml", false),
224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU);
};
TEST(Reproducibility_ENet_Halide, Accuracy)
{
test(getOpenCVExtraDir() + "/dnn/Enet-model-best.net", "",
getOpenCVExtraDir() + "/dnn/halide_scheduler_enet.yml",
test(findDataFile("dnn/Enet-model-best.net", false), "",
findDataFile("dnn/halide_scheduler_enet.yml", false),
512, 512, "l367_Deconvolution", "torch", DNN_TARGET_CPU);
};
#endif // HAVE_HALIDE
@@ -111,12 +111,9 @@ Put these files into the working directory of this program example.
-# Pass the blob to the network
@snippet dnn/samples/squeezenet_halide.cpp Set input blob
-# Enable using Halide backend for layers where it is implemented
-# Enable Halide backend for layers where it is implemented
@snippet dnn/samples/squeezenet_halide.cpp Enable Halide backend
-# Compile Halide functions to execute on CPU
@snippet dnn/samples/squeezenet_halide.cpp Compile Halide pipeline
-# Make forward pass
@snippet dnn/samples/squeezenet_halide.cpp Make forward pass
Remember that the first forward pass after initialization requires quite a bit more
@@ -124,7 +121,7 @@ time than the next ones. It's because of runtime compilation of Halide pipelines
at the first invocation.
-# Determine the best class
@snippet dnn/samples/squeezenet_halide.cpp Gather output
@snippet dnn/samples/squeezenet_halide.cpp Determine the best class
-# Print results
@snippet dnn/samples/squeezenet_halide.cpp Print results
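Taken together, the updated steps reduce to a short pipeline. The sketch below is
illustrative rather than the sample itself: the file names are placeholders, while
the 227x227 input size and the `blobFromImage(img, 1.0f, false)` call mirror the
SqueezeNet sample in this commit (exact `blobFromImage` signatures vary across
OpenCV versions).
@code
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace cv::dnn;

int main()
{
    // Load the Caffe model (file names are placeholders).
    Net net = readNetFromCaffe("squeezenet_v1.1.prototxt",
                               "squeezenet_v1.1.caffemodel");
    // Prepare a 227x227 input blob from an image, as in the sample.
    Mat img = imread("space_shuttle.jpg");
    resize(img, img, Size(227, 227));
    net.setInput(blobFromImage(img, 1.0f, false)); // Set the network input.
    net.setPreferableBackend(DNN_BACKEND_HALIDE);  // Use Halide where possible.
    Mat prob = net.forward("prob");                // First call also builds pipelines.
    return 0;
}
@endcode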
@@ -10,8 +10,10 @@ For a better understanding of Halide scheduling you might want to read tutorials @
If this is your first encounter with Halide in OpenCV, we recommend starting from @ref tutorial_dnn_halide.
## Configuration files
When you call ```cv::dnn::Net::compileHalide```, you can pass a path to a textual file
that contains scheduling directives for a specific device.
You can schedule the computations of a Halide pipeline by writing textual configuration files.
This means that you can easily vectorize, parallelize, and manage the loop order of
layer computations. Pass the path to a file with scheduling directives for a specific
device to ```cv::dnn::Net::setHalideScheduler``` before the first ```cv::dnn::Net::forward``` call.
Scheduling configuration files are YAML files where each node is a
scheduled function or a scheduling directive.
@@ -74,10 +76,7 @@ fc8:
@endcode
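For illustration, applying such a configuration file with the new API looks as
follows; this is a minimal sketch, and the model and scheduler file names are
placeholders:
@code
using namespace cv;
using namespace cv::dnn;

Net net = readNetFromCaffe("deploy.prototxt", "model.caffemodel");
Mat input(227, 227, CV_32FC3);
randu(input, 0.0f, 1.0f);
net.setInput(blobFromImage(input, 1.0f, false));
net.setPreferableBackend(DNN_BACKEND_HALIDE);
// Must be set before the first forward() call: the directives are
// applied when the Halide pipelines are compiled.
net.setHalideScheduler("halide_scheduler.yml");
Mat out = net.forward("prob");
@endcode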
## Automatic scheduling
Based on manual scheduling experience, we propose a way to schedule layers
automatically. Just skip the scheduling file path argument of ```cv::dnn::Net::compileHalide```
to let DNN schedule your network. Sometimes it might be even better
than manual scheduling.
You can mix both manual and automatic scheduling ways. Write a scheduling file
You can let DNN schedule layers automatically: just skip the call to ```cv::dnn::Net::setHalideScheduler```. Sometimes it might be even more efficient than manual scheduling.
But if specific layers require manual scheduling, you can mix both approaches: write a scheduling file
and skip the layers that you want scheduled automatically, as in the sketch below.
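A sketch of such a mixed configuration file follows; the layer name and the
directives are illustrative (patterned on the configuration examples above), not
taken from a real scheduler file:
@code
# Scheduled manually: explicit directives for conv1 only.
conv1:
  reorder: [x, c, y]
  vectorize: { x: 8 }
  parallel: [y]
# Layers that are not mentioned here (relu1, pool1, fc8, ...)
# are scheduled automatically by DNN.
@endcode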