Commit 8c03c639 authored by dkurt

Updated Halide tests according to https://github.com/opencv/opencv_contrib/pull/1229

parent aa0d8060
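The diff below migrates the Halide tests, sample and tutorials from the old `Net::setBlob` / `Net::compileHalide` / `Net::getBlob` calls to the newer `Net::setInput` / `Net::setHalideScheduler` / `Net::forward` API. As a minimal sketch of the updated usage pattern that the hunks apply (the model and scheduler file names here are placeholders, not part of the commit):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // "model.prototxt", "model.caffemodel" and "scheduler.yml" are placeholders.
    Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

    Mat input(227, 227, CV_32FC3);
    randu(input, 0.0f, 1.0f);

    net.setInput(blobFromImage(input, 1.0, false));  // was: net.setBlob("", ...)
    net.setPreferableBackend(DNN_BACKEND_HALIDE);
    net.setHalideScheduler("scheduler.yml");         // was: net.compileHalide("scheduler.yml")
    Mat prob = net.forward("prob");                  // was: getLayerId + forward + getBlob
    return 0;
}
```

The same pattern, wrapped in try/catch for `SkipTestException`, is what the performance tests below use.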
@@ -12,14 +12,18 @@ namespace cvtest
 using namespace cv;
 using namespace dnn;

-static void loadNet(const std::string& weights, const std::string& proto,
-                    const std::string& scheduler, int inWidth, int inHeight,
-                    const std::string& outputLayer, const std::string& framework,
-                    int targetId, Net* net, int* outputLayerId)
+static void loadNet(std::string weights, std::string proto, std::string scheduler,
+                    int inWidth, int inHeight, const std::string& outputLayer,
+                    const std::string& framework, int targetId, Net* net)
 {
     Mat input(inHeight, inWidth, CV_32FC3);
     randu(input, 0.0f, 1.0f);

+    weights = findDataFile(weights, false);
+    if (!proto.empty())
+        proto = findDataFile(proto, false);
+    if (!scheduler.empty())
+        scheduler = findDataFile(scheduler, false);
     if (framework == "caffe")
     {
         *net = cv::dnn::readNetFromCaffe(proto, weights);
@@ -35,106 +39,116 @@ static void loadNet(const std::string& weights, const std::string& proto,
     else
         CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);

-    net->setBlob("", cv::dnn::blobFromImage(input, 1.0, false));
+    net->setInput(blobFromImage(input, 1.0, false));
     net->setPreferableBackend(DNN_BACKEND_HALIDE);
-    net->compileHalide(scheduler);
-    *outputLayerId = net->getLayerId(outputLayer);
-    net->forward(*outputLayerId);
+    net->setHalideScheduler(scheduler);
+    net->forward(outputLayer);
 }

 PERF_TEST(GoogLeNet, HalidePerfTest)
 {
-    Net net;
-    int outputLayerId;
-    loadNet(findDataFile("dnn/bvlc_googlenet.caffemodel"),
-            findDataFile("dnn/bvlc_googlenet.prototxt"),
-            "", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
-
-    TEST_CYCLE_N(10)
-    {
-        net.forward(outputLayerId);
-    }
-    SANITY_CHECK_NOTHING();
+    try {
+        Net net;
+        loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
+                "", 227, 227, "prob", "caffe", DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward();
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
 }

 PERF_TEST(AlexNet, HalidePerfTest)
 {
-    Net net;
-    int outputLayerId;
-    loadNet(findDataFile("dnn/bvlc_alexnet.caffemodel"),
-            findDataFile("dnn/bvlc_alexnet.prototxt"),
-            findDataFile("dnn/halide_scheduler_alexnet.yml"),
-            227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
-
-    TEST_CYCLE_N(10)
-    {
-        net.forward(outputLayerId);
-    }
-    SANITY_CHECK_NOTHING();
+    try {
+        Net net;
+        loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
+                "dnn/halide_scheduler_alexnet.yml", 227, 227, "prob", "caffe",
+                DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward();
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
 }

-// PERF_TEST(ResNet50, HalidePerfTest)
-// {
-//     Net net;
-//     int outputLayerId;
-//     loadNet(findDataFile("dnn/ResNet-50-model.caffemodel"),
-//             findDataFile("dnn/ResNet-50-deploy.prototxt"),
-//             findDataFile("dnn/halide_scheduler_resnet_50.yml"),
-//             224, 224, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
-//
-//     TEST_CYCLE_N(10)
-//     {
-//         net.forward(outputLayerId);
-//     }
-//     SANITY_CHECK_NOTHING();
-// }
+PERF_TEST(ResNet50, HalidePerfTest)
+{
+    try {
+        Net net;
+        loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
+                "dnn/halide_scheduler_resnet_50.yml", 224, 224, "prob", "caffe",
+                DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward();
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
+}

-// PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
-// {
-//     Net net;
-//     int outputLayerId;
-//     loadNet(findDataFile("dnn/squeezenet_v1_1.caffemodel"),
-//             findDataFile("dnn/squeezenet_v1_1.prototxt"),
-//             findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml"),
-//             227, 227, "prob", "caffe", DNN_TARGET_CPU, &net, &outputLayerId);
-//
-//     TEST_CYCLE_N(10)
-//     {
-//         net.forward(outputLayerId);
-//     }
-//     SANITY_CHECK_NOTHING();
-// }
+PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
+{
+    try {
+        Net net;
+        loadNet("dnn/squeezenet_v1_1.caffemodel", "dnn/squeezenet_v1_1.prototxt",
+                "dnn/halide_scheduler_squeezenet_v1_1.yml", 227, 227, "prob",
+                "caffe", DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward();
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
+}

 PERF_TEST(Inception_5h, HalidePerfTest)
 {
-    Net net;
-    int outputLayerId;
-    loadNet(findDataFile("dnn/tensorflow_inception_graph.pb"), "",
-            findDataFile("dnn/halide_scheduler_inception_5h.yml"),
-            224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU,
-            &net, &outputLayerId);
-
-    TEST_CYCLE_N(10)
-    {
-        net.forward(outputLayerId);
-    }
-    SANITY_CHECK_NOTHING();
+    try {
+        Net net;
+        loadNet("dnn/tensorflow_inception_graph.pb", "",
+                "dnn/halide_scheduler_inception_5h.yml",
+                224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward("softmax2");
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
 }

 PERF_TEST(ENet, HalidePerfTest)
 {
-    Net net;
-    int outputLayerId;
-    loadNet(findDataFile("dnn/Enet-model-best.net"), "",
-            findDataFile("dnn/halide_scheduler_enet.yml"),
-            512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU,
-            &net, &outputLayerId);
-
-    TEST_CYCLE_N(10)
-    {
-        net.forward(outputLayerId);
-    }
-    SANITY_CHECK_NOTHING();
+    try {
+        Net net;
+        loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_enet.yml",
+                512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU, &net);
+
+        TEST_CYCLE_N(10)
+        {
+            net.forward("l367_Deconvolution");
+        }
+        SANITY_CHECK_NOTHING();
+    } catch (SkipTestException& e) {
+        throw PerfSkipTestException();
+    }
 }

 #endif // HAVE_HALIDE
......
@@ -93,28 +93,22 @@ int main(int argc, char **argv)
     //! [Prepare blob]

     //! [Set input blob]
     net.setInput(inputBlob);                       // Set the network input.
     //! [Set input blob]

     //! [Enable Halide backend]
     net.setPreferableBackend(DNN_BACKEND_HALIDE);  // Tell engine to use Halide where it possible.
     //! [Enable Halide backend]

-    //! [Compile Halide pipeline]
-    // net.compileHalide();                        // Compile Halide pipeline.
-    //! [Compile Halide pipeline]
-
     //! [Make forward pass]
     Mat prob = net.forward("prob");                // Compute output.
     //! [Make forward pass]

-    //! [Gather output]
-    // net.getBlob();                              // Gather output of "prob" layer.
-
+    //! [Determine the best class]
     int classId;
     double classProb;
     getMaxClass(prob, &classId, &classProb);       // Find the best class.
-    //! [Gather output]
+    //! [Determine the best class]

     //! [Print results]
     std::vector<std::string> classNames = readClassNames();
......
@@ -24,14 +24,11 @@ static void test(LayerParams& params, Mat& input)
     int lid = net.addLayer(params.name, params.type, params);
     net.connect(0, 0, lid, 0);

-    net.setBlob("", input);
-    net.allocate();
-    net.forward();
-    Mat outputDefault = net.getBlob(params.name).clone();
+    net.setInput(input);
+    Mat outputDefault = net.forward(params.name).clone();

     net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.forward();
-    Mat outputHalide = net.getBlob(params.name).clone();
+    Mat outputHalide = net.forward(params.name).clone();

     normAssert(outputDefault, outputHalide);
 }
@@ -346,14 +343,12 @@ TEST(MaxPoolUnpool_Halide, Accuracy)
     Mat input({1, 1, 4, 4}, CV_32F);
     randu(input, -1.0f, 1.0f);

-    net.setBlob("", input);
-    net.forward();
-    Mat outputDefault = net.getBlob("testUnpool").clone();
+    net.setInput(input);
+    Mat outputDefault = net.forward("testUnpool").clone();

     net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.setBlob("", input);
-    net.forward();
-    Mat outputHalide = net.getBlob("testUnpool").clone();
+    net.setInput(input);
+    Mat outputHalide = net.forward("testUnpool").clone();

     normAssert(outputDefault, outputHalide);
 }
@@ -381,14 +376,12 @@ void testInPlaceActivation(LayerParams& lp)
     Mat input({1, kNumChannels, 10, 10}, CV_32F);
     randu(input, -1.0f, 1.0f);

-    net.setBlob("", input);
-    net.forward();
-    Mat outputDefault = net.getBlob(lp.name).clone();
+    net.setInput(input);
+    Mat outputDefault = net.forward(lp.name).clone();

-    net.setBlob("", input);
+    net.setInput(input);
     net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.forward();
-    Mat outputHalide = net.getBlob(lp.name).clone();
+    Mat outputHalide = net.forward(lp.name).clone();

     normAssert(outputDefault, outputHalide);
 }
@@ -555,13 +548,11 @@ TEST_P(Concat, Accuracy)
     Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
     randu(input, -1.0f, 1.0f);

-    net.setBlob("", input);
-    net.forward();
-    Mat outputDefault = net.getBlob(concatParam.name).clone();
+    net.setInput(input);
+    Mat outputDefault = net.forward(concatParam.name).clone();

     net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.forward();
-    Mat outputHalide = net.getBlob(concatParam.name).clone();
+    Mat outputHalide = net.forward(concatParam.name).clone();

     normAssert(outputDefault, outputHalide);
 }
@@ -617,13 +608,11 @@ TEST_P(Eltwise, Accuracy)
     Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
     randu(input, -1.0f, 1.0f);

-    net.setBlob("", input);
-    net.forward();
-    Mat outputDefault = net.getBlob(eltwiseParam.name).clone();
+    net.setInput(input);
+    Mat outputDefault = net.forward(eltwiseParam.name).clone();

     net.setPreferableBackend(DNN_BACKEND_HALIDE);
-    net.forward();
-    Mat outputHalide = net.getBlob(eltwiseParam.name).clone();
+    Mat outputHalide = net.forward(eltwiseParam.name).clone();

     normAssert(outputDefault, outputHalide);
 }
......
@@ -43,80 +43,76 @@ static void test(const std::string& weights, const std::string& proto,
     loadNet(weights, proto, framework, &netDefault);
     loadNet(weights, proto, framework, &netHalide);

-    netDefault.setBlob("", blobFromImage(input.clone(), 1.0f, false));
-    netDefault.forward(netDefault.getLayerId(outputLayer));
-    outputDefault = netDefault.getBlob(outputLayer).clone();
+    netDefault.setInput(blobFromImage(input.clone(), 1.0f, false));
+    outputDefault = netDefault.forward(outputLayer).clone();

-    netHalide.setBlob("", blobFromImage(input.clone(), 1.0f, false));
+    netHalide.setInput(blobFromImage(input.clone(), 1.0f, false));
     netHalide.setPreferableBackend(DNN_BACKEND_HALIDE);
-    netHalide.compileHalide(scheduler);
-    netHalide.forward(netHalide.getLayerId(outputLayer));
-    outputHalide = netHalide.getBlob(outputLayer).clone();
+    netHalide.setHalideScheduler(scheduler);
+    outputHalide = netHalide.forward(outputLayer).clone();

     normAssert(outputDefault, outputHalide);

     // An extra test: change input.
     input *= 0.1f;
-    netDefault.setBlob("", blobFromImage(input.clone(), 1.0, false));
-    netHalide.setBlob("", blobFromImage(input.clone(), 1.0, false));
+    netDefault.setInput(blobFromImage(input.clone(), 1.0, false));
+    netHalide.setInput(blobFromImage(input.clone(), 1.0, false));

     normAssert(outputDefault, outputHalide);

     // Swap backends.
     netHalide.setPreferableBackend(DNN_BACKEND_DEFAULT);
-    netHalide.forward(netHalide.getLayerId(outputLayer));
+    outputDefault = netHalide.forward(outputLayer).clone();

     netDefault.setPreferableBackend(DNN_BACKEND_HALIDE);
-    netDefault.compileHalide(scheduler);
-    netDefault.forward(netDefault.getLayerId(outputLayer));
-
-    outputDefault = netHalide.getBlob(outputLayer).clone();
-    outputHalide = netDefault.getBlob(outputLayer).clone();
+    netDefault.setHalideScheduler(scheduler);
+    outputHalide = netDefault.forward(outputLayer).clone();

     normAssert(outputDefault, outputHalide);
 }

 TEST(Reproducibility_GoogLeNet_Halide, Accuracy)
 {
-    test(findDataFile("dnn/bvlc_googlenet.caffemodel"),
-         findDataFile("dnn/bvlc_googlenet.prototxt"),
+    test(findDataFile("dnn/bvlc_googlenet.caffemodel", false),
+         findDataFile("dnn/bvlc_googlenet.prototxt", false),
          "", 227, 227, "prob", "caffe", DNN_TARGET_CPU);
 };

 TEST(Reproducibility_AlexNet_Halide, Accuracy)
 {
-    test(getOpenCVExtraDir() + "/dnn/bvlc_alexnet.caffemodel",
-         getOpenCVExtraDir() + "/dnn/bvlc_alexnet.prototxt",
-         getOpenCVExtraDir() + "/dnn/halide_scheduler_alexnet.yml",
+    test(findDataFile("dnn/bvlc_alexnet.caffemodel", false),
+         findDataFile("dnn/bvlc_alexnet.prototxt", false),
+         findDataFile("dnn/halide_scheduler_alexnet.yml", false),
          227, 227, "prob", "caffe", DNN_TARGET_CPU);
 };

-// TEST(Reproducibility_ResNet_50_Halide, Accuracy)
-// {
-//     test(getOpenCVExtraDir() + "/dnn/ResNet-50-model.caffemodel",
-//          getOpenCVExtraDir() + "/dnn/ResNet-50-deploy.prototxt",
-//          getOpenCVExtraDir() + "/dnn/halide_scheduler_resnet_50.yml",
-//          224, 224, "prob", "caffe", DNN_TARGET_CPU);
-// };
+TEST(Reproducibility_ResNet_50_Halide, Accuracy)
+{
+    test(findDataFile("dnn/ResNet-50-model.caffemodel", false),
+         findDataFile("dnn/ResNet-50-deploy.prototxt", false),
+         findDataFile("dnn/halide_scheduler_resnet_50.yml", false),
+         224, 224, "prob", "caffe", DNN_TARGET_CPU);
+};

-// TEST(Reproducibility_SqueezeNet_v1_1_Halide, Accuracy)
-// {
-//     test(getOpenCVExtraDir() + "/dnn/squeezenet_v1_1.caffemodel",
-//          getOpenCVExtraDir() + "/dnn/squeezenet_v1_1.prototxt",
-//          getOpenCVExtraDir() + "/dnn/halide_scheduler_squeezenet_v1_1.yml",
-//          227, 227, "prob", "caffe", DNN_TARGET_CPU);
-// };
+TEST(Reproducibility_SqueezeNet_v1_1_Halide, Accuracy)
+{
+    test(findDataFile("dnn/squeezenet_v1_1.caffemodel", false),
+         findDataFile("dnn/squeezenet_v1_1.prototxt", false),
+         findDataFile("dnn/halide_scheduler_squeezenet_v1_1.yml", false),
+         227, 227, "prob", "caffe", DNN_TARGET_CPU);
+};

 TEST(Reproducibility_Inception_5h_Halide, Accuracy)
 {
-    test(getOpenCVExtraDir() + "/dnn/tensorflow_inception_graph.pb", "",
-         getOpenCVExtraDir() + "/dnn/halide_scheduler_inception_5h.yml",
+    test(findDataFile("dnn/tensorflow_inception_graph.pb", false), "",
+         findDataFile("dnn/halide_scheduler_inception_5h.yml", false),
          224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU);
 };

 TEST(Reproducibility_ENet_Halide, Accuracy)
 {
-    test(getOpenCVExtraDir() + "/dnn/Enet-model-best.net", "",
-         getOpenCVExtraDir() + "/dnn/halide_scheduler_enet.yml",
+    test(findDataFile("dnn/Enet-model-best.net", false), "",
+         findDataFile("dnn/halide_scheduler_enet.yml", false),
          512, 512, "l367_Deconvolution", "torch", DNN_TARGET_CPU);
 };

 #endif // HAVE_HALIDE
......
@@ -111,12 +111,9 @@ Put these files into working dir of this program example.
 -# Pass the blob to the network
 @snippet dnn/samples/squeezenet_halide.cpp Set input blob

--# Enable using Halide backend for layers where it is implemented
+-# Enable Halide backend for layers where it is implemented
 @snippet dnn/samples/squeezenet_halide.cpp Enable Halide backend

--# Compile Halide functions to execute on CPU
-@snippet dnn/samples/squeezenet_halide.cpp Compile Halide pipeline
-
 -# Make forward pass
 @snippet dnn/samples/squeezenet_halide.cpp Make forward pass

 Remember that the first forward pass after initialization require quite more
@@ -124,7 +121,7 @@ time that the next ones. It's because of runtime compilation of Halide pipelines
 at the first invocation.

 -# Determine the best class
-@snippet dnn/samples/squeezenet_halide.cpp Gather output
+@snippet dnn/samples/squeezenet_halide.cpp Determine the best class

 -# Print results
 @snippet dnn/samples/squeezenet_halide.cpp Print results
......
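The tutorial hunk above keeps the note that the first forward pass takes noticeably longer than later ones because Halide pipelines are compiled at runtime on the first invocation. A rough sketch of how one could observe that difference, assuming placeholder model files and using cv::TickMeter for timing (the timing code is illustrative, not part of the commit):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Placeholder files; any network loadable by readNetFromCaffe will do.
    Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");
    Mat input(227, 227, CV_32FC3);
    randu(input, 0.0f, 1.0f);
    net.setPreferableBackend(DNN_BACKEND_HALIDE);

    TickMeter tm;
    net.setInput(blobFromImage(input, 1.0, false));
    tm.start();
    net.forward();   // First pass: Halide pipelines are compiled here, so it is slow.
    tm.stop();
    std::cout << "first pass:  " << tm.getTimeMilli() << " ms" << std::endl;

    net.setInput(blobFromImage(input, 1.0, false));
    tm.reset();
    tm.start();
    net.forward();   // Later passes reuse the compiled pipelines.
    tm.stop();
    std::cout << "second pass: " << tm.getTimeMilli() << " ms" << std::endl;
    return 0;
}
```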
@@ -10,8 +10,10 @@ For better understanding of Halide scheduling you might want to read tutorials @
 If it's your first meeting with Halide in OpenCV, we recommend to start from @ref tutorial_dnn_halide.

 ## Configuration files
-When you call ```cv::dnn::Net::compileHalide```, you can pass a path to textual file
-contains scheduling directives for specific device.
+You can schedule computations of Halide pipeline by writing textual configuration files.
+It means that you can easily vectorize, parallelize and manage loops order of
+layers computation. Pass path to file with scheduling directives for specific
+device into ```cv::dnn::Net::setHalideScheduler``` before the first ```cv::dnn::Net::forward``` call.
 Scheduling configuration files represented as YAML files where each node is a
 scheduled function or a scheduling directive.
@@ -74,10 +76,7 @@ fc8:
 @endcode

 ## Automatic scheduling
-Based on manual scheduling experience, proposed way to schedule layers
-automatically. Just skip scheduling file path argument at ```cv::dnn::Net::compileHalide```
-for let DNN schedule your network. Sometimes it might be even better
-than manual scheduling.
-
-You can mix both manual and automatic scheduling ways. Write scheduling file
+You can let DNN to schedule layers automatically. Just skip call of ```cv::dnn::Net::setHalideScheduler```. Sometimes it might be even more efficient than manual scheduling.
+But if specific layers require be scheduled manually, you would be able to
+mix both manual and automatic scheduling ways. Write scheduling file
 and skip layers that you want to be scheduled automatically.
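To restate the two modes described in the hunk above as code: pass a YAML file with scheduling directives via cv::dnn::Net::setHalideScheduler for manual scheduling, or skip that call and let DNN schedule every layer automatically. A minimal sketch, with placeholder model and scheduler file names:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Placeholder model and scheduler files.
    Mat image(227, 227, CV_32FC3);
    randu(image, 0.0f, 1.0f);
    Mat blob = blobFromImage(image, 1.0, false);

    // Manual scheduling: directives come from a YAML file. Layers that the
    // file does not mention fall back to automatic scheduling.
    Net manualNet = readNetFromCaffe("model.prototxt", "model.caffemodel");
    manualNet.setInput(blob);
    manualNet.setPreferableBackend(DNN_BACKEND_HALIDE);
    manualNet.setHalideScheduler("halide_scheduler.yml");
    manualNet.forward();

    // Automatic scheduling: simply do not call setHalideScheduler() and let
    // DNN schedule every layer itself.
    Net autoNet = readNetFromCaffe("model.prototxt", "model.caffemodel");
    autoNet.setInput(blob);
    autoNet.setPreferableBackend(DNN_BACKEND_HALIDE);
    autoNet.forward();
    return 0;
}
```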