Unverified commit 2b3c140f, authored by Alexander Alekhin, committed by GitHub

Merge pull request #10436 from alalek:test_threads

parents ec32022b 9b131b5f
@@ -77,8 +77,6 @@ OCL_PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     std::vector<Mat*> inpBlobs(1, &inpBlob);
     std::vector<Mat> outBlobs, internalBlobs;
-    cv::setNumThreads(cv::getNumberOfCPUs());
     Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
     std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
     layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
@@ -99,7 +97,7 @@ OCL_PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     Mat inpBlob2D = inpBlob.reshape(1, outCn);
     Mat wgtBlob2D = wgtBlob.reshape(1, outCn*(inpCn/groups));
     Mat outBlob2D = outBlobs[0].reshape(1, outBlobs[0].size[0]);
-    declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D).tbb_threads(cv::getNumThreads());
+    declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D);
     // warmup
     layer->forward(inpBlobs, outBlobs, internalBlobs);
...
@@ -60,8 +60,6 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     std::vector<Mat*> inpBlobs(1, &inpBlob);
     std::vector<Mat> outBlobs, internalBlobs;
-    cv::setNumThreads(cv::getNumberOfCPUs());
     Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
     std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
     layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
@@ -81,7 +79,7 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
     Mat inpBlob2D = inpBlob.reshape(1, outCn);
     Mat wgtBlob2D = wgtBlob.reshape(1, outCn*(inpCn/groups));
     Mat outBlob2D = outBlobs[0].reshape(1, outBlobs[0].size[0]);
-    declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D).tbb_threads(cv::getNumThreads());
+    declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D);
     layer->forward(inpBlobs, outBlobs, internalBlobs); /// warmup
...
@@ -107,8 +107,6 @@ void testLayerUsingCaffeModels(String basename, int targetId = DNN_TARGET_CPU,
     String inpfile = (useCommonInputBlob) ? _tf("blob.npy") : _tf(basename + ".input.npy");
     String outfile = _tf(basename + ".npy");
-    cv::setNumThreads(cv::getNumberOfCPUs());
     Net net = readNetFromCaffe(prototxt, (useCaffeModel) ? caffemodel : String());
     ASSERT_FALSE(net.empty());
@@ -537,8 +535,6 @@ void testLayerUsingDarknetModels(String basename, bool useDarknetModel = false,
     String inpfile = (useCommonInputBlob) ? _tf("blob.npy") : _tf(basename + ".input.npy");
     String outfile = _tf(basename + ".npy");
-    cv::setNumThreads(cv::getNumberOfCPUs());
     Net net = readNetFromDarknet(cfg, (useDarknetModel) ? weights : String());
     ASSERT_FALSE(net.empty());
...
@@ -11,11 +11,13 @@
 namespace cvtest {
 void checkIppStatus();
 extern bool skipUnstableTests;
+extern int testThreads;
 }
 #define CV__TEST_INIT \
     cv::ipp::setIppStatus(0); \
-    cv::theRNG().state = cvtest::param_seed;
+    cv::theRNG().state = cvtest::param_seed; \
+    cv::setNumThreads(cvtest::testThreads);
 #define CV__TEST_CLEANUP ::cvtest::checkIppStatus();
 #define CV__TEST_BODY_IMPL(name) \
 { \
...
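For reference, a minimal sketch of what the updated CV__TEST_INIT now places at the top of each test body (a hypothetical expansion written as plain statements; the real code is pasted in through CV__TEST_BODY_IMPL):

    cv::ipp::setIppStatus(0);                 // reset IPP status tracking for this test
    cv::theRNG().state = cvtest::param_seed;  // reseed the global RNG so runs are reproducible
    cv::setNumThreads(cvtest::testThreads);   // apply the thread count chosen on the command line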
@@ -695,12 +695,14 @@ void checkIppStatus()
 }
 bool skipUnstableTests = false;
+int testThreads = 0;
 void parseCustomOptions(int argc, char **argv)
 {
     const char * const command_line_keys =
         "{ ipp test_ipp_check |false |check whether IPP works without failures }"
         "{ test_seed |809564 |seed for random numbers generator }"
+        "{ test_threads |-1 |the number of worker threads, if parallel execution is enabled}"
         "{ skip_unstable |false |skip unstable tests }"
         "{ h help |false |print help info }";
@@ -721,6 +723,8 @@ void parseCustomOptions(int argc, char **argv)
     param_seed = parser.get<unsigned int>("test_seed");
+    testThreads = parser.get<int>("test_threads");
     skipUnstableTests = parser.get<bool>("skip_unstable");
 }
...
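With test_threads parsed in parseCustomOptions and applied by CV__TEST_INIT, the worker-thread count for accuracy tests can be chosen per run. A hypothetical invocation (the binary name is only an example; any ts-based test module accepts the same option):

    ./opencv_test_dnn --test_threads=4    # run the tests with 4 worker threads
    ./opencv_test_dnn --test_threads=1    # run with a single worker thread
    ./opencv_test_dnn                     # default -1 resets OpenCV to its default thread count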
@@ -39,7 +39,6 @@ static double param_max_deviation;
 static unsigned int param_min_samples;
 static unsigned int param_force_samples;
 static double param_time_limit;
-static int param_threads;
 static bool param_write_sanity;
 static bool param_verify_sanity;
 #ifdef CV_COLLECT_IMPL_DATA
@@ -1042,7 +1041,7 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
 #ifdef HAVE_IPP
     test_ipp_check = !args.get<bool>("perf_ipp_check") ? getenv("OPENCV_IPP_CHECK") != NULL : true;
 #endif
-    param_threads = args.get<int>("perf_threads");
+    testThreads = args.get<int>("perf_threads");
 #ifdef CV_COLLECT_IMPL_DATA
     param_collect_impl = args.get<bool>("perf_collect_impl");
 #endif
@@ -1160,7 +1159,7 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
 void TestBase::RecordRunParameters()
 {
     ::testing::Test::RecordProperty("cv_implementation", param_impl);
-    ::testing::Test::RecordProperty("cv_num_threads", param_threads);
+    ::testing::Test::RecordProperty("cv_num_threads", testThreads);
 #ifdef HAVE_CUDA
     if (param_impl == "cuda")
@@ -1851,8 +1850,8 @@ void TestBase::SetUp()
 {
     cv::theRNG().state = param_seed; // this rng should generate same numbers for each run
-    if (param_threads >= 0)
-        cv::setNumThreads(param_threads);
+    if (testThreads >= 0)
+        cv::setNumThreads(testThreads);
     else
         cv::setNumThreads(-1);
...
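On the perf side, perf_threads already existed; it now feeds the shared cvtest::testThreads instead of the file-local param_threads, so both harnesses drive cv::setNumThreads through the same variable. A hypothetical perf run (binary name is illustrative):

    ./opencv_perf_dnn --perf_threads=1    # pin the benchmark to a single worker thread
    ./opencv_perf_dnn --perf_threads=-1   # negative value falls through to cv::setNumThreads(-1), i.e. the default pool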