Unverified Commit 2d161b66 authored by AnastasiaKazantaeva and committed by GitHub

Divide opset1 tests (#4273)

* Failed test if not v1 with error "Unsupported ops detected"

* Python test for executing unit-test

* Finalize printing csv file with opset1

* Finalize test development

* style-check

* Fixed ov_runtime tensor

* Fixed code style

* Apply suggestions from code review
Co-Authored-By: Geoffrey Wenger <geoffrey.wenger@intel.com>

* Fixed comments
Co-authored-by: Ilya Churaev <ilyachur@gmail.com>
Co-authored-by: Geoffrey Wenger <geoffrey.wenger@intel.com>
parent 924b2ff0
@@ -62,6 +62,7 @@ namespace ngraph
insert<OP_TYPE>(OP_TYPE::type_info.name);
}
const std::set<NodeTypeInfo>& get_types_info() const { return m_op_types; }
/// \brief Create the op named `name` using its factory
ngraph::Node* create(const std::string& name) const;
@@ -378,12 +378,18 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
backend/abs.in.cpp
backend/acos.in.cpp
backend/add.in.cpp
backend/aliased_output.in.cpp
backend/all.in.cpp
backend/any.in.cpp
backend/arg_reduce.in.cpp
backend/asin.in.cpp
backend/atan.in.cpp
backend/atan2.in.cpp
backend/auto_broadcast.in.cpp
backend/batch_mat_mul.in.cpp
backend/broadcast.in.cpp
backend/builder_flatten.in.cpp
backend/ceiling.in.cpp
backend/comparison.in.cpp
backend/computation_reuse.in.cpp
backend/concat.in.cpp
@@ -391,11 +397,14 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
backend/convolution.in.cpp
backend/cos.in.cpp
backend/cosh.in.cpp
backend/cum_sum.in.cpp
backend/divide.in.cpp
backend/dot.in.cpp
backend/dyn_broadcast.in.cpp
backend/dyn_replace_slice_reference.in.cpp
backend/dyn_reshape.in.cpp
backend/dyn_slice_reference.in.cpp
backend/strided_slice.in.cpp
backend/dynamic.in.cpp
backend/embedding_lookup.in.cpp
backend/erf.in.cpp
@@ -404,13 +413,17 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
backend/function_name.in.cpp
backend/fused_op.in.cpp
backend/gather.in.cpp
backend/generate_mask.in.cpp
backend/group_convolution.in.cpp
backend/layer_norm.in.cpp
backend/log.in.cpp
backend/logical_and.in.cpp
backend/logical_or.in.cpp
backend/logical_xor.in.cpp
backend/lrn.in.cpp
backend/max.in.cpp
backend/maximum.in.cpp
backend/min.in.cpp
backend/minimum.in.cpp
backend/multiple_backends.in.cpp
backend/multiple_result.in.cpp
@@ -421,14 +434,18 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
backend/numeric.in.cpp
backend/one_hot.in.cpp
backend/pad.in.cpp
backend/parameter_as_output.in.cpp
backend/partial_slice.in.cpp
backend/pool.in.cpp
backend/power.in.cpp
backend/product.in.cpp
backend/quantize_dequantize.in.cpp
backend/quantized_convolution.in.cpp
backend/quantized_dot.in.cpp
backend/random_uniform.in.cpp
backend/range.in.cpp
backend/relu.in.cpp
backend/replace_slice.in.cpp
backend/reshape.in.cpp
backend/reverse_sequence.in.cpp
backend/reverse.in.cpp
@@ -443,27 +460,16 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
backend/softmax.in.cpp
backend/sqrt.in.cpp
backend/subtract.in.cpp
backend/sum.in.cpp
backend/tan.in.cpp
backend/tanh.in.cpp
backend/tile.in.cpp
backend/transpose.in.cpp
backend/unhandled_op.in.cpp
backend/validate_call.in.cpp
backend/zero_sized.in.cpp
# These files should stay commented out because they have not been working for a while.
# Not v1 ops:
# backend/all.in.cpp
# backend/any.in.cpp
# backend/broadcast.in.cpp
# backend/ceiling.in.cpp
# backend/dot.in.cpp
# backend/generate_mask.in.cpp
# backend/max.in.cpp
# backend/min.in.cpp
# backend/product.in.cpp
# backend/replace_slice.in.cpp
# backend/sum.in.cpp
# backend/unhandled_op.in.cpp
# Need to fix for IE plugin:
# backend/api.in.cpp
# backend/batch_norm.in.cpp
@@ -474,10 +480,6 @@ if (NGRAPH_UNIT_TEST_OPENVINO_ENABLE)
# backend/autodiff.in.cpp
# backend/constant.in.cpp
# backend/convolution_reference.in.cpp
# Segmentation fault. Need to fix on IE side:
# backend/parameter_as_output.in.cpp
# backend/aliased_output.in.cpp
)
endif()
@@ -189,3 +189,14 @@ TEST(opset, new_op)
fred = shared_ptr<Node>(opset1_copy.create("Fred"));
EXPECT_TRUE(fred);
}
TEST(opset, dump)
{
OpSet opset1_copy(get_opset1());
cout << "All opset1 operations: ";
for (const auto& t : opset1_copy.get_types_info())
{
std::cout << t.name << " ";
}
cout << endl;
}
These tests execute 'unit-test'.
To run them, do the following:
1. Install the required dependencies:
- pip3 install -r requirements.txt
2. Set the environment variable:
a. Required:
export PATH_TO_EXE=<path where the nGraph unit-test binary is located>
3. To run all tests:
a. cd to the folder where unit_test_executable.py is located
b. pytest --gtest_filter="*"
4. To run a specific test:
a. cd to the folder where unit_test_executable.py is located
b. pytest --gtest_filter="<your test name>"
5. To get an HTML report, add "--html=report.html" to the pytest command line
(install the pytest-html module first: "pip install pytest-html")
6. The tests collect the results of opset1 operations (passed and failed) and also create the files 'nodes_coverage.csv' and
'nodes_coverage.html' after execution. There you can find the name of each operation together with its pass rate and coverage
for several plugins.
Example:
Operation | GPU passed / total | CPU passed / total
Abs | 1/2 | 1/2
Here, for operation 'Abs', 1 test of 2 passed on GPU and on CPU.
\ No newline at end of file
import logging as log
import sys
import subprocess
import os
import pytest
def pytest_addoption(parser):
    parser.addoption(
        "--gtest_filter",
        help="Attributes to gtest",
        type=str,
        required=True,
    )
@pytest.fixture(scope="session")
def gtest_filter(request):
    return request.config.getoption('gtest_filter')
def shell(cmd, env=None):
    """
    Run a command in the specified environment
    :param cmd: command line with the command and its parameters
    :param env: set of environment variables to set for this command
    :return: return code and the collected stdout of the command
    """
    if sys.platform.startswith('linux') or sys.platform == 'darwin':
        cmd = ['/bin/bash', '-c', "unset OMP_NUM_THREADS; " + cmd]
    else:
        cmd = " ".join(cmd)
    sys.stdout.write("Running command:\n" + "".join(cmd) + "\n")
    p = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         universal_newlines=True)
    stdout = []
    while True:
        line = p.stdout.readline()
        stdout.append(line)
        print(line.rstrip())
        if line == '' and p.poll() is not None:
            break
    return p.returncode, ''.join(stdout)
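# A minimal usage sketch of shell() (the path below is hypothetical):
#   retcode, out = shell('/path/to/unit-test --gtest_list_tests')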
def create_list_test(stdout):
    # Example of stdout content (gtest prints the suite name with a trailing dot):
    # 'CPU.'
    # '  zero_sized_abs'
    # '  zero_sized_ceiling'
    # ...
    # So each test name is the concatenation of the suite name and the second part (starting with ' '):
    # 'CPU.zero_sized_abs'
    # 'CPU.zero_sized_ceiling'
    list_test = []
    first_name, second_name = [''] * 2
    for line in stdout:
        if not line.startswith(' '):
            first_name = line
        else:
            second_name = line
            # Some tests carry the gtest 'DISABLED' mark - such tests are not executed, so skip them
            if 'DISABLED' not in line:
                list_test.append(first_name + second_name.strip())
    return list_test
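# For illustration (the sample lines below are hypothetical), create_list_test behaves roughly like this:
#   create_list_test(['CPU.', '  zero_sized_abs', '  DISABLED_foo', '  zero_sized_ceiling'])
#   -> ['CPU.zero_sized_abs', 'CPU.zero_sized_ceiling']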
def pytest_generate_tests(metafunc):
    gtest_filter = metafunc.config.getoption(name='gtest_filter')
    if 'gtest_filter' in metafunc.fixturenames and gtest_filter is not None:
        executable = os.path.join(os.environ.get('PATH_TO_EXE'), "unit-test")
        cmd_line = executable + ' --gtest_filter=' + gtest_filter + ' --gtest_list_tests'
        log.info('Executing {} to get the list of tests'.format(executable))
        retcode, stdout = shell(cmd=cmd_line)
        assert retcode == 0, "unit-test --gtest_list_tests execution failed. Return code: {}".format(retcode)
        stdout = stdout.split('\n')
        list_test = create_list_test(stdout)
        # Find all opset1 operations: execute the test 'opset.dump'
        cmd_line_all_op = executable + ' --gtest_filter=opset.dump'
        log.info('Executing {} to get the list of opset1 operations'.format(cmd_line_all_op))
        retcode_op1, stdout_op1 = shell(cmd=cmd_line_all_op)
        assert retcode_op1 == 0, "unit-test --gtest_filter=opset.dump execution failed. Return code: {}".format(retcode_op1)
        # Parse stdout to collect the names of the opset1 operations
        stdout_op1 = stdout_op1.split('\n')
        operation_opset1 = []
        for line in stdout_op1:
            if 'All opset1 operations:' in line:
                operation_opset1 = list(set(line.replace('All opset1 operations:', '').strip().split(' ')))
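        # For illustration (operation names hypothetical), the opset.dump test prints a line such as
        #   'All opset1 operations: Abs Acos Add ...'
        # which the loop above turns into the list of opset1 operation names.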
        for op in operation_opset1:
            pytest.operation_dictionary[op] = {}
        metafunc.parametrize(argnames="gtest_filter", argvalues=list_test)
pytest
\ No newline at end of file
import logging as log
import sys
import os
import csv
import pytest
import re
from conftest import shell
log.basicConfig(format="[ %(levelname)s ] %(msg)s", stream=sys.stdout, level=log.INFO)
pytest.operation_dictionary = {}
pytest.avaliable_plugins = []
def save_coverage_to_csv(csv_path, header):
    with open(csv_path, 'w', newline='') as f:
        csv_writer = csv.writer(f, delimiter='|', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow(i for i in header)
        i = 1
        for key in sorted(pytest.operation_dictionary):
            line = [i, key]
            for plugin in pytest.avaliable_plugins:
                if plugin not in pytest.operation_dictionary[key]:
                    line.append('0/0')
                else:
                    line.append('/'.join(str(x) for x in pytest.operation_dictionary[key][plugin]))
            csv_writer.writerow(line)
            i += 1
def get_color(value):
    if '/' in value:
        passed, total = [int(x.strip()) for x in value.split('/')]
        if passed == total and total != 0:
            return "#d1ffd3"
        elif passed == total and total == 0:
            return "#dadada"
        else:
            return "#ffdbdb"
    else:
        return "white"
def csv_to_html_table(csv_path, html_path, headers=None, delimiter=","):
    with open(csv_path) as f:
        content = f.readlines()
    # Read the file content into a list of stripped rows
    rows = [x.strip() for x in content]
    table = "<!DOCTYPE html><html><head><title>Opset1 operations results</title></head><body><table border=1>"
    # Create the HTML header row either from the provided headers or from the first csv row
    if headers is not None:
        table += "<tr>"
        table += "".join(["<th>" + cell + "</th>" for cell in headers])
        table += "</tr>"
    else:
        table += "<tr>"
        table += "".join(["<th>" + cell + "</th>" for cell in rows[0].split(delimiter)])
        table += "</tr>"
        rows = rows[1:]
    # Convert the csv to html row by row
    for row in rows:
        table += "<tr>" + "".join(["<td style=background-color:%s>" % (get_color(cell)) + cell + "</td>"
                                   for cell in row.split(delimiter)]) + "</tr>" + "\n"
    table += "</table></body></html><br>"
    # Save the html file
    with open(html_path, "w") as fh:
        fh.write(table)
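# For illustration, a data row such as '1|Abs|1/2|1/2' (hypothetical values) becomes one HTML <tr>
# whose <td> cells are coloured by get_color() according to their 'passed/total' content.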
def setup_module():
    if os.environ.get('PATH_TO_EXE') is None:
        raise EnvironmentError('PATH_TO_EXE is absent from your environment variables. '
                               'Please do "export PATH_TO_EXE=<path to unit-test>"')
def teardown_module():
    """
    Create a CSV file with the nGraph node coverage at the end of the test session
    :return:
    """
    csv_path = "nodes_coverage.csv"
    header = ["#", "Operation"] + [p + " passed / total" for p in pytest.avaliable_plugins]
    save_coverage_to_csv(csv_path=csv_path, header=header)
    # Convert the csv file to html for better visualization
    html_path = "nodes_coverage.html"
    csv_to_html_table(csv_path=csv_path, html_path=html_path, delimiter="|")
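# For illustration (plugin names hypothetical), with CPU and GPU plugins registered the csv header row is:
#   #|Operation|CPU passed / total|GPU passed / total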
def test(gtest_filter):
    executable = os.path.join(os.environ.get('PATH_TO_EXE'), "unit-test")
    cmd_line = executable + ' --gtest_filter=' + gtest_filter
    retcode, stdout = shell(cmd=cmd_line)
    # Parse the output of the single test
    stdout = stdout.split('\n')
    nodes_list = []
    for line in stdout:
        if 'UNSUPPORTED OPS DETECTED!' in line:
            pytest.skip('Skipped because unit-test reported the error UNSUPPORTED OPS DETECTED!')
        elif 'Nodes in test:' in line:
            nodes_list = list(set(line.replace('Nodes in test:', '').strip().split(' ')))
    if not nodes_list:
        pytest.skip('Skipped because the test did not create any nGraph function')
    # A separate loop, because the code below must run only if nodes_list is not empty
    # (which means that the test includes opset1 operations)
    for line in stdout:
        matches = re.match(r'.*1 test from\s([A-Z]+)', line)
        if matches:
            plugin = matches.group(1)
            if plugin not in pytest.avaliable_plugins:
                pytest.avaliable_plugins.append(plugin)
            # Fill the dictionary with operation coverage:
            # how many times each operation was tested and how many of those runs passed
            for n in nodes_list:
                if plugin in pytest.operation_dictionary[n]:
                    numerator, denominator = pytest.operation_dictionary[n][plugin]
                    pytest.operation_dictionary[n][plugin] = (numerator if retcode != 0 else numerator + 1,
                                                              denominator + 1)
                else:
                    pytest.operation_dictionary[n][plugin] = (0, 1) if retcode != 0 else (1, 1)
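    # For illustration, after the whole session the accumulated dictionary has roughly this shape
    # (values are hypothetical): pytest.operation_dictionary == {'Abs': {'CPU': (1, 2), 'GPU': (1, 2)}, ...},
    # i.e. for 'Abs' on CPU, 1 of 2 tests passed, which is reported as '1/2' in the csv.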
    # This check is at the end because in almost all cases the return code is 0 or 1 (1 when a test check fails):
    # the same command line has already been executed by pytest_generate_tests with --gtest_list_tests,
    # so most other issues are caught there.
    assert retcode == 0, "unit-test execution failed. Gtest failed. Return code: {}".format(retcode)
if __name__ == '__main__':
    log.warning("Please run {} via pytest, for example:\n"
                "pytest {} --gtest_filter=<attributes for gtest_filter>".format(__file__, __file__))