Unverified Commit e37b386e authored by Mateusz Bencer's avatar Mateusz Bencer Committed by GitHub

Extend NgraphTestCase to support dynamic backends (#4234)

* Switch to PartialShape in onnx_importer ValueInfo

* Construct dynamic dimensions out of ONNX dimensions defined as dim_param

* Validate the PartialShape of inputs created from an ONNX model with dynamic shapes

* Validate the output shape inference for a dynamic ONNX model

* Test the execution of an ONNX model with dynamic dimensions

* Test the Ax+B with more than one batch size

* Provenance tagging adjustments - PartialShape instead of Shape

* Correct translation of ONNX shapes to nG shapes

* Test the shape of Constant produced by scalar initializers

* Review comments & more strict assertions in UT

* UT checking a dynamic rank input

* Fully dynamic input inference test

* first dynamic version

* modified UTs

* Added assert checks

* Added specialised methods

* first version of AvgPool

* code review remarks introduced

* Changed tests to use default BackendMode value

* Reverted not related changes

* code review remarks introduced

* Set static backend as default
Co-authored-by: Tomasz Dołbniak <tomasz.dolbniak@intel.com>
Co-authored-by: Sang Ik Lee <sang.ik.lee@intel.com>
parent 2c23cf20
......@@ -160,5 +160,6 @@ shared_ptr<Node> op::FakeQuantize::copy_with_new_args(const NodeVector& new_args
new_args.at(2), // input_high
new_args.at(3), // output_low
new_args.at(4), // output_high
m_levels);
m_levels,
m_auto_broadcast);
}
......@@ -18,12 +18,14 @@
#include "ngraph/file_util.hpp"
#include "ngraph/frontend/onnx_import/default_opset.hpp"
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
#include "util/type_prop.hpp"
using namespace ngraph;
using namespace ngraph::onnx_import;
using namespace ngraph::test;
static std::string s_manifest = "${MANIFEST}";
......@@ -69,11 +71,7 @@ NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, ab_plus_c_inference)
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/ab_plus_c.prototxt"));
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto executable = backend->compile(function);
auto out_tensor = backend->create_dynamic_tensor(function->get_output_element_type(0),
function->get_output_partial_shape(0));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
struct ExpectedValuesGenerator
{
......@@ -90,29 +88,21 @@ NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, ab_plus_c_inference)
for (size_t batch = 1; batch <= NUM_BATCHES_TO_TEST; ++batch)
{
const Shape input_shape = Shape{batch, 2};
const auto elems_in_tensor = shape_size(input_shape);
auto input_A = backend->create_tensor(element::i64, input_shape);
auto input_B = backend->create_tensor(element::i64, input_shape);
auto input_C = backend->create_tensor(element::i64, input_shape);
const Shape shape{batch, 2};
const auto elems_in_tensor = shape_size(shape);
std::vector<int64_t> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 1);
copy_data(input_A, input_values);
copy_data(input_B, input_values);
copy_data(input_C, input_values);
executable->call_with_validate({out_tensor}, {input_A, input_B, input_C});
const auto results = read_vector<int64_t>(out_tensor);
EXPECT_EQ(results.size(), elems_in_tensor);
test_case.add_input<int64_t>(shape, input_values);
test_case.add_input<int64_t>(shape, input_values);
test_case.add_input<int64_t>(shape, input_values);
std::vector<int64_t> expected_values(elems_in_tensor);
std::generate(expected_values.begin(), expected_values.end(), ExpectedValuesGenerator{});
test_case.add_expected_output<int64_t>(shape, expected_values);
EXPECT_TRUE(results == expected_values);
test_case.run();
}
}
......@@ -162,37 +152,26 @@ NGRAPH_TEST(onnx_dyn_shapes_${BACKEND_NAME}, dynamic_rank_input_inference)
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/a_plus_b_dyn_rank.prototxt"));
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto executable = backend->compile(function);
auto out_tensor = backend->create_dynamic_tensor(function->get_output_element_type(0),
function->get_output_partial_shape(0));
auto test_case = NgraphTestCase(function, "${BACKEND_NAME}", BackendMode::DYNAMIC);
const size_t RANKS_TO_TEST = 3;
const int64_t SCALAR_INPUT_VAL = 5;
for (size_t r = 0; r <= RANKS_TO_TEST; ++r)
{
const Shape input_a_shape = Shape(r, 2);
const auto elems_in_tensor = shape_size(input_a_shape);
auto input_A = backend->create_tensor(element::i64, input_a_shape);
auto input_B = backend->create_tensor(element::i64, Shape{});
const Shape shape(r, 2);
const auto elems_in_tensor = shape_size(shape);
std::vector<int64_t> input_values(elems_in_tensor);
std::iota(input_values.begin(), input_values.end(), 1);
copy_data(input_A, input_values);
copy_data<int64_t>(input_B, {SCALAR_INPUT_VAL});
executable->call_with_validate({out_tensor}, {input_A, input_B});
const auto results = read_vector<int64_t>(out_tensor);
EXPECT_EQ(results.size(), elems_in_tensor);
test_case.add_input<int64_t>(shape, input_values);
test_case.add_input<int64_t>(Shape{}, {SCALAR_INPUT_VAL});
std::vector<int64_t> expected_values(elems_in_tensor);
std::iota(expected_values.begin(), expected_values.end(), SCALAR_INPUT_VAL + 1);
test_case.add_expected_output<int64_t>(shape, expected_values);
EXPECT_TRUE(results == expected_values);
test_case.run();
}
}
......@@ -19,15 +19,38 @@
#include "gtest/gtest.h"
#include "ngraph/assertion.hpp"
// \brief Sets up the test case: creates the requested backend (dynamic when
//        mode == BackendMode::DYNAMIC), compiles the function once, and
//        pre-allocates one result tensor per function output.
//
// \param function     The nGraph function under test.
// \param backend_name Name of the backend to create, e.g. "INTERPRETER".
// \param mode         BackendMode::DYNAMIC creates a dynamic backend and dynamic
//                     output tensors; BackendMode::STATIC requires the function
//                     to have fully static shapes.
ngraph::test::NgraphTestCase::NgraphTestCase(const std::shared_ptr<Function>& function,
                                             const std::string& backend_name,
                                             const BackendMode mode)
    : m_function(function)
    , m_backend(ngraph::runtime::Backend::create(backend_name, mode == BackendMode::DYNAMIC))
{
    if (mode == BackendMode::STATIC)
    {
        // A function with dynamic shapes cannot be executed on a static backend.
        NGRAPH_CHECK(!m_function->is_dynamic(),
                     "For dynamic function using dynamic backend is expected.");
    }
    // Compile once here; run() reuses m_executable for every call.
    m_executable = m_backend->compile(m_function);
    // Fix: use size_t for the loop index — get_output_size() returns an unsigned
    // type, and `auto i = 0` deduced int, causing a signed/unsigned comparison.
    for (size_t i = 0; i < m_function->get_output_size(); ++i)
    {
        // Dynamic backends need dynamic tensors because the concrete output
        // shape is only known after execution.
        const auto& output_tensor =
            (mode == BackendMode::DYNAMIC)
                ? m_backend->create_dynamic_tensor(m_function->get_output_element_type(i),
                                                   m_function->get_output_partial_shape(i))
                : m_backend->create_tensor(m_function->get_output_element_type(i),
                                           m_function->get_output_shape(i));
        m_result_tensors.emplace_back(output_tensor);
    }
}
void ngraph::test::NgraphTestCase::run(size_t tolerance_bits)
{
m_tolerance_bits = tolerance_bits;
const auto& function_results = m_function->get_results();
NGRAPH_CHECK(m_expected_outputs.size() == function_results.size(),
"Expected number of outputs is different from the function's number of results.");
auto handle = m_backend->compile(m_function);
handle->call_with_validate(m_result_tensors, m_input_tensors);
m_executable->call_with_validate(m_result_tensors, m_input_tensors);
for (size_t i = 0; i < m_expected_outputs.size(); ++i)
{
......@@ -35,8 +58,10 @@ void ngraph::test::NgraphTestCase::run(size_t tolerance_bits)
const auto& expected_result_constant = m_expected_outputs.at(i);
const auto& element_type = result_tensor->get_element_type();
auto expected_shape = expected_result_constant->get_shape();
auto result_shape = result_tensor->get_shape();
EXPECT_EQ(expected_result_constant->get_output_size(), 1);
const auto& expected_shape = expected_result_constant->get_shape();
const auto& result_shape = result_tensor->get_shape();
EXPECT_EQ(expected_shape, result_shape);
if (m_value_comparators.count(element_type) == 0)
......@@ -51,6 +76,10 @@ void ngraph::test::NgraphTestCase::run(size_t tolerance_bits)
EXPECT_TRUE(values_match(expected_result_constant, result_tensor));
}
}
m_input_index = 0;
m_output_index = 0;
m_expected_outputs.clear();
m_input_tensors.clear();
}
ngraph::test::NgraphTestCase& ngraph::test::NgraphTestCase::dump_results(bool dump)
......
......@@ -28,36 +28,46 @@ namespace ngraph
{
namespace test
{
/// \brief Indicates which version of a backend (dynamic or static) should be used in
/// NgraphTestCase
enum class BackendMode
{
    // Use the static version of the backend (requires fully static shapes)
    STATIC,
    // Use the dynamic version of the backend (supports dynamic shapes/ranks)
    DYNAMIC
};
class NgraphTestCase
{
public:
NgraphTestCase(const std::shared_ptr<Function>& function,
const std::string& backend_name)
: m_function(function)
, m_backend(ngraph::runtime::Backend::create(backend_name))
{
}
const std::string& backend_name,
BackendMode mode = BackendMode::STATIC);
/// \brief Makes the test case print the expected and computed values to the console.
/// This should only be used for debugging purposes.
///
/// Just before the assertion is done, the current test case will gather expected and
/// computed values, format them as 2 columns and print out to the console along with
// a corresponding index in the vector.
/// a corresponding index in the vector.
///
/// \param dump - Indicates if the test case should perform the console printout
NgraphTestCase& dump_results(bool dump = true);
template <typename T>
void add_input(const std::vector<T>& values)
void add_input(const Shape& shape, const std::vector<T>& values)
{
auto params = m_function->get_parameters();
const auto params = m_function->get_parameters();
NGRAPH_CHECK(m_input_index < params.size(),
"All function parameters already have inputs.");
auto tensor = m_backend->create_tensor(params.at(m_input_index)->get_element_type(),
params.at(m_input_index)->get_shape());
const auto& input_pshape = params.at(m_input_index)->get_partial_shape();
NGRAPH_CHECK(input_pshape.compatible(shape),
"Passed input shape is not compatible with nGraph function.");
auto tensor =
m_backend->create_tensor(params.at(m_input_index)->get_element_type(), shape);
copy_data(tensor, values);
m_input_tensors.push_back(tensor);
......@@ -65,6 +75,27 @@ namespace ngraph
++m_input_index;
}
/// \brief Adds the next input to the test case, deducing its shape from the
///        corresponding Function parameter. The parameter's shape must be
///        fully static; otherwise use the (Shape, values) overload.
template <typename T>
void add_input(const std::vector<T>& values)
{
    const auto& param = m_function->get_parameters().at(m_input_index);
    const auto& declared_pshape = param->get_partial_shape();
    NGRAPH_CHECK(declared_pshape.is_static(),
                 "Input data shape must be provided, if shape defined in Functions is "
                 "not fully known.");
    add_input<T>(declared_pshape.to_shape(), values);
}
/// \brief Reads input data of the given shape from the binary file located at
///        basepath/filename and adds it as the next test case input.
template <typename T>
void add_input_from_file(const Shape& shape,
                         const std::string& basepath,
                         const std::string& filename)
{
    const auto joined_path = ngraph::file_util::path_join(basepath, filename);
    add_input_from_file<T>(shape, joined_path);
}
template <typename T>
void add_input_from_file(const std::string& basepath, const std::string& filename)
{
......@@ -72,11 +103,31 @@ namespace ngraph
add_input_from_file<T>(filepath);
}
/// \brief Reads input data of the given shape from the binary file at filepath
///        and adds it as the next test case input.
template <typename T>
void add_input_from_file(const Shape& shape, const std::string& filepath)
{
    add_input<T>(shape, read_binary_file<T>(filepath));
}
/// \brief Reads input data from the binary file at filepath and adds it as the
///        next test case input; the shape is deduced from the Function parameter.
template <typename T>
void add_input_from_file(const std::string& filepath)
{
    const auto file_data = read_binary_file<T>(filepath);
    add_input<T>(file_data);
}
template <typename T>
void add_multiple_inputs(const std::vector<Shape> shapes,
const std::vector<std::vector<T>>& vector_of_values)
{
NGRAPH_CHECK(shapes.size() == vector_of_values.size(),
"Size of shapes and vector_of_values vectors must be the same.");
for (auto i = 0; i < vector_of_values.size(); ++i)
{
add_input<T>(shapes[i], vector_of_values[i]);
}
}
template <typename T>
......@@ -84,7 +135,7 @@ namespace ngraph
{
for (const auto& value : vector_of_values)
{
add_input(value);
add_input<T>(value);
}
}
......@@ -97,8 +148,10 @@ namespace ngraph
"All function results already have expected outputs.");
auto function_output_type = results.at(m_output_index)->get_element_type();
m_result_tensors.emplace_back(
m_backend->create_tensor(function_output_type, expected_shape));
const auto& output_pshape = results.at(m_output_index)->get_output_partial_shape(0);
NGRAPH_CHECK(output_pshape.compatible(expected_shape),
"Passed output shape is not compatible with nGraph function.");
m_expected_outputs.emplace_back(std::make_shared<ngraph::op::Constant>(
function_output_type, expected_shape, values));
......@@ -110,7 +163,7 @@ namespace ngraph
void add_expected_output(const std::vector<T>& values)
{
auto shape = m_function->get_results().at(m_output_index)->get_shape();
add_expected_output(shape, values);
add_expected_output<T>(shape, values);
}
template <typename T>
......@@ -127,7 +180,7 @@ namespace ngraph
const std::string& filepath)
{
auto value = read_binary_file<T>(filepath);
add_expected_output(expected_shape, value);
add_expected_output<T>(expected_shape, value);
}
void run(size_t tolerance_bits = DEFAULT_FLOAT_TOLERANCE_BITS);
......@@ -195,6 +248,7 @@ namespace ngraph
protected:
std::shared_ptr<Function> m_function;
std::shared_ptr<runtime::Backend> m_backend;
std::shared_ptr<ngraph::runtime::Executable> m_executable;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> m_input_tensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> m_result_tensors;
std::vector<std::shared_ptr<ngraph::op::Constant>> m_expected_outputs;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment