Unverified commit e7cf2662, authored by Robert Kimball and committed via GitHub

Add backend call validation and unit tests (#857)

parent 026bede0
......@@ -14,9 +14,12 @@
* limitations under the License.
*******************************************************************************/
#include <sstream>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
#include "ngraph/runtime/manager.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
......@@ -46,3 +49,64 @@ vector<size_t> runtime::Backend::get_subdevices(const string& type)
// Default implementation is a no-op: backends that cache compiled state for a
// Function (e.g. in a function map) override this to release that state.
void runtime::Backend::remove_compiled_function(std::shared_ptr<Function> func)
{
}
// Validate the tensors supplied to a backend call() against the Function's
// signature: the input/output counts must match the Function's Parameter and
// Result counts, and each tensor's element type and shape must match the
// corresponding Parameter/Result. Throws std::runtime_error describing the
// first mismatch found; returns normally when everything matches.
//
// \param function the Function about to be executed
// \param outputs  tensors that will receive the Function's results
// \param inputs   tensors bound to the Function's parameters
void runtime::Backend::validate_call(shared_ptr<const Function> function,
                                     const vector<shared_ptr<runtime::TensorView>>& outputs,
                                     const vector<shared_ptr<runtime::TensorView>>& inputs)
{
    const op::ParameterVector& input_parameters = function->get_parameters();
    if (input_parameters.size() != inputs.size())
    {
        stringstream ss;
        ss << "Call input count " << inputs.size() << " does not match Function's Parameter count "
           << input_parameters.size();
        throw runtime_error(ss.str());
    }
    if (function->get_output_size() != outputs.size())
    {
        stringstream ss;
        ss << "Call output count " << outputs.size() << " does not match Function's Result count "
           << function->get_output_size();
        throw runtime_error(ss.str());
    }

    // Element-wise checks: type first, then shape, for each input position.
    for (size_t i = 0; i < input_parameters.size(); i++)
    {
        if (input_parameters[i]->get_element_type() != inputs[i]->get_tensor().get_element_type())
        {
            stringstream ss;
            ss << "Input " << i << " type '" << inputs[i]->get_tensor().get_element_type()
               << "' does not match Parameter type '" << input_parameters[i]->get_element_type()
               << "'";
            throw runtime_error(ss.str());
        }
        if (input_parameters[i]->get_shape() != inputs[i]->get_shape())
        {
            stringstream ss;
            ss << "Input " << i << " shape {" << join(inputs[i]->get_shape())
               << "} does not match Parameter shape {" << join(input_parameters[i]->get_shape())
               << "}";
            throw runtime_error(ss.str());
        }
    }
    for (size_t i = 0; i < function->get_output_size(); i++)
    {
        if (function->get_output_element_type(i) != outputs[i]->get_tensor().get_element_type())
        {
            stringstream ss;
            // Fixed message: outputs are compared against the Function's
            // Results, not its Parameters (previously said "Parameter type").
            ss << "Output " << i << " type '" << outputs[i]->get_tensor().get_element_type()
               << "' does not match Result type '" << function->get_output_element_type(i)
               << "'";
            throw runtime_error(ss.str());
        }
        if (function->get_output_shape(i) != outputs[i]->get_shape())
        {
            stringstream ss;
            // Fixed message: "Result shape", not "Parameter shape".
            ss << "Output " << i << " shape {" << join(outputs[i]->get_shape())
               << "} does not match Result shape {" << join(function->get_output_shape(i))
               << "}";
            throw runtime_error(ss.str());
        }
    }
}
......@@ -100,6 +100,11 @@ namespace ngraph
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs) = 0;
virtual void remove_compiled_function(std::shared_ptr<Function> func);
protected:
void validate_call(std::shared_ptr<const Function> func,
const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs);
};
}
}
......@@ -65,6 +65,9 @@ bool runtime::cpu::CPU_Backend::call(std::shared_ptr<Function> func,
const vector<shared_ptr<runtime::TensorView>>& inputs)
{
bool rc = true;
validate_call(func, outputs, inputs);
auto it = m_function_map.find(func);
if (it == m_function_map.end())
{
......
......@@ -66,6 +66,9 @@ bool runtime::gpu::GPU_Backend::call(
const std::vector<std::shared_ptr<runtime::TensorView>>& inputs)
{
bool rc = true;
validate_call(func, outputs, inputs);
auto it = m_function_map.find(func);
if (it == m_function_map.end())
{
......
......@@ -73,6 +73,9 @@ bool runtime::interpreter::INT_Backend::call(std::shared_ptr<Function> func,
const vector<shared_ptr<runtime::TensorView>>& inputs)
{
bool rc = true;
validate_call(func, outputs, inputs);
auto it = m_function_map.find(func);
if (it == m_function_map.end())
{
......
......@@ -1793,8 +1793,8 @@ TEST(${BACKEND_NAME}, reduce_trivial)
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{1, 2, 3, 4});
auto b = backend->create_tensor(element::f32, shape);
copy_data(b, vector<float>{0, 0, 0, 0});
auto b = backend->create_tensor(element::f32, {});
copy_data(b, vector<float>{0});
auto result = backend->create_tensor(element::f32, shape);
backend->call(g, {result}, {a, b});
......@@ -4858,7 +4858,7 @@ TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_1channel_1image)
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a,
test::NDArray<float, 3>{{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}}}.get_vector());
auto b = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(
b,
vector<float>{
......@@ -4900,7 +4900,7 @@ TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_1channel_2image)
test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}},
{{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2}}})
.get_vector());
auto b = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(
b,
vector<float>{
......@@ -4947,7 +4947,7 @@ TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_1d_2channel_2image)
{{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2},
{2, 1, 0, 0, 1, 0, 2, 0, 0, 0, 1, 1, 2, 0}}})
.get_vector());
auto b = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(
b,
vector<float>{
......@@ -5014,7 +5014,7 @@ TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_2d_2channel_2image)
{1, 1, 1, 0, 1},
{1, 0, 0, 0, 2}}}})
.get_vector());
auto b = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(
b,
vector<float>{
......@@ -5081,7 +5081,7 @@ TEST(${BACKEND_NAME}, reduce_window_emulating_max_pool_2d_1channel_1image_stride
{1, 2, 0, 0, 0, 1, 2, 0},
{1, 0, 2, 0, 0, 0, 1, 0}}}})
.get_vector());
auto b = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(
b,
vector<float>{
......@@ -7805,3 +7805,106 @@ TEST(${BACKEND_NAME}, tensorview_custom_mem)
cf->call({result}, {a, b});
EXPECT_EQ((vector<float>{2, 2, 2, 2}), rv);
}
// Calling with fewer input tensors than the Function has Parameters must throw.
TEST(${BACKEND_NAME}, validate_call_input_count)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_result = backend->create_tensor(element::f32, shape);

    // Only one of the two required inputs is supplied.
    EXPECT_ANY_THROW(backend->call(f, {t_result}, {t0}));
}
// An input tensor whose element type differs from its Parameter must be rejected.
TEST(${BACKEND_NAME}, validate_call_input_type)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    // First input is i32 while the Parameter expects f32.
    auto t0 = backend->create_tensor(element::i32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_result = backend->create_tensor(element::f32, shape);

    EXPECT_ANY_THROW(backend->call(f, {t_result}, {t0, t1}));
}
// An input tensor whose shape differs from its Parameter must be rejected.
TEST(${BACKEND_NAME}, validate_call_input_shape)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    // First input is 2x3 while the Parameter expects 2x2.
    auto t0 = backend->create_tensor(element::f32, {2, 3});
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_result = backend->create_tensor(element::f32, shape);

    EXPECT_ANY_THROW(backend->call(f, {t_result}, {t0, t1}));
}
// Supplying more output tensors than the Function has Results must throw.
TEST(${BACKEND_NAME}, validate_call_output_count)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);
    auto t_out0 = backend->create_tensor(element::f32, shape);
    auto t_out1 = backend->create_tensor(element::f32, shape);

    // Two outputs given; the Function produces only one Result.
    EXPECT_ANY_THROW(backend->call(f, {t_out0, t_out1}, {t0, t1}));
}
// An output tensor whose element type differs from the Result must be rejected.
TEST(${BACKEND_NAME}, validate_call_output_type)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    // The i32 tensor is used as the output; the Result type is f32.
    auto t_bad_out = backend->create_tensor(element::i32, shape);
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);

    EXPECT_ANY_THROW(backend->call(f, {t_bad_out}, {t0, t1}));
}
// An output tensor whose shape differs from the Result must be rejected.
TEST(${BACKEND_NAME}, validate_call_output_shape)
{
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    Shape shape{2, 2};
    auto param0 = make_shared<op::Parameter>(element::f32, shape);
    auto param1 = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Add>(param0, param1),
                                   op::ParameterVector{param0, param1});

    // The 2x3 tensor is used as the output; the Result shape is 2x2.
    auto t_bad_out = backend->create_tensor(element::f32, {2, 3});
    auto t0 = backend->create_tensor(element::f32, shape);
    auto t1 = backend->create_tensor(element::f32, shape);

    EXPECT_ANY_THROW(backend->call(f, {t_bad_out}, {t1, t0}));
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment