Commit 5b0c6a32 authored by Adam Procter, committed by Robert Kimball

Dynamic-shape backend API change, with prototype implementation (#2844)

* Add create_dynamic_tensor function to Backend signature

* Add skeleton of dynamic wrapper class

* Simple test for construction

* Some progress on dynamic tensor creation

* Compile dynamic wrapper into core; change config string to a factory option

* Add wrapped executable, working on simple example

* Add wrapped dynamic tensor

* Update to match new tensor API

* Update wrapper to clone the graph; basic dyn test now passes

* Update call_with_validate to grok dynamic

* Make the unit tests a little prettier

* Make get_element_type virtual

* Change dynamic tests to be backend-parametric; fix get_element_type check for dyn tensors

* Add dynamic.in.cpp

* CODEOWNERS entry for dynamic_wrapper

* Remove overly-accommodating behavior for create_dynamic_tensor on non-dynamic BEs

* Pass enable_performance_collection to wrapped compile() calls

* More comments in the 'abc' test

* Fix 'create' wrapper in pybind11

* Remove unnecessary WrappedStaticTensor class

* Better names for the classes, and add some docstrings

* Fix pybind for 'create' to return shared_ptr now
parent f07b95a2
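For orientation, here is a minimal sketch of how the new API introduced by this commit is meant to be used, based on the `dynamic.in.cpp` test added below; the backend name and the concrete middle dimension are illustrative.

// Sketch only: request a backend that must support dynamic tensors. If the named
// backend lacks native support, Backend::create wraps it in a DynamicBackend.
auto backend = ngraph::runtime::Backend::create("INTERPRETER", /*must_support_dynamic=*/true);

// Build f(a,b) = a + b, where the middle dimension is unknown until runtime.
auto a = std::make_shared<ngraph::op::Parameter>(
    ngraph::element::f32, ngraph::PartialShape{2, ngraph::Dimension::dynamic(), 3});
auto b = std::make_shared<ngraph::op::Parameter>(
    ngraph::element::f32, ngraph::PartialShape{2, ngraph::Dimension::dynamic(), 3});
auto f = std::make_shared<ngraph::Function>(a + b, ngraph::ParameterVector{a, b});
auto ex = backend->compile(f);

// The output tensor is dynamic; call() fills in its concrete shape.
auto t_r = backend->create_dynamic_tensor(
    ngraph::element::f32, ngraph::PartialShape{2, ngraph::Dimension::dynamic(), 3});
auto t_a = backend->create_tensor(ngraph::element::f32, ngraph::Shape{2, 4, 3});
auto t_b = backend->create_tensor(ngraph::element::f32, ngraph::Shape{2, 4, 3});
ex->call_with_validate({t_r}, {t_a, t_b});
// t_r->get_shape() now returns Shape{2, 4, 3}.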
......@@ -43,6 +43,7 @@
/src/ngraph/runtime/ @rkimballn1 @jbobba
/src/ngraph/runtime/cpu/ @jbobba
/src/ngraph/runtime/cpu/builder/allreduce.*pp @wenzhe-nrv @jbobba @avijit-nervana
/src/ngraph/runtime/dynamic_wrapper/ @aprocter
/src/ngraph/runtime/gpu/ @rkimballn1
/src/ngraph/runtime/hybrid/ @rkimballn1
/src/ngraph/runtime/intelgpu/ @dmyershov
......
......@@ -30,12 +30,18 @@ static std::shared_ptr<ngraph::runtime::Executable> compile(ngraph::runtime::Bac
return self->compile(func, enable_performance_data);
}
static std::shared_ptr<ngraph::runtime::Backend> create(const std::string& type)
{
bool must_support_dynamic = false;
return ngraph::runtime::Backend::create(type, must_support_dynamic);
}
void regclass_pyngraph_runtime_Backend(py::module m)
{
py::class_<ngraph::runtime::Backend, std::shared_ptr<ngraph::runtime::Backend>> backend(
m, "Backend");
backend.doc() = "ngraph.impl.runtime.Backend wraps ngraph::runtime::Backend";
backend.def_static("create", &ngraph::runtime::Backend::create);
backend.def_static("create", &create);
backend.def_static("get_registered_devices", &ngraph::runtime::Backend::get_registered_devices);
backend.def("create_tensor",
(std::shared_ptr<ngraph::runtime::Tensor>(ngraph::runtime::Backend::*)(
......
......@@ -445,6 +445,11 @@ set(SRC ${SRC}
runtime/hybrid/pass/memory_layout.hpp
)
set(SRC ${SRC}
runtime/dynamic/dynamic_backend.cpp
runtime/dynamic/dynamic_backend.hpp
)
if(NGRAPH_JSON_ENABLE)
list(APPEND SRC serializer.cpp serializer.hpp event_tracing.cpp event_tracing.hpp)
endif()
......
......@@ -20,6 +20,7 @@
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
#include "ngraph/runtime/dynamic/dynamic_backend.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -35,9 +36,19 @@ std::shared_ptr<ngraph::Node> runtime::Backend::get_backend_op(const std::string
return dummy_node;
}
shared_ptr<runtime::Backend> runtime::Backend::create(const string& type)
std::shared_ptr<runtime::Backend> runtime::Backend::create(const string& type,
bool must_support_dynamic)
{
return BackendManager::create_backend(type);
auto inner_backend = BackendManager::create_backend(type);
if (!must_support_dynamic || inner_backend->supports_dynamic_tensors())
{
return inner_backend;
}
else
{
return make_shared<runtime::dynamic::DynamicBackend>(inner_backend);
}
}
vector<string> runtime::Backend::get_registered_devices()
......@@ -45,6 +56,13 @@ vector<string> runtime::Backend::get_registered_devices()
return BackendManager::get_registered_backends();
}
std::shared_ptr<ngraph::runtime::Tensor>
runtime::Backend::create_dynamic_tensor(const ngraph::element::Type& element_type,
const PartialShape& shape)
{
throw std::invalid_argument("This backend does not support dynamic tensors");
}
std::shared_ptr<runtime::Executable>
runtime::Backend::compile(std::shared_ptr<Function> func,
ngraph::pass::PassConfig& pass_config,
......
......@@ -44,9 +44,15 @@ public:
/// \brief Create a new Backend object
/// \param type The name of a registered backend, such as "CPU" or "GPU".
/// To select a subdevice use "GPU:N" where `N` is the subdevice number.
/// \param must_support_dynamic If `true`, the returned `Backend` object
/// will support dynamic tensors. If the underlying backend has native
/// support for dynamic tensors, then that backend object will be
/// returned directly. Otherwise, it will be wrapped in a
/// DynamicBackend. This feature is EXPERIMENTAL.
/// \returns shared_ptr to a new Backend or nullptr if the named backend
/// does not exist.
static std::shared_ptr<Backend> create(const std::string& type);
static std::shared_ptr<Backend> create(const std::string& type,
bool must_support_dynamic = false);
/// \brief Query the list of registered devices
/// \returns A vector of all registered devices.
......@@ -78,6 +84,17 @@ public:
return create_tensor(element::from<T>(), shape);
}
/// \brief Create a dynamic tensor specific to this backend, if the backend supports dynamic
/// tensors.
/// \param element_type The type of the tensor element
/// \param shape The shape of the tensor
/// \returns shared_ptr to a new backend-specific tensor
/// \throws std::invalid_argument if the backend does not support dynamic tensors
virtual std::shared_ptr<ngraph::runtime::Tensor>
create_dynamic_tensor(const ngraph::element::Type& element_type, const PartialShape& shape);
/// \returns `true` if this backend supports dynamic tensors, else `false`.
virtual bool supports_dynamic_tensors() { return false; }
/// \brief Compiles a Function.
/// \param func The function to compile
/// \returns compiled function or nullptr on failure
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/dynamic/dynamic_backend.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/shape_relevance.hpp"
#include "ngraph/specialize_shapes.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
runtime::dynamic::DynamicBackend::DynamicBackend(shared_ptr<runtime::Backend> wrapped_backend)
: m_wrapped_backend(std::move(wrapped_backend))
{
}
shared_ptr<runtime::Tensor>
runtime::dynamic::DynamicBackend::create_tensor(const element::Type& type, const Shape& shape)
{
return m_wrapped_backend->create_tensor(type, shape);
}
shared_ptr<runtime::Tensor> runtime::dynamic::DynamicBackend::create_tensor(
const element::Type& type, const Shape& shape, void* memory_pointer)
{
return m_wrapped_backend->create_tensor(type, shape, memory_pointer);
}
std::shared_ptr<runtime::Tensor>
runtime::dynamic::DynamicBackend::create_dynamic_tensor(const element::Type& type,
const PartialShape& shape)
{
return make_shared<DynamicTensor>(type, shape, m_wrapped_backend);
}
shared_ptr<runtime::Executable>
runtime::dynamic::DynamicBackend::compile(shared_ptr<Function> function,
bool enable_performance_collection)
{
return make_shared<runtime::dynamic::DynamicExecutable>(
function, m_wrapped_backend, enable_performance_collection);
}
runtime::dynamic::DynamicExecutable::DynamicExecutable(shared_ptr<Function> wrapped_function,
shared_ptr<runtime::Backend> wrapped_backend,
bool enable_performance_collection)
: m_wrapped_function(wrapped_function)
, m_wrapped_backend(wrapped_backend)
, m_enable_performance_collection(enable_performance_collection)
{
pass::Manager passes;
passes.register_pass<pass::ShapeRelevance>();
passes.run_passes(m_wrapped_function);
set_parameters_and_results(*wrapped_function);
}
bool runtime::dynamic::DynamicExecutable::call(
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs)
{
// TODO: Get cached executable out if it exists.
// We will cache on:
// (1) all shapes;
// (2) all values of shape-relevant input tensors.
std::vector<std::shared_ptr<runtime::Tensor>> wrapped_inputs;
std::vector<element::Type> arg_element_types;
std::vector<PartialShape> arg_shapes;
for (auto& input : inputs)
{
if (auto dynamic_tensor = std::dynamic_pointer_cast<runtime::dynamic::DynamicTensor>(input))
{
NGRAPH_CHECK(dynamic_tensor->has_storage());
arg_element_types.push_back(dynamic_tensor->get_wrapped_tensor()->get_element_type());
arg_shapes.push_back(dynamic_tensor->get_wrapped_tensor()->get_shape());
wrapped_inputs.push_back(dynamic_tensor->get_wrapped_tensor());
}
else
{
arg_element_types.push_back(input->get_element_type());
arg_shapes.push_back(input->get_shape());
wrapped_inputs.push_back(input);
}
}
// TODO: specialize_shapes needs to fill in values of shape-relevant params.
auto clone = specialize_shapes(m_wrapped_function, arg_element_types, arg_shapes);
// TODO: run constant folding and de-dynification on clone.
const ResultVector& results = clone->get_results();
NGRAPH_CHECK(results.size() == outputs.size());
std::vector<std::shared_ptr<runtime::Tensor>> wrapped_outputs;
auto results_it = results.begin();
for (auto& output : outputs)
{
if (auto dynamic_tensor =
std::dynamic_pointer_cast<runtime::dynamic::DynamicTensor>(output))
{
dynamic_tensor->make_storage((*results_it)->get_output_element_type(0),
(*results_it)->get_output_shape(0));
wrapped_outputs.push_back(dynamic_tensor->get_wrapped_tensor());
}
else
{
wrapped_outputs.push_back(output);
}
}
// TODO: Put compiled executable in the cache.
auto compiled_executable = m_wrapped_backend->compile(clone, m_enable_performance_collection);
auto result = compiled_executable->call(wrapped_outputs, wrapped_inputs);
return result;
}
runtime::dynamic::DynamicTensor::DynamicTensor(
const element::Type& element_type,
const PartialShape& shape,
const std::shared_ptr<runtime::Backend>& wrapped_backend)
: Tensor(make_shared<descriptor::Tensor>(element_type, shape, "wrapped_dynamic"))
, m_wrapped_tensor(nullptr)
, m_wrapped_backend(wrapped_backend)
{
}
const element::Type& runtime::dynamic::DynamicTensor::get_element_type() const
{
if (m_wrapped_tensor == nullptr)
{
return m_descriptor->get_element_type();
}
else
{
return m_wrapped_tensor->get_element_type();
}
}
const ngraph::Shape& runtime::dynamic::DynamicTensor::get_shape() const
{
NGRAPH_CHECK(m_wrapped_tensor != nullptr,
"asked for shape of a dynamic tensor with no allocated storage");
return m_wrapped_tensor->get_shape();
}
void runtime::dynamic::DynamicTensor::write(const void* p, size_t offset, size_t n)
{
NGRAPH_CHECK(m_wrapped_tensor != nullptr,
"tried to write to a dynamic tensor with no allocated storage");
m_wrapped_tensor->write(p, offset, n);
}
void runtime::dynamic::DynamicTensor::read(void* p, size_t offset, size_t n) const
{
NGRAPH_CHECK(m_wrapped_tensor != nullptr,
"tried to read from a dynamic tensor with no allocated storage");
m_wrapped_tensor->read(p, offset, n);
}
void runtime::dynamic::DynamicTensor::copy_from(const ngraph::runtime::Tensor& source)
{
NGRAPH_CHECK(m_wrapped_tensor != nullptr,
"tried to copy_from to a dynamic tensor with no allocated storage");
m_wrapped_tensor->copy_from(source);
}
bool runtime::dynamic::DynamicTensor::has_storage() const
{
return m_wrapped_tensor != nullptr;
}
void runtime::dynamic::DynamicTensor::release_storage()
{
m_wrapped_tensor = nullptr;
}
void runtime::dynamic::DynamicTensor::make_storage(const element::Type& element_type,
const Shape& shape)
{
NGRAPH_CHECK(element_type.is_static(), "make_storage requires a static element type");
NGRAPH_CHECK(get_element_type().is_dynamic() || get_element_type() == element_type,
"tried to make storage with element type ",
element_type,
" which is incompatible with dynamic tensor element_type ",
get_element_type());
NGRAPH_CHECK(get_partial_shape().relaxes(shape),
"tried to make storage with shape ",
shape,
" which is incompatible with dynamic tensor shape ",
get_partial_shape());
m_wrapped_tensor = m_wrapped_backend->create_tensor(element_type, shape);
}
const std::shared_ptr<ngraph::runtime::Tensor>&
runtime::dynamic::DynamicTensor::get_wrapped_tensor() const
{
return m_wrapped_tensor;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
namespace ngraph
{
namespace runtime
{
namespace dynamic
{
class DynamicBackend;
class DynamicExecutable;
class DynamicTensor;
}
}
}
///
/// \brief Wrapper class used to provide dynamic tensor support on backends
/// that otherwise do not support dynamic tensors.
///
/// The main function of this class is to intercept `create_dynamic_tensor`
/// and `compile`:
///
/// * `create_dynamic_tensor` will return a special `DynamicTensor` object
/// whose shape can be updated after creation. Internally, `DynamicTensor`
/// wraps static tensors managed by the wrapped backend.
/// * `compile` will return a special `DynamicExecutable` object, which allows
/// dynamic shapes to be supported via graph cloning.
///
/// This class is instantiated by `ngraph::runtime::Backend::create`.
///
class ngraph::runtime::dynamic::DynamicBackend : public Backend
{
public:
DynamicBackend(std::shared_ptr<ngraph::runtime::Backend> wrapped_backend);
std::shared_ptr<Tensor>
create_tensor(const element::Type& type, const Shape& shape, void* memory_pointer) override;
std::shared_ptr<Tensor> create_tensor(const element::Type& type, const Shape& shape) override;
std::shared_ptr<Tensor> create_dynamic_tensor(const element::Type& type,
const PartialShape& shape) override;
bool supports_dynamic_tensors() override { return true; }
std::shared_ptr<Executable> compile(std::shared_ptr<Function> function,
bool enable_performance_data = false) override;
private:
std::shared_ptr<ngraph::runtime::Backend> m_wrapped_backend;
};
///
/// \brief Wrapper class used to provide an Executable that supports dynamic
/// tensors on top of a backend that does not support dynamic tensors
/// natively.
///
/// This class intercepts `call` and:
///
/// 1. creates a clone of the stored function with shapes tailored to the
/// actual runtime inputs;
/// 2. compiles the clone using the wrapped backend;
/// 3. forwards the input tensors to the cloned executable for actual execution.
///
/// `DynamicExecutable` objects are produced by `DynamicBackend::compile()`.
///
class ngraph::runtime::dynamic::DynamicExecutable : public ngraph::runtime::Executable
{
public:
DynamicExecutable(std::shared_ptr<Function> wrapped_function,
std::shared_ptr<ngraph::runtime::Backend> wrapped_backend,
bool enable_performance_collection = false);
virtual bool call(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) override;
private:
std::shared_ptr<ngraph::Function> m_wrapped_function;
std::shared_ptr<ngraph::runtime::Backend> m_wrapped_backend;
bool m_enable_performance_collection;
};
///
/// \brief Wrapper class used to emulate dynamic tensors on top of a backend
/// that does not support dynamic tensors natively.
///
/// The behavior of a dynamic tensor extends that of `runtime::Tensor` as
/// follows:
///
/// 1. `get_partial_shape()` returns a `PartialShape` representing all shapes
/// this tensor could possibly take on at execution time.
/// 2. `get_shape()` returns a `Shape` representing the _exact_ shape of this
/// tensor's current value. (If the tensor has not yet been assigned a
/// value, `get_shape()` throws an exception.)
/// 3. `make_storage()` allocates storage for a value of a specific element
/// type and shape, which must be consistent with the partial shape/element
/// type. Once storage has been allocated, `get_shape()` can safely be
/// called until the storage has been released via `release_storage()`.
/// 4. `release_storage()` unassigns previously assigned storage.
///
class ngraph::runtime::dynamic::DynamicTensor : public ngraph::runtime::Tensor
{
public:
DynamicTensor(const element::Type& element_type,
const PartialShape& shape,
const std::shared_ptr<runtime::Backend>& wrapped_backend);
virtual const element::Type& get_element_type() const override;
virtual const ngraph::Shape& get_shape() const override;
virtual void write(const void* p, size_t offset, size_t n) override;
virtual void read(void* p, size_t offset, size_t n) const override;
virtual void copy_from(const ngraph::runtime::Tensor& source) override;
bool has_storage() const;
void release_storage();
void make_storage(const element::Type& element_type, const Shape& shape);
const std::shared_ptr<ngraph::runtime::Tensor>& get_wrapped_tensor() const;
private:
std::shared_ptr<ngraph::runtime::Tensor> m_wrapped_tensor;
std::shared_ptr<ngraph::runtime::Backend> m_wrapped_backend;
};
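To make the storage lifecycle above concrete, here is a small sketch (not part of the diff); `backend` is assumed to be the wrapped static-shape backend passed to the constructor, and the concrete shape is illustrative.

// Sketch only: exercise the DynamicTensor storage lifecycle.
auto dt = std::make_shared<ngraph::runtime::dynamic::DynamicTensor>(
    ngraph::element::f32, ngraph::PartialShape{2, ngraph::Dimension::dynamic(), 3}, backend);

// No storage yet: get_partial_shape() is {2,?,3}, has_storage() is false,
// and get_shape()/read()/write() would fail their NGRAPH_CHECKs.

dt->make_storage(ngraph::element::f32, ngraph::Shape{2, 4, 3}); // must be relaxed by the declared {2,?,3}
std::vector<float> data(2 * 4 * 3, 0.0f);
dt->write(data.data(), 0, data.size() * sizeof(float)); // delegates to the wrapped static tensor
// dt->get_shape() now returns Shape{2, 4, 3}.

dt->release_storage(); // unassigns the wrapped tensor; get_shape() would fail again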
......@@ -61,36 +61,38 @@ void runtime::Executable::validate(const vector<std::shared_ptr<runtime::Tensor>
for (size_t i = 0; i < parameters.size(); i++)
{
if (parameters[i]->get_element_type() != inputs[i]->get_element_type())
if (parameters[i]->get_element_type().is_static() &&
parameters[i]->get_element_type() != inputs[i]->get_element_type())
{
stringstream ss;
ss << "Input " << i << " type '" << inputs[i]->get_element_type()
<< "' does not match Parameter type '" << parameters[i]->get_element_type() << "'";
throw runtime_error(ss.str());
}
if (parameters[i]->get_shape() != inputs[i]->get_shape())
if (!(parameters[i]->get_output_partial_shape(0).relaxes(inputs[i]->get_partial_shape())))
{
stringstream ss;
ss << "Input " << i << " shape {" << join(inputs[i]->get_shape())
<< "} does not match Parameter shape {" << join(parameters[i]->get_shape()) << "}";
ss << "Input " << i << " shape " << inputs[i]->get_partial_shape()
<< " does not match Parameter shape " << parameters[i]->get_output_partial_shape(0);
throw runtime_error(ss.str());
}
}
for (size_t i = 0; i < results.size(); i++)
{
if (results[i]->get_element_type() != outputs[i]->get_element_type())
if (outputs[i]->get_element_type().is_static() &&
results[i]->get_element_type() != outputs[i]->get_element_type())
{
stringstream ss;
ss << "Output " << i << " type '" << outputs[i]->get_element_type()
<< "' does not match Result type '" << results[i]->get_element_type() << "'";
throw runtime_error(ss.str());
}
if (results[i]->get_shape() != outputs[i]->get_shape())
if (!(outputs[i]->get_partial_shape()).relaxes(results[i]->get_output_partial_shape(0)))
{
stringstream ss;
ss << "Output " << i << " shape {" << join(outputs[i]->get_shape())
<< "} does not match Result shape {" << join(results[i]->get_shape()) << "}";
ss << "Output " << i << " shape " << outputs[i]->get_partial_shape()
<< " does not match Result shape " << results[i]->get_output_partial_shape(0);
throw runtime_error(ss.str());
}
}
......
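A brief note on `PartialShape::relaxes` as used in the validation above (shapes here are illustrative): a possibly-dynamic declared shape relaxes a concrete shape when the concrete shape can be obtained by fixing the declared shape's dynamic dimensions.

// Illustrative only:
ngraph::PartialShape declared{2, ngraph::Dimension::dynamic(), 3};
bool ok = declared.relaxes(ngraph::PartialShape{2, 4, 3});  // expected: true
bool bad = declared.relaxes(ngraph::PartialShape{2, 4, 4}); // expected: false (3 vs. 4)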
......@@ -28,6 +28,11 @@ const Shape& runtime::Tensor::get_shape() const
return m_descriptor->get_shape();
}
const PartialShape& runtime::Tensor::get_partial_shape() const
{
return m_descriptor->get_partial_shape();
}
Strides runtime::Tensor::get_strides() const
{
return m_descriptor->get_tensor_layout()->get_strides();
......
......@@ -50,7 +50,11 @@ namespace ngraph
/// \brief Get tensor shape
/// \return const reference to a Shape
const ngraph::Shape& get_shape() const;
virtual const ngraph::Shape& get_shape() const;
/// \brief Get tensor partial shape
/// \return const reference to a PartialShape
const ngraph::PartialShape& get_partial_shape() const;
/// \brief Get tensor strides
/// \return Strides
......@@ -58,7 +62,7 @@ namespace ngraph
/// \brief Get tensor element type
/// \return element::Type
const element::Type& get_element_type() const;
virtual const element::Type& get_element_type() const;
/// \brief Get number of elements in the tensor
/// \return number of elements in the tensor
......
......@@ -133,7 +133,9 @@ add_subdirectory(util)
# backend specific test files must meet the following requirements:
# 1) They must be named <name>.in.cpp
# 2) They must be in the test directory
# 3) add the line `static string s_manifest = "${MANIFEST}";` to your cpp file
# 3) Include "util/test_control.hpp" in your cpp file
# 4) add the line `static string s_manifest = "${MANIFEST}";` to your cpp file
# 5) Use the `NGRAPH_TEST` macro in place of `TEST`.
# All such files are configured via cmake which replaces all instances of cmake variables
# such as ${BACKEND_NAME} with their values, such as CPU, GPU, or INTERPRETER.
set(MULTI_TEST_SRC
......@@ -157,6 +159,7 @@ set(MULTI_TEST_SRC
backend_test.in.cpp
backend_unary_elementwise.in.cpp
convolution_test.in.cpp
dynamic.in.cpp
)
if(NGRAPH_DISTRIBUTED_ENABLE)
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
NGRAPH_TEST(dynamic_${BACKEND_NAME}, create)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
ASSERT_NE(backend, nullptr);
ASSERT_TRUE(backend->supports_dynamic_tensors());
}
NGRAPH_TEST(dynamic_${BACKEND_NAME}, create_no_dynamic)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
ASSERT_NE(backend, nullptr);
ASSERT_FALSE(backend->supports_dynamic_tensors());
}
NGRAPH_TEST(dynamic_${BACKEND_NAME}, create_dynamic_tensor)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto t = backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3});
ASSERT_TRUE(t->get_partial_shape().same_scheme(PartialShape{2, Dimension::dynamic(), 3}));
}
NGRAPH_TEST(dynamic_${BACKEND_NAME}, abc)
{
//
// Create a graph for f(a,b,c) = (a+b)*c, where a, b, c all have shape {2,?,3}.
//
auto a = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
auto b = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
auto c = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
auto a_plus_b_times_c = (a + b) * c;
auto f = make_shared<Function>(NodeVector{a_plus_b_times_c}, ParameterVector{a, b, c});
//
// Get a backend with dynamic support, and compile f.
//
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
auto ex = backend->compile(f);
//
// Create a dynamic output tensor with shape {2,?,3}.
//
auto t_r =
backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3});
//
// For each n in [0, 5), run the compiled executable against a test vector of shape
// {2,n,3}, and check the results.
//
for (size_t middle_dim = 0; middle_dim < 5; middle_dim++)
{
// Fill in some test input values, which we'll use for a, b, and c.
vector<float> inputs(2 * middle_dim * 3);
for (size_t i = 0; i < 2 * middle_dim * 3; i++)
{
inputs[i] = i;
}
// Create static tensors for the inputs and copy data.
auto t_a = backend->create_tensor(element::f32, Shape{2, middle_dim, 3});
auto t_b = backend->create_tensor(element::f32, Shape{2, middle_dim, 3});
auto t_c = backend->create_tensor(element::f32, Shape{2, middle_dim, 3});
copy_data(t_a, inputs);
copy_data(t_b, inputs);
copy_data(t_c, inputs);
// Call ex, writing the result into t_r (note: we're reusing the same t_r from outside the loop).
ex->call_with_validate({t_r}, {t_a, t_b, t_c});
// After call, t_r should have a shape of {2,n,3}.
ASSERT_EQ(t_r->get_shape(), (Shape{2, middle_dim, 3}));
// Read out the results, and compare them against expected values.
auto results = read_vector<float>(t_r);
vector<float> expected_values(2 * middle_dim * 3);
for (size_t i = 0; i < 2 * middle_dim * 3; i++)
{
expected_values[i] = (i + i) * i;
}
EXPECT_TRUE(test::all_close_f(results, expected_values));
}
}
......@@ -54,7 +54,7 @@ void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv)
{
if (ngraph::element::from<T>() != tv->get_tensor_layout()->get_element_type())
if (ngraph::element::from<T>() != tv->get_element_type())
{
throw std::invalid_argument("read_vector type must match Tensor type");
}
......