Commit 2f79f707 authored by Scott Cyphers, committed by Robert Kimball

Cyphers/layout (#1602)

* Remove "view"
Simplify layout

* Fix merge error

* fix build error

* PR1602. IntelGPU backend. Compilation fixed.
parent d2b73f99
......@@ -25,8 +25,8 @@ set (SRC
coordinate_diff.cpp
coordinate_transform.cpp
descriptor/input.cpp
descriptor/layout/dense_tensor_view_layout.cpp
descriptor/layout/tensor_view_layout.cpp
descriptor/layout/dense_tensor_layout.cpp
descriptor/layout/tensor_layout.cpp
descriptor/output.cpp
descriptor/tensor.cpp
file_util.cpp
......
......@@ -69,14 +69,14 @@ Tensor& Input::get_tensor()
return m_output->get_tensor();
}
std::shared_ptr<const TensorView> Input::get_tensor_view() const
std::shared_ptr<const Tensor> Input::get_tensor_ptr() const
{
return m_output->get_tensor_view();
return m_output->get_tensor_ptr();
}
std::shared_ptr<TensorView> Input::get_tensor_view()
std::shared_ptr<Tensor> Input::get_tensor_ptr()
{
return m_output->get_tensor_view();
return m_output->get_tensor_ptr();
}
const Shape& Input::get_shape() const
......
......@@ -59,10 +59,10 @@ namespace ngraph
protected:
/// \return the tensor view for the connected output
std::shared_ptr<const TensorView> get_tensor_view() const;
std::shared_ptr<const Tensor> get_tensor_ptr() const;
/// \return the tensor view for the connected output
std::shared_ptr<TensorView> get_tensor_view();
std::shared_ptr<Tensor> get_tensor_ptr();
public:
/// \return the shape of the connected output
......
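For reference, a short hedged sketch of how the renamed accessors in this commit look at a call site (the same chain appears throughout the backend diffs below); the wrapper function itself is hypothetical:

#include <memory>

#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/node.hpp"

void inspect(ngraph::Node* node, ngraph::descriptor::Input& input)
{
    // Formerly get_tensor_view(): returns the shared_ptr to the descriptor::Tensor.
    std::shared_ptr<ngraph::descriptor::Tensor> t = input.get_output().get_tensor_ptr();

    // Formerly get_output_tensor_view(i): same rename on the Node side.
    auto out = node->get_output_tensor_ptr(0);

    // Formerly get_tensor_view_layout(): layouts now live behind get_tensor_layout().
    auto layout = out->get_tensor_layout();
    (void)t;
    (void)layout;
}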
......@@ -14,49 +14,51 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
using namespace ngraph;
descriptor::layout::DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
: TensorViewLayout(tensor_view)
descriptor::layout::DenseTensorLayout::DenseTensorLayout(const Tensor& tensor)
: TensorLayout(tensor)
{
const Shape& shape = tensor_view.get_shape();
m_size = ngraph::shape_size(shape);
m_strides = ngraph::row_major_strides(shape);
}
size_t
descriptor::layout::DenseTensorViewLayout::get_index_offset(const std::vector<size_t>& indices)
size_t descriptor::layout::DenseTensorLayout::get_index_offset(const std::vector<size_t>& indices)
{
if (indices.size() != m_strides.size())
auto strides = get_strides();
if (indices.size() != strides.size())
{
throw ngraph_error("Indices have the incorrect rank.");
}
size_t result = 0;
for (int i = 0; i < indices.size(); i++)
{
result += m_strides[i] + indices[i];
result += strides[i] * indices[i];
}
return result;
}
bool descriptor::layout::DenseTensorViewLayout::operator==(const TensorViewLayout& other) const
Strides descriptor::layout::DenseTensorLayout::get_strides() const
{
const DenseTensorViewLayout* p_other = dynamic_cast<const DenseTensorViewLayout*>(&other);
return ngraph::row_major_strides(get_shape());
}
bool descriptor::layout::DenseTensorLayout::operator==(const TensorLayout& other) const
{
const DenseTensorLayout* p_other = dynamic_cast<const DenseTensorLayout*>(&other);
if (nullptr == p_other)
return false;
if (get_element_type() != p_other->get_element_type())
return false;
if (m_strides != p_other->m_strides)
if (get_strides() != p_other->get_strides())
return false;
if (m_offset != p_other->m_offset)
if (get_offset() != p_other->get_offset())
return false;
return true;
......
......@@ -19,7 +19,7 @@
#include <cstddef>
#include <vector>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
namespace ngraph
{
......@@ -32,23 +32,19 @@ namespace ngraph
/// \brief The standard strided layout, used for row-major and column-major, their permutations and slices.
///
/// The linearized offset of an index I is dot(I, strides) + offset.
class DenseTensorViewLayout : public TensorViewLayout
class DenseTensorLayout : public TensorLayout
{
public:
~DenseTensorViewLayout() override {}
DenseTensorViewLayout(const Tensor& tensor);
~DenseTensorLayout() override {}
DenseTensorLayout(const Tensor& tensor);
virtual size_t get_size() override { return m_size; }
size_t get_offset() const { return m_offset; }
virtual size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const override { return m_strides; }
virtual bool operator==(const TensorViewLayout& other) const override;
Strides get_strides() const override;
virtual bool operator==(const TensorLayout& other) const override;
protected:
Strides m_strides;
size_t m_offset{0};
size_t m_size;
};
}
}
......
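The doc comment above states the dense layout rule: the linearized offset of an index I is dot(I, strides) + offset. A minimal standalone sketch of that arithmetic (plain C++, with the nGraph helpers re-implemented locally so it compiles on its own), matching what DenseTensorLayout::get_strides() and get_index_offset() compute:

#include <cstddef>
#include <iostream>
#include <vector>

// Row-major strides derived from a shape, as row_major_strides() does for DenseTensorLayout.
std::vector<size_t> row_major_strides(const std::vector<size_t>& shape)
{
    std::vector<size_t> strides(shape.size(), 1);
    for (size_t i = shape.size(); i-- > 1;)
    {
        strides[i - 1] = strides[i] * shape[i];
    }
    return strides;
}

// Linearized offset of an index: dot(indices, strides) + offset.
size_t index_offset(const std::vector<size_t>& indices,
                    const std::vector<size_t>& strides,
                    size_t offset = 0)
{
    size_t result = offset;
    for (size_t i = 0; i < indices.size(); i++)
    {
        result += strides[i] * indices[i]; // multiply-accumulate, as in get_index_offset above
    }
    return result;
}

int main()
{
    std::vector<size_t> shape{2, 3, 4};
    auto strides = row_major_strides(shape);               // {12, 4, 1}
    std::cout << index_offset({1, 2, 3}, strides) << "\n"; // 1*12 + 2*4 + 3*1 = 23
    return 0;
}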
......@@ -14,36 +14,34 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/type/element_type.hpp"
using namespace ngraph;
descriptor::layout::TensorViewLayout::TensorViewLayout(const descriptor::TensorView& tensor_view)
: m_element_type(tensor_view.get_element_type())
, m_shape(tensor_view.get_shape())
descriptor::layout::TensorLayout::TensorLayout(const descriptor::Tensor& tensor)
: m_element_type(tensor.get_element_type())
, m_shape(tensor.get_shape())
{
}
const element::Type& descriptor::layout::TensorViewLayout::get_element_type() const
const element::Type& descriptor::layout::TensorLayout::get_element_type() const
{
return m_element_type;
}
const Shape& descriptor::layout::TensorViewLayout::get_shape() const
const Shape& descriptor::layout::TensorLayout::get_shape() const
{
return m_shape;
}
void descriptor::layout::TensorViewLayout::set_tensor_view_type(const element::Type& element_type,
const Shape& shape)
size_t descriptor::layout::TensorLayout::get_size() const
{
m_element_type = element_type;
m_shape = shape;
return ngraph::shape_size(get_shape());
}
size_t descriptor::layout::TensorViewLayout::get_allocated_size()
size_t descriptor::layout::TensorLayout::get_allocated_size()
{
return get_size() * get_element_type().size();
}
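A small standalone sketch of the size arithmetic defined above: get_size() is the element count of the shape and get_allocated_size() multiplies it by the element width (helpers re-implemented locally; the 4-byte f32 width is assumed for the example):

#include <cstddef>
#include <iostream>
#include <vector>

// Element count of a shape, as ngraph::shape_size() computes it.
size_t shape_size(const std::vector<size_t>& shape)
{
    size_t count = 1;
    for (size_t d : shape)
    {
        count *= d;
    }
    return count;
}

int main()
{
    std::vector<size_t> shape{2, 3, 4};
    const size_t f32_bytes = 4; // element::f32 width, assumed for this example

    size_t element_count = shape_size(shape);     // 24, matches get_size()
    size_t allocated = element_count * f32_bytes; // 96 bytes, matches get_allocated_size()
    std::cout << element_count << " elements, " << allocated << " bytes\n";
    return 0;
}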
......@@ -35,19 +35,19 @@ namespace ngraph
/// \brief Interface for describing implementations of tensor views.
///
/// Kernel selection will need to pay attention to the layout.
class TensorViewLayout
class TensorLayout
{
protected:
TensorViewLayout(const ngraph::descriptor::TensorView& tensor_view);
TensorViewLayout(const TensorViewLayout&) = delete;
TensorViewLayout& operator=(const TensorViewLayout&) = delete;
TensorLayout(const ngraph::descriptor::Tensor& tensor);
TensorLayout(const TensorLayout&) = delete;
TensorLayout& operator=(const TensorLayout&) = delete;
public:
virtual ~TensorViewLayout() {}
virtual ~TensorLayout() {}
/// Extent of this view in buffer.
///
/// When we support non-linear buffers, this will need to be something other than size_t.
virtual size_t get_size() = 0;
size_t get_size() const;
virtual size_t get_allocated_size();
/// Offset of an index; useful for slice implementation.
///
......@@ -56,15 +56,13 @@ namespace ngraph
const element::Type& get_element_type() const;
const Shape& get_shape() const;
virtual const Strides& get_strides() const = 0;
virtual Strides get_strides() const = 0;
/// \brief Return true if this and other have the same element interpretation
virtual bool operator==(const TensorViewLayout& other) const = 0;
bool operator!=(const TensorViewLayout& other) const { return !(*this == other); }
void set_tensor_view_type(const element::Type& element_type, const Shape& shape);
virtual bool operator==(const TensorLayout& other) const = 0;
bool operator!=(const TensorLayout& other) const { return !(*this == other); }
protected:
element::Type m_element_type;
Shape m_shape;
const element::Type m_element_type;
const Shape m_shape;
};
}
}
......
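Two interface changes above are worth illustrating: get_strides() now returns by value so a layout can compute strides on demand instead of storing them, and operator== is pure virtual and compared across concrete types. A self-contained toy hierarchy (not nGraph code) showing both, mirroring DenseTensorLayout and the backend LayoutDescriptor in this diff:

#include <cstddef>
#include <iostream>
#include <vector>

using Strides = std::vector<size_t>;

class Layout
{
public:
    virtual ~Layout() {}
    virtual Strides get_strides() const = 0;                // by value, computed on demand
    virtual bool operator==(const Layout& other) const = 0; // same element interpretation?
    bool operator!=(const Layout& other) const { return !(*this == other); }
};

class Dense : public Layout
{
public:
    Dense(const std::vector<size_t>& shape)
        : m_shape(shape)
    {
    }
    Strides get_strides() const override
    {
        Strides strides(m_shape.size(), 1);
        for (size_t i = m_shape.size(); i-- > 1;)
        {
            strides[i - 1] = strides[i] * m_shape[i];
        }
        return strides;
    }
    bool operator==(const Layout& other) const override
    {
        // Layouts of different concrete types never compare equal.
        const Dense* p_other = dynamic_cast<const Dense*>(&other);
        return p_other != nullptr && get_strides() == p_other->get_strides();
    }

private:
    std::vector<size_t> m_shape;
};

int main()
{
    Dense a({2, 3});
    Dense b({2, 3});
    Dense c({3, 2});
    std::cout << (a == b) << " " << (a == c) << "\n"; // 1 0
    return 0;
}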
......@@ -44,8 +44,8 @@ namespace ngraph
std::shared_ptr<Node> get_node() const;
size_t get_index() const { return m_index; }
std::shared_ptr<Tensor> get_tensor_view() const { return m_tensor; }
void set_tensor_view(const std::shared_ptr<Tensor>& tensor) { m_tensor = tensor; }
std::shared_ptr<Tensor> get_tensor_ptr() const { return m_tensor; }
void set_tensor_ptr(const std::shared_ptr<Tensor>& tensor) { m_tensor = tensor; }
void add_input(Input* input);
void remove_input(Input* input);
const std::set<Input*>& get_inputs() const { return m_inputs; }
......
......@@ -15,7 +15,7 @@
//*****************************************************************************
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/node.hpp"
using namespace ngraph;
......@@ -34,10 +34,6 @@ void descriptor::Tensor::set_tensor_view_type(const element::Type& element_type,
{
m_shape = shape;
m_element_type = element_type;
if (nullptr != m_tensor_view_layout)
{
m_tensor_view_layout->set_tensor_view_type(element_type, shape);
}
}
void descriptor::Tensor::set_pool_offset(size_t offset)
......@@ -52,7 +48,7 @@ size_t descriptor::Tensor::get_pool_offset() const
size_t descriptor::Tensor::size() const
{
if (auto tvl = get_tensor_view_layout())
if (auto tvl = get_tensor_layout())
{
return tvl->get_allocated_size();
}
......@@ -62,6 +58,20 @@ size_t descriptor::Tensor::size() const
}
}
void descriptor::Tensor::set_tensor_layout(
const std::shared_ptr<layout::TensorLayout>& tensor_layout)
{
if (tensor_layout->get_shape() != get_shape())
{
throw ngraph_error("Setting tensor's layout to a layout with a different shape.");
}
if (tensor_layout->get_element_type() != get_element_type())
{
throw ngraph_error("Setting tensor's layout to a layout with a different element type.");
}
m_tensor_layout = tensor_layout;
}
ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
{
out << "Tensor(" << tensor.get_name() << ")";
......
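A hedged usage sketch of the new check: set_tensor_layout() rejects a layout whose shape or element type disagrees with the tensor it is attached to. The snippet assumes the descriptor::Tensor constructor used by Node::set_output_size() elsewhere in this diff is accessible to user code and that ngraph_error derives from std::exception:

#include <iostream>
#include <memory>

#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

int main()
{
    // Two descriptors with different shapes, using the (type, shape, name) constructor
    // that Node::set_output_size() calls elsewhere in this diff.
    descriptor::Tensor a(element::f32, Shape{2, 3}, "a");
    descriptor::Tensor b(element::f32, Shape{3, 2}, "b");

    // A dense layout is constructed from the tensor it describes, so this one matches b, not a.
    auto layout_of_b = std::make_shared<descriptor::layout::DenseTensorLayout>(b);

    try
    {
        a.set_tensor_layout(layout_of_b); // shape mismatch: throws ngraph_error
    }
    catch (const std::exception& e)
    {
        std::cout << "rejected: " << e.what() << "\n";
    }

    // A layout built from the same tensor passes both checks.
    a.set_tensor_layout(std::make_shared<descriptor::layout::DenseTensorLayout>(a));
    return 0;
}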
......@@ -31,7 +31,7 @@ namespace ngraph
{
namespace layout
{
class TensorViewLayout;
class TensorLayout;
}
/// \brief Compile-time descriptor of a first-class value that is a view of a tensor.
......@@ -48,37 +48,29 @@ namespace ngraph
const element::Type& get_element_type() const { return m_element_type; }
const Shape& get_shape() const { return m_shape; }
const std::shared_ptr<layout::TensorViewLayout>& get_tensor_view_layout() const
const std::shared_ptr<layout::TensorLayout>& get_tensor_layout() const
{
return m_tensor_view_layout;
return m_tensor_layout;
}
void set_tensor_view_layout(
const std::shared_ptr<layout::TensorViewLayout>& tensor_view_layout)
{
m_tensor_view_layout = tensor_view_layout;
}
void set_tensor_layout(const std::shared_ptr<layout::TensorLayout>& tensor_layout);
void set_pool_offset(size_t);
size_t get_pool_offset() const;
size_t size() const;
const Tensor& get_tensor() const { return *this; }
Tensor& get_tensor() { return *this; }
const Tensor& get_tensor_view() const { return *this; }
Tensor& get_tensor_view() { return *this; }
protected:
element::Type m_element_type;
Shape m_shape;
std::string m_name;
std::shared_ptr<layout::TensorViewLayout> m_tensor_view_layout;
size_t m_pool_offset;
std::shared_ptr<layout::TensorLayout> m_tensor_layout;
size_t m_pool_offset{0};
};
using TensorView = Tensor;
using TensorViewPtrs = std::vector<std::shared_ptr<TensorView>>;
using TensorViewPtrs = std::vector<std::shared_ptr<Tensor>>;
std::ostream& operator<<(std::ostream&, const ngraph::descriptor::Tensor&);
}
}
......@@ -49,8 +49,8 @@
#include "ngraph/builder/tensor_mask.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/except.hpp"
......
......@@ -20,7 +20,7 @@
#include <typeinfo>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
......@@ -74,9 +74,9 @@ void Node::set_output_size(size_t n)
m_outputs.clear();
for (size_t i = m_outputs.size(); i < n; ++i)
{
auto tensor_view_descriptor = make_shared<descriptor::TensorView>(
auto tensor_descriptor = make_shared<descriptor::Tensor>(
element::unspecified, Shape(), get_name() + "_" + to_string(i));
m_outputs.emplace_back(this, i, tensor_view_descriptor);
m_outputs.emplace_back(this, i, tensor_descriptor);
}
}
......@@ -86,7 +86,7 @@ void Node::validate_and_infer_types()
void Node::set_output_type(size_t i, const element::Type& element_type, const Shape& shape)
{
m_outputs.at(i).get_tensor_view()->set_tensor_view_type(element_type, shape);
m_outputs.at(i).get_tensor_ptr()->set_tensor_view_type(element_type, shape);
}
std::deque<descriptor::Output>& Node::get_outputs()
......@@ -260,19 +260,19 @@ const Shape& Node::get_shape() const
return get_output_shape(0);
}
shared_ptr<descriptor::TensorView> Node::get_output_tensor_view(size_t i) const
shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr(size_t i) const
{
return m_outputs.at(i).get_tensor_view();
return m_outputs.at(i).get_tensor_ptr();
}
shared_ptr<descriptor::TensorView> Node::get_output_tensor_view() const
shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr() const
{
if (get_output_size() != 1)
{
throw ngraph_error(
"get_output_tensor_view() must be called on a node with exactly one output.");
"get_output_tensor_ptr() must be called on a node with exactly one output.");
}
return get_output_tensor_view(0);
return get_output_tensor_ptr(0);
}
const std::set<descriptor::Input*>& Node::get_output_inputs(size_t i) const
......
......@@ -178,10 +178,10 @@ namespace ngraph
descriptor::Tensor& get_output_tensor() const;
/// Returns the tensor view of output i
std::shared_ptr<descriptor::TensorView> get_output_tensor_view(size_t i) const;
std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr(size_t i) const;
/// Checks that there is exactly one output and returns its tensor view.
std::shared_ptr<descriptor::TensorView> get_output_tensor_view() const;
std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr() const;
/// Returns the set of inputs using output i
const std::set<descriptor::Input*>& get_output_inputs(size_t i) const;
......
......@@ -36,11 +36,11 @@ namespace ngraph
{
for (size_t i = 0; i < node->get_output_size(); ++i)
{
auto tv = node->get_output_tensor_view(i);
if (nullptr == tv->get_tensor_view_layout())
auto tv = node->get_output_tensor_ptr(i);
if (nullptr == tv->get_tensor_layout())
{
auto layout = std::make_shared<LT>(*tv);
tv->set_tensor_view_layout(layout);
tv->set_tensor_layout(layout);
}
}
}
......
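The templated pass above is how layouts get attached by default; a minimal sketch of registering it, mirroring the interpreter backend's compile() later in this diff (the pass header include paths are assumptions, not taken from the diff):

#include <memory>

#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/function.hpp"
#include "ngraph/pass/assign_layout.hpp"    // assumed header path
#include "ngraph/pass/like_replacement.hpp" // assumed header path
#include "ngraph/pass/liveness.hpp"         // assumed header path
#include "ngraph/pass/manager.hpp"          // assumed header path

using ngraph::descriptor::layout::DenseTensorLayout;

void assign_default_layouts(const std::shared_ptr<ngraph::Function>& function)
{
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::LikeReplacement>();
    // AssignLayout<LT> visits every node output and, where no layout has been set,
    // attaches a freshly constructed LT built from that output's tensor descriptor.
    pass_manager.register_pass<ngraph::pass::AssignLayout<DenseTensorLayout>>();
    pass_manager.register_pass<ngraph::pass::Liveness>();
    pass_manager.run_passes(function);
}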
......@@ -95,7 +95,7 @@ void runtime::cpu::CPU_CallFrame::propagate_layouts(
throw ngraph_error(
"Error propagating layouts - layout information missing from tensor view");
}
tvs[i]->get_descriptor()->set_tensor_view_layout(layouts[i]);
tvs[i]->get_descriptor()->set_tensor_layout(layouts[i]);
}
}
......
......@@ -552,12 +552,12 @@ using namespace ngraph::runtime;
if (c)
{
m_active_constants.push_back(node);
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view();
string type = tv->get_tensor().get_element_type().c_type_string();
writer << "static " << type << "* " << tv->get_tensor().get_name() << " = (("
<< type << "*)(" << c->get_data_ptr() << "));\n";
m_variable_name_map[tv->get_tensor().get_name()] = tv->get_tensor().get_name();
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::CONSTANT;
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_ptr();
string type = tv->get_element_type().c_type_string();
writer << "static " << type << "* " << tv->get_name() << " = ((" << type << "*)("
<< c->get_data_ptr() << "));\n";
m_variable_name_map[tv->get_name()] = tv->get_name();
m_tensor_roles[tv->get_name()] = CPUTensorRole::CONSTANT;
}
}
}
......@@ -578,15 +578,15 @@ using namespace ngraph::runtime;
set<string> output_names;
for (shared_ptr<Node> op : current_function->get_results())
{
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
output_names.insert(tv->get_tensor().get_name());
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
output_names.insert(tv->get_name());
}
set<descriptor::TensorView*> constants;
for (shared_ptr<Node> node : ordered_ops)
{
if (dynamic_cast<ngraph::op::Constant*>(node.get()))
{
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view();
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_ptr();
constants.insert(tv.get());
}
}
......@@ -620,8 +620,8 @@ using namespace ngraph::runtime;
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
tensor_index_map.insert({tv->get_tensor().get_name(), tensor_index++});
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
tensor_index_map.insert({tv->get_name(), tensor_index++});
}
}
}
......@@ -680,14 +680,14 @@ using namespace ngraph::runtime;
{
for (size_t i = 0; i < param->get_output_size(); ++i)
{
shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_view(i);
shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_ptr(i);
const element::Type& et = tv->get_element_type();
string type = et.c_type_string();
stringstream ss;
ss << "((" << type << "*)(inputs[" << arg_index << "]))";
m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::INPUT;
param_index_map[tv->get_tensor().get_name()] = arg_index;
m_variable_name_map[tv->get_name()] = ss.str();
m_tensor_roles[tv->get_name()] = CPUTensorRole::INPUT;
param_index_map[tv->get_name()] = arg_index;
propagate_in_place_input(&param->get_outputs().at(i), ss.str(), false);
arg_index++;
}
......@@ -697,12 +697,12 @@ using namespace ngraph::runtime;
for (size_t i = 0; i < current_function->get_output_size(); ++i)
{
shared_ptr<Node> op = current_function->get_output_op(i);
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
string type = tv->get_element_type().c_type_string();
stringstream ss;
ss << "((" << type << "*)(outputs[" << i << "]))";
m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::OUTPUT;
m_variable_name_map[tv->get_name()] = ss.str();
m_tensor_roles[tv->get_name()] = CPUTensorRole::OUTPUT;
//keep assigning different outputs to a result descriptor
//op::Result emitter will check if in and out descriptors are the same
......@@ -712,10 +712,10 @@ using namespace ngraph::runtime;
if (!input_node->is_constant() && !input_node->is_parameter())
{
shared_ptr<descriptor::TensorView> itv =
res->get_inputs().at(0).get_output().get_tensor_view();
res->get_inputs().at(0).get_output().get_tensor_ptr();
auto output_name = ss.str();
m_variable_name_map[itv->get_tensor().get_name()] = ss.str();
m_tensor_roles[itv->get_tensor().get_name()] = CPUTensorRole::OUTPUT;
m_variable_name_map[itv->get_name()] = ss.str();
m_tensor_roles[itv->get_name()] = CPUTensorRole::OUTPUT;
propagate_in_place_output(
&(res->get_inputs().at(0).get_output()), output_name, false);
}
......@@ -736,18 +736,16 @@ using namespace ngraph::runtime;
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
in.push_back(
TensorViewWrapper(tv, m_variable_name_map[tv->get_tensor().get_name()]));
node_input_names.emplace_back(tv->get_tensor().get_name());
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
in.push_back(TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
node_input_names.emplace_back(tv->get_name());
}
vector<TensorViewWrapper> out;
for (const descriptor::Output& output : node->get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
out.push_back(
TensorViewWrapper(tv, m_variable_name_map[tv->get_tensor().get_name()]));
node_output_names.emplace_back(tv->get_tensor().get_name());
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
out.push_back(TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
node_output_names.emplace_back(tv->get_name());
}
// Emit operation prologue
......@@ -799,8 +797,8 @@ using namespace ngraph::runtime;
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
auto input_name = tv->get_tensor().get_name();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
auto input_name = tv->get_name();
if (output.get_node()->is_parameter())
{
......@@ -972,14 +970,14 @@ using namespace ngraph::runtime;
{
for (size_t i = 0; i < parameter->get_output_size(); ++i)
{
auto tv = parameter->get_output_tensor_view(i);
if (tv->get_tensor_view_layout() == nullptr)
auto tv = parameter->get_output_tensor_ptr(i);
if (tv->get_tensor_layout() == nullptr)
{
throw ngraph_error("layout missing on function parameter's tensor view: " +
tv->get_name());
}
parameter_layout_descriptors.emplace_back(
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout()));
}
}
......@@ -993,13 +991,13 @@ using namespace ngraph::runtime;
const auto& output = m_function->get_output_op(i);
for (size_t j = 0; j < output->get_output_size(); ++j)
{
auto tv = output->get_output_tensor_view(j);
if (tv->get_tensor_view_layout() == nullptr)
auto tv = output->get_output_tensor_ptr(j);
if (tv->get_tensor_layout() == nullptr)
{
throw ngraph_error("layout missing on function output tensor: " + tv->get_name());
}
result_layout_descriptors.emplace_back(
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout()));
}
}
......@@ -1163,14 +1161,14 @@ void runtime::cpu::CPU_ExternalFunction::build()
{
for (size_t i = 0; i < parameter->get_output_size(); ++i)
{
auto tv = parameter->get_output_tensor_view(i);
if (tv->get_tensor_view_layout() == nullptr)
auto tv = parameter->get_output_tensor_ptr(i);
if (tv->get_tensor_layout() == nullptr)
{
throw ngraph_error("layout missing on function parameter's tensor view: " +
tv->get_name());
}
parameter_layout_descriptors.emplace_back(
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout()));
}
}
......@@ -1184,13 +1182,13 @@ void runtime::cpu::CPU_ExternalFunction::build()
const auto& output = m_function->get_output_op(i);
for (size_t j = 0; j < output->get_output_size(); ++j)
{
auto tv = output->get_output_tensor_view(j);
if (tv->get_tensor_view_layout() == nullptr)
auto tv = output->get_output_tensor_ptr(j);
if (tv->get_tensor_layout() == nullptr)
{
throw ngraph_error("layout missing on function output tensor: " + tv->get_name());
}
result_layout_descriptors.emplace_back(
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout()));
}
}
......@@ -1216,10 +1214,10 @@ void runtime::cpu::CPU_ExternalFunction::build()
{
if (node->is_constant())
{
auto tv = node->get_outputs()[0].get_tensor_view();
tensor_data[tv->get_tensor().get_name()] =
auto tv = node->get_outputs()[0].get_tensor_ptr();
tensor_data[tv->get_name()] =
const_cast<void*>(static_pointer_cast<ngraph::op::Constant>(node)->get_data_ptr());
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::CONSTANT;
m_tensor_roles[tv->get_name()] = CPUTensorRole::CONSTANT;
}
}
......@@ -1229,13 +1227,11 @@ void runtime::cpu::CPU_ExternalFunction::build()
{
for (size_t i = 0; i < param->get_output_size(); ++i)
{
shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_view(i);
function_input_index.emplace_back(tensor_data[tv->get_tensor().get_name()],
arg_index,
tensor_stale[tv->get_tensor().get_name()]);
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::INPUT;
propagate_in_place_input(
&param->get_outputs().at(i), tv->get_tensor().get_name(), true);
shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_ptr(i);
function_input_index.emplace_back(
tensor_data[tv->get_name()], arg_index, tensor_stale[tv->get_name()]);
m_tensor_roles[tv->get_name()] = CPUTensorRole::INPUT;
propagate_in_place_input(&param->get_outputs().at(i), tv->get_name(), true);
arg_index++;
}
}
......@@ -1244,9 +1240,9 @@ void runtime::cpu::CPU_ExternalFunction::build()
for (size_t i = 0; i < m_function->get_output_size(); ++i)
{
shared_ptr<Node> op = m_function->get_output_op(i);
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
function_output_index.emplace_back(tensor_data[tv->get_tensor().get_name()], i);
m_tensor_roles[tv->get_tensor().get_name()] = CPUTensorRole::OUTPUT;
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_ptr();
function_output_index.emplace_back(tensor_data[tv->get_name()], i);
m_tensor_roles[tv->get_name()] = CPUTensorRole::OUTPUT;
//keep assigning different outputs to a result descriptor
//op::Result emitter will check if in and out descriptors are the same
......@@ -1256,12 +1252,12 @@ void runtime::cpu::CPU_ExternalFunction::build()
if (!input_node->is_constant() && !input_node->is_parameter())
{
shared_ptr<descriptor::TensorView> itv =
res->get_inputs().at(0).get_output().get_tensor_view();
function_output_index.emplace_back(tensor_data[itv->get_tensor().get_name()], i);
m_tensor_roles[itv->get_tensor().get_name()] = CPUTensorRole::OUTPUT;
tensor_alias[itv->get_tensor().get_name()] = tv->get_tensor().get_name();
res->get_inputs().at(0).get_output().get_tensor_ptr();
function_output_index.emplace_back(tensor_data[itv->get_name()], i);
m_tensor_roles[itv->get_name()] = CPUTensorRole::OUTPUT;
tensor_alias[itv->get_name()] = tv->get_name();
propagate_in_place_output(
&(res->get_inputs().at(0).get_output()), tv->get_tensor().get_name(), true);
&(res->get_inputs().at(0).get_output()), tv->get_name(), true);
}
}
......@@ -1283,18 +1279,18 @@ void runtime::cpu::CPU_ExternalFunction::build()
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
in.push_back(TensorViewWrapper(tv, tv->get_tensor().get_name()));
in_names.push_back(tv->get_tensor().get_name());
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
in.push_back(TensorViewWrapper(tv, tv->get_name()));
in_names.push_back(tv->get_name());
}
vector<TensorViewWrapper> out;
vector<string> out_names;
for (const descriptor::Output& output : node->get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
out.push_back(TensorViewWrapper(tv, tv->get_tensor().get_name()));
out_names.push_back(tv->get_tensor().get_name());
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
out.push_back(TensorViewWrapper(tv, tv->get_name()));
out_names.push_back(tv->get_name());
}
m_op_attrs.emplace_back(node->description(), out_names, in_names);
......@@ -1628,7 +1624,7 @@ string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
for (const descriptor::Input& input : node.get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
if (!contains(arg_names, tvw.get_name()))
{
......@@ -1645,7 +1641,7 @@ string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
vector<TensorViewWrapper> out;
for (const descriptor::Output& output : node.get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_ptr();
TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
if (arg_index++ > 0)
{
......
......@@ -30,10 +30,9 @@ namespace ngraph
mkldnn::memory::f32,
mkldnn::memory::format::format_undef);
LayoutDescriptor::LayoutDescriptor(const ngraph::descriptor::TensorView& tv)
: TensorViewLayout(tv)
LayoutDescriptor::LayoutDescriptor(const ngraph::descriptor::Tensor& tv)
: TensorLayout(tv)
, m_offset(0)
, m_size(ngraph::shape_size(tv.get_shape()))
, m_mkldnn_md(LayoutDescriptor::DummyDesc)
{
auto shape = get_shape();
......@@ -63,7 +62,7 @@ namespace ngraph
}
bool LayoutDescriptor::
operator==(const ngraph::descriptor::layout::TensorViewLayout& other) const
operator==(const ngraph::descriptor::layout::TensorLayout& other) const
{
const LayoutDescriptor* p_other = dynamic_cast<const LayoutDescriptor*>(&other);
if (!p_other)
......
......@@ -23,7 +23,7 @@
#include <mkldnn.hpp>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/shape.hpp"
namespace ngraph
......@@ -32,19 +32,18 @@ namespace ngraph
{
namespace cpu
{
class LayoutDescriptor : public ngraph::descriptor::layout::TensorViewLayout
class LayoutDescriptor : public ngraph::descriptor::layout::TensorLayout
{
public:
LayoutDescriptor(const ngraph::descriptor::TensorView& tv);
~LayoutDescriptor() override {}
size_t get_size() override { return m_size; }
virtual size_t get_allocated_size() override { return m_mkldnn_memory_size; }
size_t get_offset() const { return m_offset; }
size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const override { return m_strides; }
Strides get_strides() const override { return m_strides; }
void set_strides(Strides& strides) { m_strides = strides; }
bool operator==(const TensorViewLayout& other) const override;
bool operator==(const TensorLayout& other) const override;
const mkldnn::memory::desc& get_mkldnn_md() const { return m_mkldnn_md; }
void set_mkldnn_md(const mkldnn::memory::desc md);
......@@ -59,7 +58,6 @@ namespace ngraph
// Native row-major layout for now
Strides m_strides;
size_t m_offset;
size_t m_size;
// For tensor views that can be tracked with MKLDNN memory
// descriptors, this holds the physical layout information
......
......@@ -18,7 +18,7 @@
#include <memory>
#include "cpu_tensor_view.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
......@@ -43,7 +43,7 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
// TODO(jmenon): A fallback layout should not be needed but is required
// because of how some unit test functionality is written (ex. 'backprop_derivative')
// This needs to be removed
m_descriptor->set_tensor_view_layout(
m_descriptor->set_tensor_layout(
std::make_shared<runtime::cpu::LayoutDescriptor>(*m_descriptor));
buffer_size = shape_size(shape) * element_type.size();
......@@ -114,7 +114,7 @@ void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_
throw out_of_range("read access past end of tensor");
}
auto tvl = this->get_tensor_view_layout();
auto tvl = this->get_tensor_layout();
auto cpu_tvl = dynamic_cast<runtime::cpu::LayoutDescriptor*>(tvl.get());
auto needs_conversion = [&]() {
......
......@@ -15,44 +15,44 @@
//*****************************************************************************
#include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/tensor.hpp"
using namespace std;
using namespace ngraph;
runtime::cpu::TensorViewWrapper::TensorViewWrapper(const shared_ptr<descriptor::TensorView>& tv,
runtime::cpu::TensorViewWrapper::TensorViewWrapper(const shared_ptr<descriptor::Tensor>& tv,
const string& alias)
: m_tensor_view(tv)
: m_tensor(tv)
, m_alias(alias)
{
}
size_t runtime::cpu::TensorViewWrapper::get_size() const
{
return m_tensor_view->get_tensor_view_layout()->get_size();
return m_tensor->get_tensor_layout()->get_size();
}
const Shape& runtime::cpu::TensorViewWrapper::get_shape() const
{
return m_tensor_view->get_tensor_view_layout()->get_shape();
return m_tensor->get_tensor_layout()->get_shape();
}
const Strides& runtime::cpu::TensorViewWrapper::get_strides() const
Strides runtime::cpu::TensorViewWrapper::get_strides() const
{
return m_tensor_view->get_tensor_view_layout()->get_strides();
return m_tensor->get_tensor_layout()->get_strides();
}
const element::Type& runtime::cpu::TensorViewWrapper::get_element_type() const
{
return m_tensor_view->get_tensor_view_layout()->get_element_type();
return m_tensor->get_tensor_layout()->get_element_type();
}
const std::string& runtime::cpu::TensorViewWrapper::get_name() const
{
if (m_alias.empty())
{
return m_tensor_view->get_tensor().get_name();
return m_tensor->get_name();
}
else
{
......@@ -65,8 +65,7 @@ const std::string& runtime::cpu::TensorViewWrapper::get_type() const
return get_element_type().c_type_string();
}
const std::shared_ptr<descriptor::TensorView>
runtime::cpu::TensorViewWrapper::get_tensor_view() const
const std::shared_ptr<descriptor::Tensor> runtime::cpu::TensorViewWrapper::get_tensor() const
{
return m_tensor_view;
return m_tensor;
}
......@@ -35,18 +35,17 @@ namespace ngraph
class ngraph::runtime::cpu::TensorViewWrapper
{
public:
TensorViewWrapper(const std::shared_ptr<descriptor::TensorView>&,
const std::string& alias = "");
TensorViewWrapper(const std::shared_ptr<descriptor::Tensor>&, const std::string& alias = "");
size_t get_size() const;
const Shape& get_shape() const;
const Strides& get_strides() const;
Strides get_strides() const;
const element::Type& get_element_type() const;
const std::string& get_name() const;
const std::string& get_type() const;
const std::shared_ptr<descriptor::TensorView> get_tensor_view() const;
const std::shared_ptr<descriptor::Tensor> get_tensor() const;
private:
std::shared_ptr<descriptor::TensorView> m_tensor_view;
std::shared_ptr<descriptor::Tensor> m_tensor;
std::string m_alias;
};
......@@ -213,14 +213,14 @@ const mkldnn::memory::desc& runtime::cpu::mkldnn_utils::get_input_mkldnn_md(cons
size_t index)
{
auto cpu_tvl = dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(
node->get_inputs()[index].get_output().get_tensor_view()->get_tensor_view_layout());
node->get_inputs()[index].get_output().get_tensor_ptr()->get_tensor_layout());
return cpu_tvl->get_mkldnn_md();
}
const mkldnn::memory::desc& runtime::cpu::mkldnn_utils::get_output_mkldnn_md(const Node* node,
size_t index)
{
auto tvl = node->get_output_tensor_view(index)->get_tensor_view_layout();
auto tvl = node->get_output_tensor_ptr(index)->get_tensor_layout();
return dynamic_cast<runtime::cpu::LayoutDescriptor&>(*tvl).get_mkldnn_md();
}
......
......@@ -51,8 +51,8 @@ void runtime::cpu::op::ConvertLayout::validate_and_infer_types()
{
const auto& arg = get_argument(0);
const auto& arg_tensor_view = arg->get_output_tensor_view(arg_output_index);
const auto& arg_layout = arg_tensor_view->get_tensor_view_layout();
const auto& arg_tensor = arg->get_output_tensor_ptr(arg_output_index);
const auto& arg_layout = arg_tensor->get_tensor_layout();
if (!arg_layout)
{
......@@ -60,5 +60,5 @@ void runtime::cpu::op::ConvertLayout::validate_and_infer_types()
}
set_output_type(0, output_layout->get_element_type(), output_layout->get_shape());
get_output_tensor_view()->set_tensor_view_layout(output_layout);
get_output_tensor_ptr()->set_tensor_layout(output_layout);
}
......@@ -64,7 +64,7 @@ using namespace ngraph;
using namespace ngraph::runtime::cpu;
// Check if the input layout matches the layout requested in `required_mds`
// If not, insert a layout conversion node between the input tensorview and
// If not, insert a layout conversion node between the input tensor and
// the `node`. For now, only MKLDNN nodes/kernels can request specific layouts
shared_ptr<Node> runtime::cpu::pass::CPULayout::insert_input_conversions(
runtime::cpu::CPU_ExternalFunction* external_function,
......@@ -85,9 +85,8 @@ shared_ptr<Node> runtime::cpu::pass::CPULayout::insert_input_conversions(
for (const descriptor::Input& input : node->get_inputs())
{
const auto& output = input.get_output();
auto tv = output.get_tensor_view();
auto tvl =
dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout());
auto tv = output.get_tensor_ptr();
auto tvl = dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_layout());
if (!tvl)
{
......@@ -149,8 +148,8 @@ void runtime::cpu::pass::CPULayout::set_output_layouts(shared_ptr<Node>& node,
{
for (size_t i = 0; i < node->get_output_size(); ++i)
{
auto tv = node->get_output_tensor_view(i);
auto tvl = tv->get_tensor_view_layout();
auto tv = node->get_output_tensor_ptr(i);
auto tvl = tv->get_tensor_layout();
if (tvl)
{
throw ngraph_error("Node (" + node->get_name() +
......@@ -159,7 +158,7 @@ void runtime::cpu::pass::CPULayout::set_output_layouts(shared_ptr<Node>& node,
}
auto layout = std::make_shared<ngraph::runtime::cpu::LayoutDescriptor>(*tv);
layout->set_mkldnn_md(output_mds[i]);
tv->set_tensor_view_layout(layout);
tv->set_tensor_layout(layout);
NGRAPH_DEBUG << "Setting Node: " << node->get_name()
<< " output layout: " << output_mds[i].data.format << endl;
}
......@@ -176,10 +175,10 @@ void runtime::cpu::pass::CPULayout::set_native_layouts(
for (descriptor::Input& input : node->get_inputs())
{
const auto& output = input.get_output();
auto tv = output.get_tensor_view();
auto tv = output.get_tensor_ptr();
auto et = tv->get_element_type();
auto shape = tv->get_shape();
auto tvl = tv->get_tensor_view_layout();
auto tvl = tv->get_tensor_layout();
auto cpu_tvl = dynamic_cast<runtime::cpu::LayoutDescriptor*>(tvl.get());
if (cpu_tvl && cpu_tvl->is_mkldnn_layout())
......@@ -238,8 +237,8 @@ void runtime::cpu::pass::CPULayout::set_native_layouts(
for (size_t i = 0; i < node->get_output_size(); ++i)
{
auto tv = node->get_output_tensor_view(i);
if (tv->get_tensor_view_layout())
auto tv = node->get_output_tensor_ptr(i);
if (tv->get_tensor_layout())
{
// TODO(jbobba): Should this be an error instead?
// Some unit tests are sharing nodes across graphs
......@@ -255,7 +254,7 @@ void runtime::cpu::pass::CPULayout::set_native_layouts(
mkldnn_utils::create_blocked_mkldnn_md(shape, layout->get_strides(), et);
layout->set_mkldnn_md(native_md);
}
tv->set_tensor_view_layout(layout);
tv->set_tensor_layout(layout);
}
}
......@@ -1172,10 +1171,7 @@ namespace ngraph
{
auto result = static_cast<const ngraph::op::Result*>(node.get());
auto cpu_tvl = dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(
node->get_inputs()[0]
.get_output()
.get_tensor_view()
->get_tensor_view_layout());
node->get_inputs()[0].get_output().get_tensor_ptr()->get_tensor_layout());
if (result->needs_default_layout() || !cpu_tvl->is_mkldnn_layout() ||
cpu_tvl->get_size() * cpu_tvl->get_element_type().size() !=
......@@ -1203,8 +1199,8 @@ namespace ngraph
auto axis_order = reshape->get_input_order();
auto tvl = node->get_inputs()[0]
.get_output()
.get_tensor_view()
->get_tensor_view_layout();
.get_tensor_ptr()
->get_tensor_layout();
auto cpu_tvl = dynamic_cast<runtime::cpu::LayoutDescriptor*>(tvl.get());
if (cpu_tvl && cpu_tvl->is_mkldnn_layout())
{
......@@ -1237,7 +1233,7 @@ namespace ngraph
}
set_native_layouts(external_function, node);
auto output_tvl = dynamic_pointer_cast<runtime::cpu::LayoutDescriptor>(
node->get_output_tensor_view()->get_tensor_view_layout());
node->get_output_tensor_ptr()->get_tensor_layout());
// TODO (jbobba): For now non-MKLDNN layouts are always in row-major format
// Enable this once we support non row-major strided formats
// output_tvl->set_strides(output_strides);
......
......@@ -44,7 +44,7 @@ void ngraph::runtime::cpu::pass::CPUPostLayoutOptimizations::construct_weight_fu
auto reshape_conv =
std::make_shared<ngraph::op::Reshape>(param, AxisVector{0}, Shape{16, 4, 1, 1});
auto data_conv = std::make_shared<pattern::op::Label>(element::f32, Shape{16, 4, 7, 7});
auto tvt = reshape_conv->get_outputs().at(0).get_tensor_view().get();
auto tvt = reshape_conv->get_outputs().at(0).get_tensor_ptr().get();
auto lt_desc = std::make_shared<runtime::cpu::LayoutDescriptor>(*tvt);
auto cvt_lt_conv = std::make_shared<runtime::cpu::op::ConvertLayout>(reshape_conv, lt_desc);
auto conv = std::make_shared<ngraph::op::Convolution>(
......
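The pattern code above builds a layout-conversion node by hand; a hedged sketch of the same construction as a free function, reusing only calls shown in this diff (the ConvertLayout header path and its acceptance of an arbitrary Node are assumptions):

#include <memory>

#include "ngraph/descriptor/output.hpp"
#include "ngraph/node.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/op/convert_layout.hpp" // assumed path for the ConvertLayout op

using namespace ngraph;

// Wrap `node` in a ConvertLayout op targeting a LayoutDescriptor built from the
// node's own first output, mirroring the pattern construction above.
std::shared_ptr<Node> convert_to_layout_of(const std::shared_ptr<Node>& node)
{
    auto tensor = node->get_outputs().at(0).get_tensor_ptr();
    auto lt_desc = std::make_shared<runtime::cpu::LayoutDescriptor>(*tensor);
    return std::make_shared<runtime::cpu::op::ConvertLayout>(node, lt_desc);
}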
......@@ -25,7 +25,7 @@
#include <tuple>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
......@@ -275,7 +275,7 @@ void runtime::gpu::GPU_ExternalFunction::emit_header()
#include <cudnn.h>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
......@@ -381,18 +381,16 @@ void runtime::gpu::GPU_ExternalFunction::emit_constant_declarations()
const op::Constant* c = dynamic_cast<ngraph::op::Constant*>(node.get());
if (c)
{
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view();
shared_ptr<descriptor::Tensor> tv = node->get_outputs()[0].get_tensor_ptr();
// get an allocator for transient per kernel gpu memory
runtime::gpu::GPUAllocator allocator =
m_shared_context->m_primitive_emitter->get_memory_allocator();
size_t idx = allocator.reserve_argspace(
c->get_data_ptr(),
tv->get_tensor().size() * tv->get_tensor().get_element_type().size());
m_writer << "static size_t " << tv->get_tensor().get_name() << "_idx = " << idx
<< ";\n";
m_writer << "static " << tv->get_tensor().get_element_type().c_type_string() << "* "
<< tv->get_tensor().get_name() << " = nullptr;\n";
m_variable_name_map[tv->get_tensor().get_name()] = tv->get_tensor().get_name();
size_t idx = allocator.reserve_argspace(c->get_data_ptr(),
tv->size() * tv->get_element_type().size());
m_writer << "static size_t " << tv->get_name() << "_idx = " << idx << ";\n";
m_writer << "static " << tv->get_element_type().c_type_string() << "* "
<< tv->get_name() << " = nullptr;\n";
m_variable_name_map[tv->get_name()] = tv->get_name();
}
}
}
......@@ -411,12 +409,11 @@ void runtime::gpu::GPU_ExternalFunction::emit_constant_declarations()
const op::Constant* c = dynamic_cast<ngraph::op::Constant*>(node.get());
if (c)
{
shared_ptr<descriptor::TensorView> tv =
node->get_outputs()[0].get_tensor_view();
m_writer << tv->get_tensor().get_name() << " = reinterpret_cast<"
<< tv->get_tensor().get_element_type().c_type_string()
shared_ptr<descriptor::Tensor> tv = node->get_outputs()[0].get_tensor_ptr();
m_writer << tv->get_name() << " = reinterpret_cast<"
<< tv->get_element_type().c_type_string()
<< "*>(runtime::gpu::invoke_memory_primitive(m_runtime_context, "
<< tv->get_tensor().get_name() << "_idx));\n";
<< tv->get_name() << "_idx));\n";
}
}
}
......@@ -482,15 +479,15 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
set<string> output_names;
for (shared_ptr<Node> op : current_function->get_results())
{
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
output_names.insert(tv->get_tensor().get_name());
shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
output_names.insert(tv->get_name());
}
set<descriptor::TensorView*> constants;
for (shared_ptr<Node> node : m_function_ordered_ops.at(current_function))
{
if (dynamic_cast<ngraph::op::Constant*>(node.get()))
{
shared_ptr<descriptor::TensorView> tv = node->get_outputs()[0].get_tensor_view();
shared_ptr<descriptor::Tensor> tv = node->get_outputs()[0].get_tensor_ptr();
constants.insert(tv.get());
}
}
......@@ -513,12 +510,12 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
{
for (size_t i = 0; i < param->get_output_size(); ++i)
{
shared_ptr<descriptor::TensorView> tv = param->get_output_tensor_view(i);
shared_ptr<descriptor::Tensor> tv = param->get_output_tensor_ptr(i);
const element::Type& et = tv->get_element_type();
string type = et.c_type_string();
stringstream ss;
ss << "((" << type << "*)(inputs[" << arg_index << "]))";
m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
m_variable_name_map[tv->get_name()] = ss.str();
arg_index++;
}
}
......@@ -527,11 +524,11 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
for (size_t i = 0; i < current_function->get_output_size(); ++i)
{
shared_ptr<Node> op = current_function->get_output_op(i);
shared_ptr<descriptor::TensorView> tv = op->get_output_tensor_view();
shared_ptr<descriptor::Tensor> tv = op->get_output_tensor_ptr();
string type = tv->get_element_type().c_type_string();
stringstream ss;
ss << "((" << type << "*)(outputs[" << i << "]))";
m_variable_name_map[tv->get_tensor().get_name()] = ss.str();
m_variable_name_map[tv->get_name()] = ss.str();
auto res = dynamic_pointer_cast<ngraph::op::Result>(op);
//keep assigning different outputs to a result descriptor
......@@ -540,9 +537,9 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
auto input_node = res->get_inputs().at(0).get_output().get_node();
if (!input_node->is_constant() && !input_node->is_parameter())
{
shared_ptr<descriptor::TensorView> itv =
res->get_inputs().at(0).get_output().get_tensor_view();
m_variable_name_map[itv->get_tensor().get_name()] = ss.str();
shared_ptr<descriptor::Tensor> itv =
res->get_inputs().at(0).get_output().get_tensor_ptr();
m_variable_name_map[itv->get_name()] = ss.str();
}
}
......@@ -562,18 +559,16 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
in.push_back(GPU_TensorViewWrapper(
tv, m_variable_name_map[tv->get_tensor().get_name()]));
node_input_names.emplace_back(tv->get_tensor().get_name());
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
in.push_back(GPU_TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
node_input_names.emplace_back(tv->get_name());
}
vector<GPU_TensorViewWrapper> out;
for (const descriptor::Output& output : node->get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
out.push_back(GPU_TensorViewWrapper(
tv, m_variable_name_map[tv->get_tensor().get_name()]));
node_output_names.emplace_back(tv->get_tensor().get_name());
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
out.push_back(GPU_TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
node_output_names.emplace_back(tv->get_name());
}
// Emit function description comment
......@@ -646,7 +641,7 @@ void runtime::gpu::GPU_ExternalFunction::compile()
m_pass_manager.register_pass<ngraph::pass::LikeReplacement>();
m_pass_manager
.register_pass<ngraph::pass::AssignLayout<descriptor::layout::DenseTensorViewLayout>>();
.register_pass<ngraph::pass::AssignLayout<descriptor::layout::DenseTensorLayout>>();
m_pass_manager.register_pass<runtime::gpu::pass::GPULayout>(this);
m_pass_manager.register_pass<ngraph::pass::Liveness>();
......@@ -762,7 +757,7 @@ string runtime::gpu::GPU_ExternalFunction::emit_op_as_function(const Node& node,
for (const descriptor::Input& input : node.get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
GPU_TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
if (!contains(arg_names, tvw.get_name()))
{
......@@ -779,7 +774,7 @@ string runtime::gpu::GPU_ExternalFunction::emit_op_as_function(const Node& node,
vector<GPU_TensorViewWrapper> out;
for (const descriptor::Output& output : node.get_outputs())
{
shared_ptr<descriptor::TensorView> tv = output.get_tensor_view();
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
GPU_TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
if (arg_index++ > 0)
{
......
......@@ -18,7 +18,7 @@
#include <cuda_runtime.h>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/gpu/gpu_backend.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view.hpp"
#include "ngraph/runtime/gpu/gpu_util.hpp"
......@@ -33,8 +33,8 @@ runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& elemen
std::make_shared<ngraph::descriptor::TensorView>(element_type, shape, "external"))
, m_custom_memory(false)
{
m_descriptor->set_tensor_view_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
m_buffer_size = shape_size(shape) * element_type.size();
if (memory_pointer != nullptr)
......
......@@ -15,7 +15,7 @@
//*****************************************************************************
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/tensor.hpp"
using namespace std;
......@@ -30,29 +30,29 @@ runtime::gpu::GPU_TensorViewWrapper::GPU_TensorViewWrapper(
size_t runtime::gpu::GPU_TensorViewWrapper::get_size() const
{
return m_tensor->get_tensor_view_layout()->get_size();
return m_tensor->get_tensor_layout()->get_size();
}
const Shape& runtime::gpu::GPU_TensorViewWrapper::get_shape() const
{
return m_tensor->get_tensor_view_layout()->get_shape();
return m_tensor->get_tensor_layout()->get_shape();
}
const Strides& runtime::gpu::GPU_TensorViewWrapper::get_strides() const
Strides runtime::gpu::GPU_TensorViewWrapper::get_strides() const
{
return m_tensor->get_tensor_view_layout()->get_strides();
return m_tensor->get_tensor_layout()->get_strides();
}
const element::Type& runtime::gpu::GPU_TensorViewWrapper::get_element_type() const
{
return m_tensor->get_tensor_view_layout()->get_element_type();
return m_tensor->get_tensor_layout()->get_element_type();
}
const std::string& runtime::gpu::GPU_TensorViewWrapper::get_name() const
{
if (m_alias.empty())
{
return m_tensor->get_tensor().get_name();
return m_tensor->get_name();
}
else
{
......
......@@ -40,7 +40,7 @@ public:
size_t get_size() const;
const Shape& get_shape() const;
const Strides& get_strides() const;
Strides get_strides() const;
const element::Type& get_element_type() const;
const std::string& get_name() const;
const std::string& get_type() const;
......
......@@ -17,7 +17,7 @@
#include <cstring>
#include <memory>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/host_tensor_view.hpp"
using namespace ngraph;
......@@ -33,10 +33,10 @@ runtime::HostTensorView::HostTensorView(const ngraph::element::Type& element_typ
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_view_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
m_buffer_size = m_descriptor->get_tensor_layout()->get_size() * element_type.size();
if (memory_pointer != nullptr)
{
......@@ -102,10 +102,10 @@ void runtime::HostTensorView::read(void* target, size_t tensor_offset, size_t n)
size_t runtime::HostTensorView::get_size() const
{
return get_tensor_view_layout()->get_size();
return get_tensor_layout()->get_size();
}
const element::Type& runtime::HostTensorView::get_element_type() const
{
return get_tensor_view_layout()->get_element_type();
return get_tensor_layout()->get_element_type();
}
......@@ -234,7 +234,7 @@ bool runtime::intelgpu::IntelGPUBackend::compile(shared_ptr<Function> func)
{
arguments_check(op, 0, 1);
const string& element_name = op->get_output_tensor_view()->get_tensor().get_name();
const string& element_name = op->get_output_tensor_ptr()->get_name();
const cldnn::layout element_layout =
IntelGPULayout::create_cldnn_layout(op->get_element_type(), op->get_shape());
......
......@@ -24,7 +24,7 @@ using namespace ngraph;
runtime::intelgpu::IntelGPULayout::IntelGPULayout(const descriptor::TensorView& tv,
const cldnn::layout& layout)
: TensorViewLayout(tv)
: TensorLayout(tv)
, cldnn_layout(layout)
{
}
......@@ -35,16 +35,12 @@ size_t runtime::intelgpu::IntelGPULayout::get_index_offset(const vector<size_t>&
{
throw ngraph_error("Indices have incorrect rank");
}
size_t result = 0;
for (int i = 0; i < indices.size(); i++)
{
result += strides[i] + indices[i];
}
return result;
return inner_product(indices.cbegin(), indices.cend(), strides.cbegin(), 0);
}
bool runtime::intelgpu::IntelGPULayout::
operator==(const descriptor::layout::TensorViewLayout& other) const
operator==(const descriptor::layout::TensorLayout& other) const
{
const IntelGPULayout* p_other = dynamic_cast<const IntelGPULayout*>(&other);
if (!p_other)
......
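The change above replaces the hand-written accumulation with std::inner_product; a standalone sketch confirming the two forms compute the same dot product of indices and strides:

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<size_t> indices{1, 2, 3};
    std::vector<size_t> strides{12, 4, 1};

    // Explicit multiply-accumulate loop.
    size_t loop_result = 0;
    for (size_t i = 0; i < indices.size(); i++)
    {
        loop_result += strides[i] * indices[i];
    }

    // Equivalent std::inner_product call, as used in IntelGPULayout::get_index_offset.
    size_t stl_result =
        std::inner_product(indices.cbegin(), indices.cend(), strides.cbegin(), size_t{0});

    std::cout << loop_result << " == " << stl_result << "\n"; // 23 == 23
    return 0;
}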
......@@ -20,7 +20,7 @@
#include <CPP/layout.hpp>
#include <CPP/tensor.hpp>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
namespace ngraph
{
......@@ -33,20 +33,16 @@ namespace ngraph
}
}
class ngraph::runtime::intelgpu::IntelGPULayout
: public ngraph::descriptor::layout::TensorViewLayout
class ngraph::runtime::intelgpu::IntelGPULayout : public ngraph::descriptor::layout::TensorLayout
{
public:
IntelGPULayout(const ngraph::descriptor::TensorView& tv, const cldnn::layout& layout);
~IntelGPULayout() override {}
size_t get_size() override { return cldnn_layout.get_linear_size(); }
size_t get_index_offset(const std::vector<size_t>& indices) override;
const Strides& get_strides() const override { return strides; }
bool operator==(const TensorViewLayout& other) const override;
Strides get_strides() const override { return strides; }
bool operator==(const TensorLayout& other) const override;
void set_cldnn_layout(const cldnn::layout& layout) { cldnn_layout = layout; }
cldnn::layout get_cldnn_layout() const { return cldnn_layout; }
static cldnn::data_types get_cldnn_type(const ngraph::element::Type& element_type);
static cldnn::layout create_cldnn_layout(const ngraph::element::Type& element_type,
const Shape& element_shape);
......
......@@ -18,7 +18,7 @@
#include <CPP/data.hpp>
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_tensor_view.hpp"
......@@ -33,7 +33,7 @@ runtime::intelgpu::IntelGPUTensorView::IntelGPUTensorView(const element::Type& e
{
const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(element_type, shape);
m_descriptor->set_tensor_view_layout(
m_descriptor->set_tensor_layout(
std::make_shared<runtime::intelgpu::IntelGPULayout>(*m_descriptor, layout));
if (nullptr != memory_pointer)
......
......@@ -15,7 +15,7 @@
//*****************************************************************************
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/descriptor/layout/dense_tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/select.hpp"
......@@ -29,7 +29,7 @@
using namespace std;
using namespace ngraph;
using descriptor::layout::DenseTensorViewLayout;
using descriptor::layout::DenseTensorLayout;
extern "C" const char* get_ngraph_version_string()
{
......@@ -66,7 +66,7 @@ bool runtime::interpreter::INTBackend::compile(shared_ptr<Function> function)
instance.m_is_compiled = true;
pass::Manager pass_manager;
pass_manager.register_pass<pass::LikeReplacement>();
pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
pass_manager.register_pass<pass::AssignLayout<DenseTensorLayout>>();
pass_manager.register_pass<pass::Liveness>();
pass_manager.run_passes(function);
......@@ -113,7 +113,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
{
for (size_t i = 0; i < param->get_output_size(); ++i)
{
descriptor::TensorView* tv = param->get_output_tensor_view(i).get();
descriptor::Tensor* tv = param->get_output_tensor_ptr(i).get();
tensor_map.insert({tv, func_inputs[input_count++]});
}
}
......@@ -126,7 +126,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
{
throw ngraph_error("One of function's outputs isn't op::Result");
}
descriptor::TensorView* tv = output->get_output_tensor_view(0).get();
descriptor::TensorView* tv = output->get_output_tensor_ptr(0).get();
tensor_map.insert({tv, func_outputs[output_count]});
}
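These hunks populate tensor_map, keyed by the raw descriptor pointer now obtained via get_output_tensor_ptr, with the caller-supplied buffers. A condensed sketch of that bookkeeping with stand-in types:

#include <cstddef>
#include <memory>
#include <unordered_map>
#include <vector>

struct DescriptorTensor {};                     // stands in for descriptor::Tensor
struct HostBuffer { std::vector<float> data; }; // stands in for a host tensor

int main()
{
    // The function owns its descriptor tensors; the caller supplies buffers.
    std::vector<std::shared_ptr<DescriptorTensor>> params{
        std::make_shared<DescriptorTensor>(), std::make_shared<DescriptorTensor>()};
    std::vector<std::shared_ptr<HostBuffer>> func_inputs{
        std::make_shared<HostBuffer>(), std::make_shared<HostBuffer>()};

    // Keyed by the raw descriptor pointer, analogous to tensor_map in call().
    std::unordered_map<DescriptorTensor*, std::shared_ptr<HostBuffer>> tensor_map;
    size_t input_count = 0;
    for (const auto& p : params)
    {
        tensor_map.insert({p.get(), func_inputs[input_count++]});
    }
    return tensor_map.size() == 2 ? 0 : 1;
}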
......@@ -143,7 +143,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
vector<shared_ptr<runtime::HostTensorView>> op_inputs;
for (const descriptor::Input& input : op->get_inputs())
{
descriptor::TensorView* tv = input.get_output().get_tensor_view().get();
descriptor::TensorView* tv = input.get_output().get_tensor_ptr().get();
op_inputs.push_back(tensor_map.at(tv));
}
......@@ -151,7 +151,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
vector<shared_ptr<runtime::HostTensorView>> op_outputs;
for (size_t i = 0; i < op->get_output_size(); ++i)
{
descriptor::TensorView* tv = op->get_output_tensor_view(i).get();
descriptor::TensorView* tv = op->get_output_tensor_ptr(i).get();
shared_ptr<runtime::HostTensorView> htv;
if (!contains_key(tensor_map, tv))
{
......@@ -173,9 +173,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
element::Type type;
switch (type_id)
{
case OP_TYPEID::Convert:
type = op->get_inputs().at(0).get_tensor().get_element_type();
break;
case OP_TYPEID::Convert: type = op->get_input_element_type(0); break;
case OP_TYPEID::Equal:
case OP_TYPEID::Greater:
case OP_TYPEID::GreaterEq:
......@@ -185,7 +183,7 @@ bool runtime::interpreter::INTBackend::call(shared_ptr<Function> function,
// Get the type of the second input, not the first
// All BinaryElementwiseComparison ops have the same type for inputs
// Select has bool for first input and the type we are interested in for the second
type = op->get_inputs().at(1).get_tensor().get_element_type();
type = op->get_input_element_type(1);
break;
default: type = op->get_outputs().at(0).get_element_type(); break;
}
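The dispatch rule is unchanged, only expressed through the simpler get_input_element_type accessor: Convert dispatches on its input type, comparisons and Select on their second input (their boolean sits elsewhere), and everything else on the output type. A toy restatement of that rule:

#include <cassert>
#include <string>
#include <vector>

enum class OpKind { Convert, Equal, Select, Add };

// Pick the element type the kernel should be instantiated for.
std::string dispatch_type(OpKind kind,
                          const std::vector<std::string>& input_types,
                          const std::string& output_type)
{
    switch (kind)
    {
    case OpKind::Convert: return input_types.at(0);
    // Comparisons produce boolean, Select consumes boolean first,
    // so the interesting type lives on the second input.
    case OpKind::Equal:
    case OpKind::Select: return input_types.at(1);
    default: return output_type;
    }
}

int main()
{
    assert(dispatch_type(OpKind::Equal, {"f32", "f32"}, "boolean") == "f32");
    assert(dispatch_type(OpKind::Select, {"boolean", "i64", "i64"}, "i64") == "i64");
    assert(dispatch_type(OpKind::Add, {"f32", "f32"}, "f32") == "f32");
    return 0;
}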
......@@ -312,7 +310,7 @@ void runtime::interpreter::INTBackend::perform_nan_check(
size_t arg_number = 1;
for (shared_ptr<HostTensorView> tv : tvs)
{
const element::Type& type = tv->get_tensor().get_element_type();
const element::Type& type = tv->get_element_type();
if (type == element::f32)
{
const float* data = tv->get_data_ptr<float>();
......
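perform_nan_check now reads the element type directly from the tensor view; the check itself is just a scan of the float buffer for NaNs. A minimal standalone version of that scan:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

// Return true if any element of a float buffer is NaN.
bool has_nan(const float* data, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        if (std::isnan(data[i]))
        {
            return true;
        }
    }
    return false;
}

int main()
{
    std::vector<float> values{1.0f, 2.0f, std::numeric_limits<float>::quiet_NaN()};
    std::cout << std::boolalpha << has_nan(values.data(), values.size()) << '\n'; // true
    return 0;
}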
......@@ -15,18 +15,18 @@
//*****************************************************************************
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/type/element_type.hpp"
using namespace ngraph;
using namespace std;
shared_ptr<const descriptor::TensorView> runtime::TensorView::get_tensor_view_descriptor() const
shared_ptr<const descriptor::TensorView> runtime::TensorView::get_descriptor() const
{
return m_descriptor;
}
shared_ptr<descriptor::TensorView> runtime::TensorView::get_descriptor() const
shared_ptr<descriptor::TensorView> runtime::TensorView::get_descriptor()
{
return m_descriptor;
}
......@@ -36,14 +36,14 @@ const Shape& runtime::TensorView::get_shape() const
return m_descriptor->get_shape();
}
const Strides& runtime::TensorView::get_strides() const
Strides runtime::TensorView::get_strides() const
{
return m_descriptor->get_tensor_view_layout()->get_strides();
return m_descriptor->get_tensor_layout()->get_strides();
}
shared_ptr<descriptor::layout::TensorViewLayout> runtime::TensorView::get_tensor_view_layout() const
shared_ptr<descriptor::layout::TensorLayout> runtime::TensorView::get_tensor_layout() const
{
return m_descriptor->get_tensor_view_layout();
return m_descriptor->get_tensor_layout();
}
size_t runtime::TensorView::get_element_count() const
......@@ -58,5 +58,5 @@ size_t runtime::TensorView::get_element_count() const
const descriptor::Tensor& runtime::TensorView::get_tensor() const
{
return get_tensor_view_descriptor()->get_tensor();
return *get_descriptor();
}
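The descriptor accessors collapse into a const/non-const overload pair named get_descriptor, and get_tensor simply dereferences the shared descriptor. The overload pattern in isolation, with illustrative names only:

#include <memory>

struct Descriptor { int id = 0; };

class TensorHandle
{
public:
    TensorHandle() : m_descriptor(std::make_shared<Descriptor>()) {}

    // Const callers see a read-only descriptor ...
    std::shared_ptr<const Descriptor> get_descriptor() const { return m_descriptor; }
    // ... non-const callers may mutate it through the same name.
    std::shared_ptr<Descriptor> get_descriptor() { return m_descriptor; }

    // get_tensor-style accessor: just dereference the shared descriptor.
    const Descriptor& get() const { return *get_descriptor(); }

private:
    std::shared_ptr<Descriptor> m_descriptor;
};

int main()
{
    TensorHandle t;
    t.get_descriptor()->id = 42;     // non-const overload
    return t.get().id == 42 ? 0 : 1; // const overload via get()
}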
......@@ -46,17 +46,16 @@ namespace ngraph
virtual ~TensorView() {}
TensorView& operator=(const TensorView&) = default;
std::shared_ptr<const ngraph::descriptor::Tensor> get_tensor_view_descriptor() const;
virtual std::shared_ptr<const ngraph::descriptor::Tensor> get_descriptor() const;
virtual std::shared_ptr<descriptor::TensorView> get_descriptor() const;
virtual std::shared_ptr<descriptor::Tensor> get_descriptor();
const ngraph::Shape& get_shape() const;
const ngraph::Strides& get_strides() const;
ngraph::Strides get_strides() const;
size_t get_element_count() const;
const ngraph::descriptor::Tensor& get_tensor() const;
std::shared_ptr<ngraph::descriptor::layout::TensorViewLayout>
get_tensor_view_layout() const;
std::shared_ptr<ngraph::descriptor::layout::TensorLayout> get_tensor_layout() const;
bool get_stale() { return m_stale; }
void set_stale(bool val) { m_stale = val; }
......
......@@ -1118,7 +1118,7 @@ TEST(cpu_fusion, weight_fusion)
auto reshape_conv =
std::make_shared<ngraph::op::Reshape>(param, AxisVector{0}, Shape{16, 4, 1, 1});
auto data_conv = std::make_shared<op::Parameter>(element::f32, Shape{16, 4, 7, 7});
auto tvt = reshape_conv->get_outputs().at(0).get_tensor_view().get();
auto tvt = reshape_conv->get_outputs().at(0).get_tensor_ptr().get();
auto lt_desc = std::make_shared<runtime::cpu::LayoutDescriptor>(*tvt);
auto cvt_lt_conv = std::make_shared<runtime::cpu::op::ConvertLayout>(reshape_conv, lt_desc);
auto conv = std::make_shared<ngraph::op::Convolution>(
......@@ -1127,7 +1127,7 @@ TEST(cpu_fusion, weight_fusion)
auto reshape_conv_bprop =
std::make_shared<op::Reshape>(param, AxisVector{0}, Shape{16, 4, 1, 1});
auto dummy_arg_conv_bprop = std::make_shared<op::Parameter>(element::f32, Shape{1, 16, 7, 7});
auto tvt_bprop = reshape_conv_bprop->get_outputs().at(0).get_tensor_view().get();
auto tvt_bprop = reshape_conv_bprop->get_outputs().at(0).get_tensor_ptr().get();
auto lt_desc_bprop = std::make_shared<runtime::cpu::LayoutDescriptor>(*tvt_bprop);
auto cvt_lt_conv_bprop =
std::make_shared<runtime::cpu::op::ConvertLayout>(reshape_conv_bprop, lt_desc_bprop);
......
......@@ -65,7 +65,7 @@ namespace ngraph
T atol = 1e-8f)
{
// Check that the layouts are compatible
if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
if (*a->get_tensor_layout() != *b->get_tensor_layout())
{
throw ngraph_error("Cannot compare tensors with different layouts");
}
......
......@@ -85,7 +85,7 @@ bool test::all_close_f(const std::shared_ptr<runtime::TensorView>& a,
int tolerance_bits)
{
// Check that the layouts are compatible
if (*a->get_tensor_view_layout() != *b->get_tensor_view_layout())
if (*a->get_tensor_layout() != *b->get_tensor_layout())
{
throw ngraph_error("Cannot compare tensors with different layouts");
}
......
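Both helpers keep the same guard through the renamed accessor: tensors are only compared element-wise once their layouts agree. A self-contained sketch of the numeric part, assuming a simple absolute-plus-relative tolerance rather than ngraph's exact comparison semantics:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Element-wise closeness check: |a - b| <= atol + rtol * |b|.
bool all_close(const std::vector<float>& a,
               const std::vector<float>& b,
               float rtol = 1e-5f,
               float atol = 1e-8f)
{
    if (a.size() != b.size())
    {
        // Mirrors the "different layouts" rejection in the real helpers.
        throw std::invalid_argument("Cannot compare tensors of different sizes");
    }
    for (size_t i = 0; i < a.size(); ++i)
    {
        if (std::abs(a[i] - b[i]) > atol + rtol * std::abs(b[i]))
        {
            return false;
        }
    }
    return true;
}

int main()
{
    std::cout << std::boolalpha
              << all_close({1.0f, 2.0f}, {1.0f, 2.000001f}) << '\n'; // true
    return 0;
}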
......@@ -26,7 +26,7 @@ using namespace ngraph;
vector<float> read_float_vector(shared_ptr<runtime::TensorView> tv)
{
vector<float> float_vec;
element::Type element_type = tv->get_tensor_view_layout()->get_element_type();
element::Type element_type = tv->get_tensor_layout()->get_element_type();
if (element_type == element::boolean)
{
......
......@@ -20,7 +20,7 @@
#include <list>
#include <memory>
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/backend.hpp"
......@@ -46,7 +46,7 @@ void copy_data(std::shared_ptr<ngraph::runtime::TensorView> tv, const std::vecto
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::TensorView> tv)
{
if (ngraph::element::from<T>() != tv->get_tensor_view_layout()->get_element_type())
if (ngraph::element::from<T>() != tv->get_tensor_layout()->get_element_type())
{
throw std::invalid_argument("read_vector type must match TensorView type");
}
......
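read_vector still refuses to reinterpret a tensor whose element type does not match the requested T. The same guard, reduced to a runtime type tag checked against the compile-time type (hypothetical helper types, not ngraph's):

#include <cstring>
#include <stdexcept>
#include <typeindex>
#include <typeinfo>
#include <vector>

// A buffer tagged with the element type it was written with.
struct TypedBuffer
{
    std::type_index element_type;
    std::vector<char> bytes;
};

template <typename T>
std::vector<T> read_vector(const TypedBuffer& buf)
{
    if (std::type_index(typeid(T)) != buf.element_type)
    {
        throw std::invalid_argument("read_vector type must match buffer element type");
    }
    std::vector<T> result(buf.bytes.size() / sizeof(T));
    std::memcpy(result.data(), buf.bytes.data(), buf.bytes.size());
    return result;
}

int main()
{
    TypedBuffer buf{std::type_index(typeid(float)), std::vector<char>(sizeof(float) * 4, 0)};
    auto floats = read_vector<float>(buf); // ok: types match
    return floats.size() == 4 ? 0 : 1;
}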