Unverified commit c6d1af4f, authored by Robert Kimball, committed by GitHub

Remove collect_tensor_views and clean up CallFrames (#866)

* remove tensor_call from backends

* remove obsolete methods
parent aadc9ce4
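
Note for context: the base-class collect_tensor_views removed below was an identity collector (it only did views.push_back(value)), so the call() wrappers that invoked it merely copied their argument vectors element by element before forwarding to tensor_call. A minimal self-contained sketch of why the indirection collapses, using a stand-in type rather than the ngraph sources:

```cpp
#include <cassert>
#include <memory>
#include <vector>

struct TensorView
{
    // Mirrors the removed method: appends 'value' (normally the object
    // itself) to 'views' -- no expansion actually happens.
    void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
                              const std::shared_ptr<TensorView>& value) const
    {
        views.push_back(value);
    }
};

int main()
{
    std::vector<std::shared_ptr<TensorView>> arguments{
        std::make_shared<TensorView>(), std::make_shared<TensorView>()};

    // The old call() wrapper, reduced to its essentials...
    std::vector<std::shared_ptr<TensorView>> inputs;
    for (const auto& argument : arguments)
    {
        argument->collect_tensor_views(inputs, argument);
    }

    // ...is just a copy, so callers can pass 'arguments' directly.
    assert(inputs == arguments);
}
```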
@@ -73,12 +73,6 @@ namespace ngraph
             m_tensor_view_layout = tensor_view_layout;
         }

-        virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                          const std::shared_ptr<TensorView>& value) const
-        {
-            views.push_back(value);
-        }
-
     protected:
         std::shared_ptr<const TensorViewType> m_tensor_view_type;
         std::shared_ptr<layout::TensorViewLayout> m_tensor_view_layout;
...
@@ -37,9 +37,9 @@ runtime::cpu::CPU_CallFrame::~CPU_CallFrame()
     cleanup_runtime_context();
 }

-void runtime::cpu::CPU_CallFrame::tensor_call(
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& output_tvs,
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& input_tvs)
+void runtime::cpu::CPU_CallFrame::call(
+    const std::vector<std::shared_ptr<runtime::TensorView>>& output_tvs,
+    const std::vector<std::shared_ptr<runtime::TensorView>>& input_tvs)
 {
     vector<void*> inputs;
     vector<void*> outputs;
@@ -71,26 +71,6 @@ void runtime::cpu::CPU_CallFrame::tensor_call(
     }
 }

-void runtime::cpu::CPU_CallFrame::call(
-    const std::vector<std::shared_ptr<runtime::TensorView>>& results,
-    const std::vector<std::shared_ptr<runtime::TensorView>>& arguments)
-{
-    // TODO: Check types of args and result
-    vector<shared_ptr<runtime::TensorView>> inputs;
-    for (shared_ptr<runtime::TensorView> argument : arguments)
-    {
-        argument->collect_tensor_views(inputs, argument);
-    }
-    vector<shared_ptr<runtime::TensorView>> outputs;
-    for (shared_ptr<runtime::TensorView> result : results)
-    {
-        result->collect_tensor_views(outputs, result);
-    }
-    tensor_call(outputs, inputs);
-}
-
 void runtime::cpu::CPU_CallFrame::propagate_layouts(
     const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
     const LayoutDescriptorPtrs& layouts) const
...
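
With the wrapper gone, each backend call frame exposes a single call(outputs, inputs) entry point that unwraps every tensor view to its raw buffer and hands void* arrays to the compiled function, as the retained body above does. A self-contained sketch of that pattern, with stand-in types rather than the ngraph classes:

```cpp
#include <iostream>
#include <memory>
#include <vector>

struct HostTensorView
{
    std::vector<float> data;
    void* get_data_ptr() { return data.data(); }
};

using CompiledFunction = void (*)(void** inputs, void** outputs);

// c = a + b over 4 floats, standing in for m_compiled_function.
static void compiled_add(void** inputs, void** outputs)
{
    auto a = static_cast<float*>(inputs[0]);
    auto b = static_cast<float*>(inputs[1]);
    auto c = static_cast<float*>(outputs[0]);
    for (int i = 0; i < 4; i++)
    {
        c[i] = a[i] + b[i];
    }
}

// The call-frame pattern: unwrap tensor views, then invoke the kernel.
static void call(CompiledFunction fn,
                 const std::vector<std::shared_ptr<HostTensorView>>& output_tvs,
                 const std::vector<std::shared_ptr<HostTensorView>>& input_tvs)
{
    std::vector<void*> inputs;
    std::vector<void*> outputs;
    for (const auto& tv : input_tvs)
    {
        inputs.push_back(tv->get_data_ptr());
    }
    for (const auto& tv : output_tvs)
    {
        outputs.push_back(tv->get_data_ptr());
    }
    fn(inputs.data(), outputs.data());
}

int main()
{
    auto a = std::make_shared<HostTensorView>(HostTensorView{{1, 2, 3, 4}});
    auto b = std::make_shared<HostTensorView>(HostTensorView{{10, 20, 30, 40}});
    auto c = std::make_shared<HostTensorView>(HostTensorView{std::vector<float>(4)});
    call(compiled_add, {c}, {a, b});
    for (float v : c->data)
    {
        std::cout << v << ' '; // 11 22 33 44
    }
}
```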
@@ -52,11 +52,6 @@ namespace ngraph
                 void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
                           const std::vector<std::shared_ptr<runtime::TensorView>>& inputs);

-                /// @brief Invoke the function with tuples pre-expanded to their underlying
-                /// tensor views.
-                void tensor_call(const std::vector<std::shared_ptr<TensorView>>& outputs,
-                                 const std::vector<std::shared_ptr<TensorView>>& inputs);
-
                 void propagate_layouts(const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
                                        const LayoutDescriptorPtrs& layouts) const;
...
@@ -42,9 +42,9 @@ runtime::gpu::GPU_CallFrame::~GPU_CallFrame()
     cleanup_runtime_context();
 }

-void runtime::gpu::GPU_CallFrame::tensor_call(
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& output_tvs,
-    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& input_tvs)
+void runtime::gpu::GPU_CallFrame::call(
+    const std::vector<std::shared_ptr<runtime::TensorView>>& output_tvs,
+    const std::vector<std::shared_ptr<runtime::TensorView>>& input_tvs)
 {
     //Device tensors
     vector<void*> inputs;
@@ -66,26 +66,6 @@ void runtime::gpu::GPU_CallFrame::tensor_call(
     m_compiled_function(inputs.data(), outputs.data(), m_external_function->m_ctx.get());
 }

-void runtime::gpu::GPU_CallFrame::call(
-    const std::vector<std::shared_ptr<runtime::TensorView>>& results,
-    const std::vector<std::shared_ptr<runtime::TensorView>>& arguments)
-{
-    // TODO: Check types of args and result
-    vector<shared_ptr<runtime::TensorView>> inputs;
-    for (shared_ptr<runtime::TensorView> argument : arguments)
-    {
-        argument->collect_tensor_views(inputs, argument);
-    }
-    vector<shared_ptr<runtime::TensorView>> outputs;
-    for (shared_ptr<runtime::TensorView> result : results)
-    {
-        result->collect_tensor_views(outputs, result);
-    }
-    tensor_call(outputs, inputs);
-}
-
 void runtime::gpu::GPU_CallFrame::setup_runtime_context()
 {
     cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle);
...
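
The retained setup_runtime_context above captures the cublasStatus_t from cublasCreate, and cleanup_runtime_context (paired via the destructor) is responsible for the matching teardown. As an illustrative aside only, not something this PR adds: one common way to make that create/destroy pairing explicit is an RAII wrapper around the handle:

```cpp
#include <cublas_v2.h>
#include <stdexcept>

// Illustrative RAII wrapper for a cuBLAS handle: create once, check the
// status, and guarantee cublasDestroy runs on scope exit.
class CublasContext
{
public:
    CublasContext()
    {
        if (cublasCreate(&m_handle) != CUBLAS_STATUS_SUCCESS)
        {
            throw std::runtime_error("cublasCreate failed");
        }
    }
    ~CublasContext() { cublasDestroy(m_handle); }
    CublasContext(const CublasContext&) = delete;
    CublasContext& operator=(const CublasContext&) = delete;

    cublasHandle_t handle() const { return m_handle; }

private:
    cublasHandle_t m_handle;
};
```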
@@ -55,11 +55,6 @@ namespace ngraph
                 void call(const std::vector<std::shared_ptr<runtime::TensorView>>& outputs,
                           const std::vector<std::shared_ptr<runtime::TensorView>>& inputs);

-                /// @brief Invoke the function with tuples pre-expanded to their underlying
-                /// tensor views.
-                void tensor_call(const std::vector<std::shared_ptr<TensorView>>& outputs,
-                                 const std::vector<std::shared_ptr<TensorView>>& inputs);
-
                 void setup_runtime_context();
                 void cleanup_runtime_context();
...
@@ -208,14 +208,7 @@ void runtime::interpreter::INT_CallFrame::generate_calls(
     }
 }

-void runtime::interpreter::INT_CallFrame::tensor_call(
-    const vector<shared_ptr<runtime::HostTensorView>>& output_tvs,
-    const vector<shared_ptr<runtime::HostTensorView>>& input_tvs)
-{
-    call(m_function, output_tvs, input_tvs);
-}
-
-void runtime::interpreter::INT_CallFrame::tensor_call(
+void runtime::interpreter::INT_CallFrame::call(
     const vector<shared_ptr<runtime::TensorView>>& output_tvs,
     const vector<shared_ptr<runtime::TensorView>>& input_tvs)
 {
@@ -229,26 +222,7 @@ void runtime::interpreter::INT_CallFrame::tensor_call(
     {
         out.push_back(static_pointer_cast<runtime::HostTensorView>(tv));
     }
-    tensor_call(out, args);
-}
-
-void runtime::interpreter::INT_CallFrame::call(
-    const vector<shared_ptr<runtime::TensorView>>& results,
-    const vector<shared_ptr<runtime::TensorView>>& arguments)
-{
-    vector<shared_ptr<runtime::TensorView>> inputs;
-    for (shared_ptr<runtime::TensorView> argument : arguments)
-    {
-        argument->collect_tensor_views(inputs, argument);
-    }
-    vector<shared_ptr<runtime::TensorView>> outputs;
-    for (shared_ptr<runtime::TensorView> result : results)
-    {
-        result->collect_tensor_views(outputs, result);
-    }
-    tensor_call(outputs, inputs);
+    call(m_function, out, args);
 }

 vector<runtime::PerformanceCounter>
...
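
The interpreter's merged call() above narrows each runtime::TensorView to HostTensorView with static_pointer_cast, which performs no runtime type check; this is reasonable here because the interpreter backend only ever produces host tensor views. A small self-contained illustration of that trade-off, with stand-in types:

```cpp
#include <cassert>
#include <memory>

struct TensorView
{
    virtual ~TensorView() = default;
};
struct HostTensorView : TensorView
{
    int data = 42;
};

int main()
{
    std::shared_ptr<TensorView> tv = std::make_shared<HostTensorView>();

    // static_pointer_cast: no runtime check and no RTTI cost, but undefined
    // behavior if tv does not actually point to a HostTensorView.
    auto host = std::static_pointer_cast<HostTensorView>(tv);
    assert(host->data == 42);

    // dynamic_pointer_cast would verify the type and yield nullptr on a
    // mismatch, at the price of an RTTI lookup per tensor.
    assert(std::dynamic_pointer_cast<HostTensorView>(tv) != nullptr);
}
```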
@@ -140,12 +140,6 @@ public:
     void set_nan_check(bool);

 private:
-    /// @brief Invoke the function with tuples pre-expanded to their underlying
-    /// tensor views.
-    void tensor_call(const std::vector<std::shared_ptr<TensorView>>& outputs,
-                     const std::vector<std::shared_ptr<TensorView>>& inputs);
-    void tensor_call(const std::vector<std::shared_ptr<HostTensorView>>& outputs,
-                     const std::vector<std::shared_ptr<HostTensorView>>& inputs);
     void call(std::shared_ptr<Function> function,
               const std::vector<std::shared_ptr<runtime::HostTensorView>>& output_tvs,
               const std::vector<std::shared_ptr<runtime::HostTensorView>>& input_tvs);
...
@@ -32,12 +32,6 @@ shared_ptr<descriptor::TensorView> runtime::TensorView::get_descriptor() const
     return m_descriptor;
 }

-void runtime::TensorView::collect_tensor_views(vector<shared_ptr<TensorView>>& views,
-                                               const shared_ptr<TensorView>& value) const
-{
-    views.push_back(value);
-}
-
 const Shape& runtime::TensorView::get_shape() const
 {
     return m_descriptor->get_tensor_view_type()->get_shape();
...
@@ -50,9 +50,6 @@ namespace ngraph
             virtual std::shared_ptr<descriptor::TensorView> get_descriptor() const;

-            virtual void collect_tensor_views(std::vector<std::shared_ptr<TensorView>>& views,
-                                              const std::shared_ptr<TensorView>& value) const;
-
             const ngraph::Shape& get_shape() const;
             const ngraph::Strides& get_strides() const;
             size_t get_element_count() const;
...
@@ -46,12 +46,6 @@ bool TensorViewType::operator==(const TensorViewType& that) const
     return rc;
 }

-void TensorViewType::collect_tensor_views(
-    std::vector<std::shared_ptr<const TensorViewType>>& views) const
-{
-    views.push_back(shared_from_this());
-}
-
 std::ostream& ngraph::operator<<(std::ostream& out, const TensorViewType& obj)
 {
     out << "TensorViewType(" << obj.m_element_type << ", {" << join(obj.m_shape) << "})";
...
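
The deleted TensorViewType::collect_tensor_views handed out the object itself via shared_from_this(), which only works when the class derives from std::enable_shared_from_this and the instance is already owned by a shared_ptr. A minimal sketch of that prerequisite (illustrative; not the ngraph declaration):

```cpp
#include <cassert>
#include <memory>
#include <vector>

// shared_from_this() requires this CRTP base; calling it on an object not
// managed by a shared_ptr throws std::bad_weak_ptr (since C++17).
struct TensorViewType : std::enable_shared_from_this<TensorViewType>
{
    void collect(std::vector<std::shared_ptr<const TensorViewType>>& views) const
    {
        // In a const member function, shared_from_this() yields a
        // shared_ptr<const TensorViewType> sharing ownership with the caller.
        views.push_back(shared_from_this());
    }
};

int main()
{
    auto t = std::make_shared<TensorViewType>();
    std::vector<std::shared_ptr<const TensorViewType>> views;
    t->collect(views);
    assert(views.front().get() == t.get());
}
```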
@@ -44,7 +44,6 @@ namespace ngraph
         const Shape& get_shape() const { return m_shape; }
         bool operator==(const TensorViewType& that) const;
         bool operator!=(const TensorViewType& that) const;
-        void collect_tensor_views(std::vector<std::shared_ptr<const TensorViewType>>& views) const;
         friend std::ostream& operator<<(std::ostream&, const TensorViewType&);
...