Commit eb43fcc5 authored by Robert Kimball, committed by Scott Cyphers

Change hybrid to static library (#3051)

* wip

* hybrid as a static backend and not part of ngraph

* only for linux

* fix link problem

* style

* remove hybrid

* fix compile error
parent 9ba4a78a
@@ -464,28 +464,6 @@ set (SRC
validation_util.hpp
)
set(SRC ${SRC}
runtime/hybrid/hybrid_backend.cpp
runtime/hybrid/hybrid_backend.hpp
runtime/hybrid/hybrid_executable.cpp
runtime/hybrid/hybrid_executable.hpp
runtime/hybrid/hybrid_tensor.cpp
runtime/hybrid/hybrid_util.cpp
runtime/hybrid/hybrid_util.hpp
runtime/hybrid/op/function_call.cpp
runtime/hybrid/op/function_call.hpp
runtime/hybrid/pass/default_placement.cpp
runtime/hybrid/pass/default_placement.hpp
runtime/hybrid/pass/dump.cpp
runtime/hybrid/pass/dump.hpp
runtime/hybrid/pass/fix_get_output_element.cpp
runtime/hybrid/pass/fix_get_output_element.hpp
runtime/hybrid/pass/liveness.cpp
runtime/hybrid/pass/liveness.hpp
runtime/hybrid/pass/memory_layout.cpp
runtime/hybrid/pass/memory_layout.hpp
)
set(SRC ${SRC}
runtime/dynamic/dynamic_backend.cpp
runtime/dynamic/dynamic_backend.hpp
......
@@ -28,7 +28,6 @@
#include "ngraph/runtime/gpu/gpu_primitive_emitter.hpp"
#include "ngraph/runtime/gpu/gpu_tensor.hpp"
#include "ngraph/runtime/gpu/gpu_util.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/runtime/hybrid/hybrid_executable.hpp"
#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"
using namespace ngraph;
using namespace std;
runtime::hybrid::HybridBackend::HybridBackend(
const std::vector<std::shared_ptr<runtime::Backend>>& backend_list)
: m_backend_list{backend_list}
{
}
shared_ptr<runtime::Tensor>
runtime::hybrid::HybridBackend::create_tensor(const element::Type& element_type,
const Shape& shape)
{
return m_backend_list[0]->create_tensor(element_type, shape);
}
shared_ptr<runtime::Tensor> runtime::hybrid::HybridBackend::create_tensor(
const element::Type& element_type, const Shape& shape, void* memory_pointer)
{
return m_backend_list[0]->create_tensor(element_type, shape, memory_pointer);
}
shared_ptr<runtime::Executable>
runtime::hybrid::HybridBackend::compile(shared_ptr<Function> func,
bool enable_performance_collection)
{
return make_shared<HybridExecutable>(
m_backend_list, func, enable_performance_collection, m_debug_enabled);
}
bool runtime::hybrid::HybridBackend::is_supported(const Node& node) const
{
return true;
}
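For orientation, a minimal usage sketch of the backend above. It assumes the CPU and INTERPRETER backends are built and registered, and that runtime::Backend::create hands back shared pointers, as the test drivers of this era do; the function and tensor names are illustrative only.

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"

using namespace ngraph;

int main()
{
    // Assumption: CPU and INTERPRETER backends are available in this build.
    std::vector<std::shared_ptr<runtime::Backend>> backends{
        runtime::Backend::create("CPU"),        // preferred backend
        runtime::Backend::create("INTERPRETER") // fallback
    };
    auto hybrid = std::make_shared<runtime::hybrid::HybridBackend>(backends);

    // f(a, b) = a + b over 2x2 float tensors.
    Shape shape{2, 2};
    auto a = std::make_shared<op::Parameter>(element::f32, shape);
    auto b = std::make_shared<op::Parameter>(element::f32, shape);
    auto f = std::make_shared<Function>(std::make_shared<op::Add>(a, b),
                                        ParameterVector{a, b});

    // Tensors are created on the first backend in the list (see create_tensor above).
    auto t_a = hybrid->create_tensor(element::f32, shape);
    auto t_b = hybrid->create_tensor(element::f32, shape);
    auto t_r = hybrid->create_tensor(element::f32, shape);

    auto exec = hybrid->compile(f);
    exec->call({t_r}, {t_a, t_b});
}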
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
class HybridBackend;
}
}
}
class ngraph::runtime::hybrid::HybridBackend : public ngraph::runtime::Backend
{
public:
HybridBackend(const std::vector<std::shared_ptr<runtime::Backend>>& backend_list);
std::shared_ptr<ngraph::runtime::Tensor>
create_tensor(const ngraph::element::Type& element_type,
const ngraph::Shape& shape) override;
std::shared_ptr<ngraph::runtime::Tensor>
create_tensor(const ngraph::element::Type& element_type,
const ngraph::Shape& shape,
void* memory_pointer) override;
std::shared_ptr<Executable> compile(std::shared_ptr<ngraph::Function> func,
bool enable_performance_data = false) override;
bool is_supported(const ngraph::Node& node) const override;
void set_debug_enabled(bool flag) { m_debug_enabled = flag; }
protected:
std::vector<std::shared_ptr<runtime::Backend>> m_backend_list;
bool m_debug_enabled = false;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/hybrid/hybrid_executable.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/runtime/hybrid/hybrid_util.hpp"
#include "ngraph/runtime/hybrid/pass/default_placement.hpp"
#include "ngraph/runtime/hybrid/pass/dump.hpp"
#include "ngraph/runtime/hybrid/pass/fix_get_output_element.hpp"
#include "ngraph/runtime/hybrid/pass/liveness.hpp"
#include "ngraph/runtime/hybrid/pass/memory_layout.hpp"
#include "ngraph/runtime/tensor.hpp"
using namespace ngraph;
using namespace std;
runtime::hybrid::HybridExecutable::HybridExecutable(
const std::vector<std::shared_ptr<runtime::Backend>>& backend_list,
const shared_ptr<Function>& func,
bool enable_performance_collection,
bool debug_enabled)
: m_function{clone_function(*func)}
, m_backend_list{backend_list}
, m_debug_enabled{debug_enabled}
{
if (backend_list.size() == 0)
{
throw runtime_error("Hybrid Executable constructed with zero-sized backend list");
}
// Run placement pass
ngraph::pass::Manager pass_manager;
configure_passes(pass_manager);
pass_manager.run_passes(m_function);
runtime::hybrid::rewrite_function(m_function, m_backend_list);
m_executable = backend_list[0]->compile(m_function);
set_parameters_and_results(*func);
}
void runtime::hybrid::HybridExecutable::configure_passes(ngraph::pass::Manager& pass_manager)
{
pass_manager.register_pass<runtime::hybrid::pass::DefaultPlacement>(m_backend_list);
pass_manager.register_pass<runtime::hybrid::pass::FixGetOutputElement>();
pass_manager.register_pass<runtime::hybrid::pass::Liveness>();
pass_manager.register_pass<runtime::hybrid::pass::Dump>("graph.dump");
if (m_debug_enabled)
{
pass_manager.register_pass<ngraph::pass::VisualizeTree>("graph.dot", node_modifiers);
}
}
bool runtime::hybrid::HybridExecutable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
{
bool rc = true;
using node_map_t = unordered_map<shared_ptr<Node>, shared_ptr<runtime::Tensor>>;
// Each Parameter and Result node in m_function maps to one Tensor
node_map_t map_node_to_tensor;
for (size_t i = 0; i < inputs.size(); ++i)
{
map_node_to_tensor[m_function->get_parameters()[i]] = inputs[i];
}
for (size_t i = 0; i < outputs.size(); ++i)
{
map_node_to_tensor[m_function->get_results()[i]] = outputs[i];
}
// Init backend
size_t placement = m_function->get_placement();
auto backend = m_backend_list[placement];
// Prepare parameter Tensors
vector<shared_ptr<runtime::Tensor>> parameters;
for (const shared_ptr<op::Parameter>& parameter_node : m_function->get_parameters())
{
auto it = map_node_to_tensor.find(parameter_node);
if (it != map_node_to_tensor.end())
{
parameters.push_back(it->second);
}
else
{
throw runtime_error("Parameter temp not found in hybrid cache");
}
}
// Prepare result Tensors
vector<shared_ptr<runtime::Tensor>> results;
map<runtime::Tensor*, runtime::Tensor*> copy_back;
for (const shared_ptr<op::Result>& result_node : m_function->get_results())
{
auto it = map_node_to_tensor.find(result_node);
if (it != map_node_to_tensor.end())
{
results.push_back(it->second);
}
else
{
throw runtime_error("Result temp not found in hybrid cache");
}
}
m_executable->call(results, parameters);
// Need to copy any results to the correct device
for (const auto& p : copy_back)
{
p.second->copy_from(*p.first);
}
return rc;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/pass/manager.hpp"
#include "ngraph/runtime/executable.hpp"
namespace ngraph
{
namespace runtime
{
class Backend;
namespace hybrid
{
class HybridExecutable;
}
} // namespace runtime
} // namespace ngraph
class ngraph::runtime::hybrid::HybridExecutable : public runtime::Executable
{
public:
HybridExecutable(const std::vector<std::shared_ptr<runtime::Backend>>& backend_list,
const std::shared_ptr<Function>& func,
bool enable_performance_collection = false,
bool debug_enabled = false);
bool call(const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& inputs) override;
template <typename T>
std::shared_ptr<T> get_as() const
{
if (auto exec = std::dynamic_pointer_cast<T>(m_executable))
{
return exec;
}
else
{
throw ngraph::ngraph_error("Requested invalid ngraph::Executable subclass");
}
}
/// Allow overriding the configuration of the pass manager. If you override this method
/// you must register all passes.
virtual void configure_passes(ngraph::pass::Manager& pass_manager);
protected:
std::shared_ptr<ngraph::Function> m_function;
std::shared_ptr<Executable> m_executable;
std::unordered_map<std::shared_ptr<ngraph::op::Parameter>, std::shared_ptr<ngraph::op::Result>>
m_map_parameter_to_result;
std::vector<std::shared_ptr<runtime::Backend>> m_backend_list;
bool m_debug_enabled = false;
};
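A hedged sketch of get_as() in use, recovering the concrete executable produced by the first backend. runtime::interpreter::INTExecutable and its header path are assumptions about the interpreter backend, not part of this diff.

#include "ngraph/runtime/hybrid/hybrid_executable.hpp"
#include "ngraph/runtime/interpreter/int_executable.hpp" // assumption: header name

using namespace ngraph;

// Hedged sketch: unwrap the inner executable, assuming the INTERPRETER
// backend sat first in the HybridBackend's backend list.
std::shared_ptr<runtime::interpreter::INTExecutable>
    unwrap(const std::shared_ptr<runtime::hybrid::HybridExecutable>& exec)
{
    // get_as() throws ngraph_error if the inner executable is another subclass.
    return exec->get_as<runtime::interpreter::INTExecutable>();
}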
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstring>
#include <memory>
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
using namespace std;
static const size_t alignment = 64;
runtime::HybridTensor::HybridTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, ""))
, m_allocated_buffer_pool(nullptr)
, m_aligned_buffer_pool(nullptr)
{
m_descriptor->set_tensor_layout(
std::make_shared<ngraph::descriptor::layout::DenseTensorLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_layout()->get_size() * element_type.size();
if (memory_pointer != nullptr)
{
m_aligned_buffer_pool = static_cast<char*>(memory_pointer);
}
else if (m_buffer_size > 0)
{
size_t allocation_size = m_buffer_size + alignment;
m_allocated_buffer_pool = static_cast<char*>(ngraph_malloc(allocation_size));
m_aligned_buffer_pool = m_allocated_buffer_pool;
size_t mod = size_t(m_aligned_buffer_pool) % alignment;
if (mod != 0)
{
m_aligned_buffer_pool += (alignment - mod);
}
}
}
runtime::HybridTensor::HybridTensor(const ngraph::element::Type& element_type, const Shape& shape)
: HybridTensor(element_type, shape, nullptr)
{
}
runtime::HybridTensor::~HybridTensor()
{
if (m_allocated_buffer_pool != nullptr)
{
ngraph_free(m_allocated_buffer_pool);
}
}
char* runtime::HybridTensor::get_data_ptr()
{
return m_aligned_buffer_pool;
}
const char* runtime::HybridTensor::get_data_ptr() const
{
return m_aligned_buffer_pool;
}
void runtime::HybridTensor::write(const void* source, size_t n)
{
if (n > m_buffer_size)
{
throw out_of_range("write access past end of tensor");
}
char* target = get_data_ptr();
memcpy(target, source, n);
}
void runtime::HybridTensor::read(void* target, size_t n) const
{
if (n > m_buffer_size)
{
throw out_of_range("read access past end of tensor");
}
const char* source = get_data_ptr();
memcpy(target, source, n);
}
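The constructor's pointer bump is easier to see in isolation. A standalone sketch of the same over-allocate-and-round-up trick, using plain malloc and illustrative sizes:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Over-allocate by `alignment` bytes, then bump the pointer up to the
// next 64-byte boundary, exactly as HybridTensor's constructor does.
int main()
{
    const size_t alignment = 64;
    const size_t buffer_size = 1000;

    char* allocated = static_cast<char*>(std::malloc(buffer_size + alignment));
    char* aligned = allocated;
    size_t mod = reinterpret_cast<std::uintptr_t>(aligned) % alignment;
    if (mod != 0)
    {
        aligned += (alignment - mod); // advance at most alignment-1 bytes
    }
    // e.g. allocated == 0x...1010 -> mod == 16 -> aligned == 0x...1040
    std::printf("%p -> %p\n", static_cast<void*>(allocated), static_cast<void*>(aligned));
    std::free(allocated);
}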
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/type/element_type.hpp"
namespace ngraph
{
namespace runtime
{
class HybridTensor;
}
}
class ngraph::runtime::HybridTensor : public ngraph::runtime::Tensor
{
public:
HybridTensor(const ngraph::element::Type& element_type, const Shape& shape);
HybridTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
virtual ~HybridTensor() override;
char* get_data_ptr();
const char* get_data_ptr() const;
template <typename T>
T* get_data_ptr()
{
return reinterpret_cast<T*>(get_data_ptr());
}
template <typename T>
const T* get_data_ptr() const
{
return reinterpret_cast<const T*>(get_data_ptr());
}
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
/// \param n Number of bytes to write; must be an integral number of elements.
void write(const void* p, size_t n) override;
/// \brief Read bytes directly from the tensor
/// \param p Pointer to destination for data
/// \param n Number of bytes to read; must be an integral number of elements.
void read(void* p, size_t n) const override;
protected:
HybridTensor(const HybridTensor&) = delete;
HybridTensor(HybridTensor&&) = delete;
HybridTensor& operator=(const HybridTensor&) = delete;
char* m_allocated_buffer_pool;
char* m_aligned_buffer_pool;
size_t m_buffer_size;
};
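A short hedged sketch of the byte-oriented API above; the values are illustrative.

#include "ngraph/runtime/hybrid/hybrid_tensor.hpp"

using namespace ngraph;

// Write four floats, touch the data through the typed view,
// then read the bytes back out.
void hybrid_tensor_demo()
{
    runtime::HybridTensor tensor(element::f32, Shape{2, 2}); // 16-byte buffer
    float values[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    tensor.write(values, sizeof(values));       // throws out_of_range past the end
    float* view = tensor.get_data_ptr<float>(); // 64-byte-aligned pointer
    view[0] += 10.0f;
    float back[4];
    tensor.read(back, sizeof(back));            // back[0] == 11.0f
}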
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include <unordered_map>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
namespace ngraph
{
namespace runtime
{
class Backend;
namespace hybrid
{
void rewrite_function(
const std::shared_ptr<Function>& f,
const std::vector<std::shared_ptr<runtime::Backend>>& backend_list);
void node_modifiers(const Node& node, std::vector<std::string>& attributes);
}
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "function_call.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/runtime/backend.hpp"
using namespace std;
using namespace ngraph;
runtime::hybrid::op::FunctionCall::FunctionCall(const NodeVector& outputs,
const NodeVector& inputs,
const Function& function,
shared_ptr<Backend> backend)
: Op("FunctionCall", inputs)
, m_function_outputs{outputs}
, m_function{ngraph::clone_function(function)}
, m_backend{backend}
, m_executable{backend->compile(m_function)}
{
set_output_size(outputs.size());
for (size_t i = 0; i < outputs.size(); i++)
{
set_output_type(i, outputs[i]->get_element_type(), outputs[i]->get_output_shape(0));
}
}
shared_ptr<Node>
runtime::hybrid::op::FunctionCall::copy_with_new_args(const NodeVector& new_args) const
{
return make_shared<FunctionCall>(m_function_outputs, new_args, *m_function, m_backend);
}
shared_ptr<runtime::Backend> runtime::hybrid::op::FunctionCall::get_backend() const
{
return m_backend;
}
shared_ptr<runtime::Executable> runtime::hybrid::op::FunctionCall::get_executable() const
{
return m_executable;
}
shared_ptr<Function> runtime::hybrid::op::FunctionCall::get_function() const
{
return m_function;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace op
{
class FunctionCall;
}
}
}
}
class ngraph::runtime::hybrid::op::FunctionCall : public ngraph::op::Op
{
public:
FunctionCall(const NodeVector& outputs,
const NodeVector& inputs,
const Function& function,
std::shared_ptr<Backend> backend);
std::shared_ptr<Backend> get_backend() const;
std::shared_ptr<Executable> get_executable() const;
std::shared_ptr<Function> get_function() const;
private:
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
const NodeVector m_function_outputs;
std::shared_ptr<Function> m_function;
std::shared_ptr<Backend> m_backend;
std::shared_ptr<Executable> m_executable;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
NGRAPH_OP(FunctionCall, ngraph::runtime::hybrid::op)
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/hybrid/pass/default_placement.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/placement.hpp"
#include "ngraph/runtime/backend.hpp"
using namespace ngraph;
using namespace std;
runtime::hybrid::pass::DefaultPlacement::DefaultPlacement(
const vector<shared_ptr<runtime::Backend>>& placement_backends)
: m_placement_backends(placement_backends)
{
}
bool runtime::hybrid::pass::DefaultPlacement::run_on_node(shared_ptr<Node> node)
{
size_t backend_index = 0;
for (auto backend : m_placement_backends)
{
if (backend->is_supported(*node))
{
node->set_placement_index(backend_index);
return false;
}
backend_index++;
}
throw runtime_error("Node " + node->get_name() + " not supported by any backend");
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <exception>
#include <functional>
#include <sstream>
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace pass
{
class DefaultPlacement;
}
}
}
}
class ngraph::runtime::hybrid::pass::DefaultPlacement : public ngraph::pass::NodePass
{
public:
DefaultPlacement(
const std::vector<std::shared_ptr<ngraph::runtime::Backend>>& placement_backends);
private:
bool run_on_node(std::shared_ptr<Node> node) override;
std::vector<std::shared_ptr<ngraph::runtime::Backend>> m_placement_backends;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <fstream>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/runtime/hybrid/pass/dump.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
runtime::hybrid::pass::Dump::Dump(const string& output_file)
: m_output_file{output_file}
{
}
bool runtime::hybrid::pass::Dump::run_on_module(vector<shared_ptr<Function>>& functions)
{
ofstream out{m_output_file};
if (out)
{
for (shared_ptr<Function> f : functions)
{
out << "=====================================================================\n";
out << f->get_name() << " start\n";
out << "=====================================================================\n";
for (const shared_ptr<Node>& node : f->get_ordered_ops())
{
out << node->get_name() << "(";
vector<string> inputs;
for (const Input<Node>& input : node->inputs())
{
inputs.push_back(input.get_tensor().get_name());
}
out << join(inputs);
out << ") -> ";
vector<string> outputs;
for (const Output<Node>& output : node->outputs())
{
outputs.push_back(output.get_tensor().get_name());
}
out << join(outputs);
out << "\n";
out << " " << node->get_placement_index() << " Placement\n";
for (const descriptor::Tensor* tensor : node->liveness_new_list)
{
out << " N " << tensor->get_name() << "\n";
}
for (const descriptor::Tensor* tensor : node->liveness_free_list)
{
out << " F " << tensor->get_name() << "\n";
}
}
out << "=====================================================================\n";
out << f->get_name() << " end\n";
out << "=====================================================================\n";
}
}
return false;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <string>
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace pass
{
class Dump;
}
}
}
}
class ngraph::runtime::hybrid::pass::Dump : public ngraph::pass::ModulePass
{
public:
Dump(const std::string& output_file);
virtual bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
private:
const std::string m_output_file;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/hybrid/pass/fix_get_output_element.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/placement.hpp"
#include "ngraph/runtime/backend.hpp"
using namespace ngraph;
using namespace std;
runtime::hybrid::pass::FixGetOutputElement::FixGetOutputElement()
{
}
bool runtime::hybrid::pass::FixGetOutputElement::run_on_node(shared_ptr<Node> node)
{
if (node->description() == "GetOutputElement")
{
auto parent = node->get_arguments().at(0);
node->set_placement_index(parent->get_placement_index());
}
return false;
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <exception>
#include <functional>
#include <sstream>
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace pass
{
class FixGetOutputElement;
}
}
}
}
class ngraph::runtime::hybrid::pass::FixGetOutputElement : public ngraph::pass::NodePass
{
public:
FixGetOutputElement();
private:
bool run_on_node(std::shared_ptr<Node> node) override;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <exception>
#include <sstream>
#include <unordered_set>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/result.hpp"
#include "ngraph/runtime/hybrid/pass/liveness.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
bool runtime::hybrid::pass::Liveness::run_on_function(shared_ptr<ngraph::Function> function)
{
list<shared_ptr<Node>> ops = function->get_ordered_ops();
unordered_set<descriptor::Tensor*> persistent_tensors;
unordered_set<descriptor::Tensor*> output_tensors;
for (const shared_ptr<op::Parameter>& node : function->get_parameters())
{
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
}
}
for (const shared_ptr<op::Result>& node : function->get_results())
{
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
output_tensors.insert(&tensor);
}
}
for (const shared_ptr<Node>& node : ops)
{
if (auto constant_node = dynamic_pointer_cast<op::Constant>(node))
{
for (auto& output : constant_node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
persistent_tensors.insert(&tensor);
}
}
}
unordered_set<descriptor::Tensor*> currently_live;
for (auto it = ops.rbegin(); it != ops.rend(); it++)
{
const shared_ptr<Node>& node = *it;
node->liveness_new_list.clear();
node->liveness_free_list.clear();
unordered_set<descriptor::Tensor*> input_tensor_decls;
for (auto& input : node->inputs())
{
descriptor::Tensor& tensor = input.get_tensor();
if (persistent_tensors.find(&tensor) == persistent_tensors.end())
{
input_tensor_decls.insert(&tensor);
}
}
unordered_set<descriptor::Tensor*> output_tensor_decls;
for (auto& output : node->outputs())
{
descriptor::Tensor& tensor = output.get_tensor();
if (persistent_tensors.find(&tensor) == persistent_tensors.end())
{
output_tensor_decls.insert(&tensor);
}
}
unordered_set<descriptor::Tensor*> free_tensor_decls;
unordered_set<descriptor::Tensor*> new_tensor_decls;
unordered_set<descriptor::Tensor*> all_tensor_decls = input_tensor_decls;
all_tensor_decls.insert(output_tensor_decls.begin(), output_tensor_decls.end());
for (descriptor::Tensor* tensor_decl : all_tensor_decls)
{
if (currently_live.find(tensor_decl) == currently_live.end())
{
// this is the last op in which this tensor is used;
// it can be freed once this op has executed
currently_live.insert(tensor_decl);
if (output_tensors.find(tensor_decl) == output_tensors.end())
{
// Don't free output tensors
free_tensor_decls.insert(tensor_decl);
}
}
}
for (descriptor::Tensor* output_decl : output_tensor_decls)
{
auto currently_live_it = currently_live.find(output_decl);
if (currently_live_it != currently_live.end())
{
new_tensor_decls.insert(output_decl);
currently_live.erase(currently_live_it);
}
}
node->liveness_free_list = free_tensor_decls;
node->liveness_new_list = new_tensor_decls;
}
return false;
}
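The reverse walk is the subtle part: scanning from the last op backwards, the first time a non-persistent tensor is encountered is its last use, so it lands in that op's free list; an output that was live gets recorded as new at the op that produces it. A self-contained toy restatement of the same idea, with integer tensor ids (all names hypothetical):

#include <cstdio>
#include <set>
#include <vector>

struct ToyOp
{
    std::vector<int> inputs;
    std::vector<int> outputs;
    std::vector<int> new_list;  // tensors first needed at this op
    std::vector<int> free_list; // tensors last needed at this op
};

int main()
{
    // param(0) -> op0 -> tensor 1 -> op1 -> result(2); 0 and 2 are persistent.
    std::vector<ToyOp> ops = {{{0}, {1}}, {{1}, {2}}};
    std::set<int> persistent = {0, 2};
    std::set<int> live;
    for (auto it = ops.rbegin(); it != ops.rend(); ++it)
    {
        std::set<int> touched(it->inputs.begin(), it->inputs.end());
        touched.insert(it->outputs.begin(), it->outputs.end());
        for (int t : touched)
        {
            if (persistent.count(t) || live.count(t))
                continue;
            live.insert(t);
            it->free_list.push_back(t); // last use is seen first in the reverse walk
        }
        for (int t : it->outputs)
        {
            if (live.erase(t))
                it->new_list.push_back(t); // allocated here
        }
    }
    // Expected: tensor 1 is new at op0 and freed at op1.
    std::printf("op0 new: %zu, op1 free: %zu\n",
                ops[0].new_list.size(), ops[1].free_list.size());
}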
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace pass
{
class Liveness;
}
}
}
}
class ngraph::runtime::hybrid::pass::Liveness : public ngraph::pass::FunctionPass
{
public:
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
};
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <exception>
#include <sstream>
#include "ngraph/op/concat.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/runtime/hybrid/pass/memory_layout.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
runtime::hybrid::pass::MemoryLayout::MemoryLayout(size_t alignment)
: m_alignment(alignment)
{
if (m_alignment == 0)
{
throw invalid_argument("Memory alignment must be > 0");
}
}
bool runtime::hybrid::pass::MemoryLayout::run_on_function(shared_ptr<ngraph::Function> function)
{
ngraph::pass::MemoryManager mm(m_alignment, false);
for (shared_ptr<Node> node : function->get_ordered_ops())
{
for (descriptor::Tensor* tensor : node->liveness_new_list)
{
size_t offset = mm.allocate(tensor->size());
tensor->set_pool_offset(offset);
}
for (const descriptor::Tensor* tensor : node->liveness_free_list)
{
mm.free(tensor->get_pool_offset());
}
}
function->set_temporary_pool_size(mm.max_allocated());
return false;
}
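A hedged sketch of wiring this pass up: Liveness must run first, since the offset assignment above consumes each node's liveness_new_list and liveness_free_list.

#include "ngraph/function.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/runtime/hybrid/pass/liveness.hpp"
#include "ngraph/runtime/hybrid/pass/memory_layout.hpp"

using namespace ngraph;

void assign_pool_offsets(const std::shared_ptr<Function>& f)
{
    pass::Manager pass_manager;
    pass_manager.register_pass<runtime::hybrid::pass::Liveness>();
    pass_manager.register_pass<runtime::hybrid::pass::MemoryLayout>(64); // 64-byte alignment
    pass_manager.run_passes(f);
    // The function's temporary pool size is now set to the peak allocation.
}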
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace hybrid
{
namespace pass
{
class MemoryLayout;
}
}
}
}
class ngraph::runtime::hybrid::pass::MemoryLayout : public ngraph::pass::FunctionPass
{
public:
MemoryLayout(size_t alignment = 64);
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
private:
size_t m_alignment;
};
@@ -65,7 +65,9 @@
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#ifdef INTERPRETER_USE_HYBRID
#include "ngraph/runtime/hybrid/op/function_call.hpp"
#endif
#include "ngraph/runtime/interpreter/node_wrapper.hpp"
#include "ngraph/runtime/reference/abs.hpp"
#include "ngraph/runtime/reference/acos.hpp"
@@ -762,7 +764,7 @@ private:
}
else if (type == element::i32)
{
reference::embedding<T, int>(args[0]->get_data_ptr<const int>(),
reference::embedding<T, int32_t>(args[0]->get_data_ptr<const int>(),
args[1]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),
element_count,
@@ -806,6 +808,7 @@ private:
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
break;
}
#ifdef INTERPRETER_USE_HYBRID
case OP_TYPEID::FunctionCall:
{
auto f = static_cast<const runtime::hybrid::op::FunctionCall*>(&node);
@@ -829,6 +832,7 @@ private:
executable->call(outputs, inputs);
break;
}
#endif
case OP_TYPEID::Floor:
{
size_t element_count = shape_size(node.get_output_shape(0));
......
@@ -29,7 +29,9 @@ runtime::interpreter::NodeWrapper::NodeWrapper(const shared_ptr<const Node>& nod
#define NGRAPH_OP(a, b) {#a, runtime::interpreter::OP_TYPEID::a},
static unordered_map<string, runtime::interpreter::OP_TYPEID> typeid_map{
#include "ngraph/op/op_tbl.hpp"
#ifdef INTERPRETER_USE_HYBRID
#include "ngraph/runtime/hybrid/op/op_tbl.hpp"
#endif
};
#undef NGRAPH_OP
......
@@ -40,7 +40,9 @@ namespace ngraph
enum class ngraph::runtime::interpreter::OP_TYPEID
{
#include "ngraph/op/op_tbl.hpp"
#ifdef INTERPRETER_USE_HYBRID
#include "ngraph/runtime/hybrid/op/op_tbl.hpp"
#endif
};
#undef NGRAPH_OP
......
@@ -98,9 +98,6 @@ if (NGRAPH_INTERPRETER_ENABLE)
backend_debug_api.cpp
builder.cpp
backend_api.cpp)
if (NGRAPH_CPU_ENABLE)
list(APPEND SRC hybrid_backend.cpp)
endif()
set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} INTERPRETER)
endif()
......