Commit 1e634f4b authored by Robert Kimball, committed by Scott Cyphers

Changes to make the hybrid backend independent of any specific backends, with CPU configured as the fallback backend (#2319)

parent 29d29162
src/ngraph/runtime/cpu/cpu_backend.cpp
@@ -145,3 +145,8 @@ vector<runtime::PerformanceCounter>
     }
     return rc;
 }
+
+bool runtime::cpu::CPU_Backend::is_supported(const Node& op) const
+{
+    return true;
+}
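For context on why CPU_Backend::is_supported unconditionally returns true: a backend that claims support for every op can serve as the universal fallback when it is placed last in the hybrid backend's ordered list. Below is a minimal sketch of how a placement pass might use is_supported that way; the pick_backend helper is a hypothetical illustration, not code from this commit.

#include <memory>
#include <stdexcept>
#include <vector>

#include "ngraph/node.hpp"
#include "ngraph/runtime/backend.hpp"

// Hypothetical: walk an ordered backend list and place the node on the
// first backend that claims support. Because CPU_Backend::is_supported()
// always returns true, a CPU backend registered last catches every node,
// acting as the guaranteed fallback.
size_t pick_backend(const ngraph::Node& node,
                    const std::vector<std::shared_ptr<ngraph::runtime::Backend>>& backends)
{
    for (size_t i = 0; i < backends.size(); ++i)
    {
        if (backends[i]->is_supported(node))
        {
            return i; // first backend that supports the op wins
        }
    }
    throw std::runtime_error("no backend supports node " + node.get_name());
}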
src/ngraph/runtime/cpu/cpu_backend.hpp
@@ -58,6 +58,8 @@ namespace ngraph
             std::vector<PerformanceCounter>
                 get_performance_data(std::shared_ptr<Function> func) const override;
 
+            bool is_supported(const Node& node) const override;
+
         private:
             class FunctionInstance
             {
...
src/ngraph/runtime/hybrid/hybrid_backend.cpp
@@ -18,13 +18,10 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/pass/manager.hpp"
 #include "ngraph/pass/visualize_tree.hpp"
-#include "ngraph/runtime/gpu/gpu_backend.hpp"
-#include "ngraph/runtime/gpu/gpu_tensor.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/hybrid/hybrid_util.hpp"
 #include "ngraph/runtime/hybrid/pass/assign_placement.hpp"
 #include "ngraph/runtime/hybrid/pass/fix_get_output_element.hpp"
-#include "ngraph/runtime/interpreter/int_backend.hpp"
 #include "ngraph/runtime/tensor.hpp"
 
 using namespace ngraph;
@@ -205,32 +202,6 @@ bool runtime::hybrid::HybridBackend::is_supported(const Node& node) const
     return true;
 }
 
-string runtime::hybrid::HybridBackend::get_placement_name(const runtime::Tensor* t)
-{
-    string rc;
-    if (dynamic_cast<const runtime::HostTensor*>(t) != nullptr)
-    {
-        rc = "HostTensor";
-    }
-    else if (dynamic_cast<const runtime::gpu::GPUTensor*>(t) != nullptr)
-    {
-        rc = "GPUTensor";
-    }
-    return rc;
-}
-string runtime::hybrid::HybridBackend::get_placement_name(const runtime::Backend* t)
-{
-    string rc;
-    if (dynamic_cast<const runtime::interpreter::INTBackend*>(t) != nullptr)
-    {
-        rc = "INTBackend";
-    }
-    else if (dynamic_cast<const runtime::gpu::GPU_Backend*>(t) != nullptr)
-    {
-        rc = "GPU_Backend";
-    }
-    return rc;
-}
 size_t runtime::hybrid::HybridBackend::get_placement(const runtime::Tensor* t)
 {
     size_t index = 0;
...
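The deleted helpers are what tied the hybrid backend to concrete backend types: each one dynamic_cast the pointer against GPU and interpreter classes to recover a name. A backend-agnostic replacement only needs the backend's position in the hybrid's own m_backend_list. The sketch below is a hypothetical illustration of that idea (the parallel name table and lookup function are not part of this commit):

#include <memory>
#include <string>
#include <vector>

#include "ngraph/runtime/backend.hpp"

// Hypothetical: since the hybrid backend already owns m_backend_list, a
// placement "name" can be derived from the list index plus a parallel
// vector of creation strings (e.g. {"GPU", "CPU"}), instead of
// dynamic_casts against concrete backend classes.
std::string placement_name(
    const ngraph::runtime::Backend* b,
    const std::vector<std::shared_ptr<ngraph::runtime::Backend>>& backend_list,
    const std::vector<std::string>& backend_names)
{
    for (size_t i = 0; i < backend_list.size(); ++i)
    {
        if (backend_list[i].get() == b)
        {
            return backend_names[i];
        }
    }
    return "unknown";
}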
src/ngraph/runtime/hybrid/hybrid_backend.hpp
@@ -70,7 +70,5 @@ private:
     std::map<std::shared_ptr<ngraph::Function>, FunctionInstance> m_function_map;
     std::vector<std::shared_ptr<runtime::Backend>> m_backend_list;
 
-    std::string get_placement_name(const runtime::Tensor* t);
-    std::string get_placement_name(const runtime::Backend* t);
     size_t get_placement(const runtime::Tensor* t);
 };
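Putting the pieces together: the m_backend_list member above suggests the HybridBackend is constructed from an ordered backend list, with CPU last as the fallback described in the commit message. The construction sketch below assumes such a constructor exists and that Backend::create returns a shared_ptr in this version; both are assumptions about the API, not confirmed by this diff.

#include <memory>
#include <vector>

#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"

int main()
{
    using namespace ngraph;

    // Assumed construction API: an ordered list where earlier backends get
    // first pick of nodes. CPU goes last; since CPU_Backend::is_supported()
    // returns true for every op, it is the guaranteed fallback.
    std::vector<std::shared_ptr<runtime::Backend>> backends{
        runtime::Backend::create("INTERPRETER"), // preferred backend (example)
        runtime::Backend::create("CPU"),         // universal fallback
    };
    auto hybrid = std::make_shared<runtime::hybrid::HybridBackend>(backends);
    // ... compile and call functions through `hybrid` as with any Backend.
}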